repo_name: string (length 6-130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
possible_versions: list
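Each row below follows this schema, with the sequences parallel (one entry per file in the repository snapshot). As a minimal, hypothetical sketch only, the dump does not state how rows are serialized; the JSON Lines assumption and the file name records.jsonl are placeholders, not part of the source:

# Sketch: iterate records of the schema above, assuming JSON Lines storage.
import json

with open("records.jsonl", "r", encoding="utf-8") as f:  # hypothetical path
    for line in f:
        record = json.loads(line)
        repo = record["repo_name"]  # e.g. "WeixiongLin/Quanser-robots"
        # hexsha, file_path, code, and apis are parallel per-file sequences.
        for sha, path, code, apis in zip(
            record["hexsha"], record["file_path"], record["code"], record["apis"]
        ):
            print(repo, sha[:8], path, len(code), "chars,", len(apis), "APIs")
        # possible_versions: one dict per file mapping library -> candidate versions.
        print(record["possible_versions"])
        break  # first record only, for illustration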
WeixiongLin/Quanser-robots
[ "733d4aeed1a8d91a42e51abb7c7884a6933d8fb6" ]
[ "Quanser Robot/MPC/MPC-Qube/dynamics.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport pickle\nfrom utils import *\n\nclass MLP(nn.Module):\n '''A simple implementation of the multi-layer neural network'''\n def __init__(self, n_input=7, n_output=6, n_h=2, size_h=128):\n '''\n Specify the neural network architecture\n\n :param n_input: The dimension of the input\n :param n_output: The dimension of the output\n :param n_h: The number of the hidden layer\n :param size_h: The dimension of the hidden layer\n '''\n super(MLP, self).__init__()\n self.n_input = n_input\n self.fc_in = nn.Linear(n_input, size_h)\n self.relu = nn.ReLU()\n self.tanh = nn.Tanh()\n assert n_h >= 1, \"h must be integer and >= 1\"\n self.fc_list = nn.ModuleList()\n for i in range(n_h - 1):\n self.fc_list.append(nn.Linear(size_h, size_h))\n self.fc_out = nn.Linear(size_h, n_output)\n # Initialize weight\n nn.init.uniform_(self.fc_in.weight, -0.1, 0.1)\n nn.init.uniform_(self.fc_out.weight, -0.1, 0.1)\n self.fc_list.apply(self.init_normal)\n\n def forward(self, x):\n out = x.view(-1, self.n_input)\n out = self.fc_in(out)\n out = self.tanh(out)\n for _, layer in enumerate(self.fc_list, start=0):\n out = layer(out)\n out = self.tanh(out)\n out = self.fc_out(out)\n return out\n\n def init_normal(self, m):\n if type(m) == nn.Linear:\n nn.init.uniform_(m.weight, -0.1, 0.1)\n\nclass DynamicModel(object):\n '''Neural network dynamic model '''\n def __init__(self,config):\n model_config = config[\"model_config\"]\n self.n_states = model_config[\"n_states\"]\n self.n_actions = model_config[\"n_actions\"]\n self.use_cuda = model_config[\"use_cuda\"]\n if model_config[\"load_model\"]:\n self.model = torch.load(model_config[\"model_path\"])\n else:\n self.model = MLP(self.n_states + self.n_actions, self.n_states, model_config[\"n_hidden\"],\n model_config[\"size_hidden\"])\n if self.use_cuda:\n self.model = self.model.cuda()\n self.Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs).cuda()\n else:\n self.model = self.model.cpu()\n self.Variable = lambda *args, **kwargs: autograd.Variable(*args, **kwargs)\n training_config = config[\"training_config\"]\n self.n_epochs = training_config[\"n_epochs\"]\n self.lr = training_config[\"learning_rate\"]\n self.batch_size = training_config[\"batch_size\"]\n self.save_model_flag = training_config[\"save_model_flag\"]\n self.save_model_path = training_config[\"save_model_path\"]\n self.exp_number = training_config[\"exp_number\"]\n self.save_loss_fig = training_config[\"save_loss_fig\"]\n self.save_loss_fig_frequency = training_config[\"save_loss_fig_frequency\"]\n self.criterion = nn.MSELoss(reduction='mean')\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)\n\n def train(self, trainset, testset=0):\n '''\n Train the dynamic model with input dataset\n\n :param trainset: (Dictionary) The input training set\n :param testset: (Dictionary) The input test set\n :return:\n '''\n # Normalize the dataset and record data distribution (mean and std)\n datasets, labels = self.norm_train_data(trainset[\"data\"],trainset[\"label\"])\n if testset != 0:\n test_datasets, test_labels = self.norm_test_data(testset[\"data\"],testset[\"label\"])\n train_dataset = MyDataset(datasets, labels)\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True)\n total_step = len(train_loader)\n print(f\"Total training step per epoch [{total_step}]\")\n loss_epochs = []\n for epoch in range(1, self.n_epochs + 1):\n loss_this_epoch = []\n for i, 
(datas, labels) in enumerate(train_loader):\n datas = self.Variable(torch.FloatTensor(np.float32(datas)))\n labels = self.Variable(torch.FloatTensor(np.float32(labels)))\n self.optimizer.zero_grad()\n outputs = self.model(datas)\n loss = self.criterion(outputs, labels)\n loss.backward()\n self.optimizer.step()\n loss_this_epoch.append(loss.item())\n loss_epochs.append(np.mean(loss_this_epoch))\n if self.save_model_flag:\n torch.save(self.model, self.save_model_path)\n if self.save_loss_fig and epoch % self.save_loss_fig_frequency == 0:\n self.save_figure(epoch, loss_epochs, loss_this_epoch)\n if testset != 0:\n loss_test = self.validate_model(test_datasets, test_labels)\n print(f\"Epoch [{epoch}/{self.n_epochs}], Training Loss: {np.mean(loss_this_epoch):.8f}, \"\n f\"Test Loss: {loss_test:.8f}\")\n return loss_epochs\n\n def predict(self, x):\n '''\n Given the current state and action, predict the next state\n\n :param x: (numpy array) current state and action in one array\n :return: (numpy array) next state numpy array\n '''\n x = np.array(x)\n x = self.pre_process(x)\n x_tensor = self.Variable(torch.FloatTensor(x).unsqueeze(0), volatile=True) # not sure here\n out_tensor = self.model(x_tensor)\n out = out_tensor.cpu().detach().numpy()\n out = self.after_process(out)\n return out\n\n def pre_process(self, x):\n '''\n Pre-process the input data\n :param x: (numpy array) current state and action in one array\n :return: (numpy array) normalized input array\n '''\n x = (x - self.mean_data) / self.std_data\n return x\n\n def after_process(self, x):\n x = x * self.std_label + self.mean_label\n return x\n\n def norm_train_data(self, datas, labels):\n '''\n Normalize the training data and record the data distribution\n\n :param datas: (numpy array) input data\n :param labels: (numpy array) the label\n :return: (numpy array) normalized data and label\n '''\n self.mean_data = np.mean(datas, axis=0)\n self.mean_label = np.mean(labels, axis=0)\n self.std_data = np.std(datas, axis=0)\n self.std_label = np.std(labels, axis=0)\n datas = (datas - self.mean_data) / self.std_data\n labels = (labels - self.mean_label) / self.std_label\n return datas, labels\n\n def norm_test_data(self, datas, labels):\n '''\n Normalize the test data\n\n :param datas: (numpy array) input data\n :param labels: (numpy array) the label\n :return: (numpy array) normalized data and label\n '''\n datas = (datas - self.mean_data) / self.std_data\n labels = (labels - self.mean_label) / self.std_label\n return datas, labels\n\n def validate_model(self, datasets, labels):\n '''\n Validate the trained model\n\n :param datasets: (numpy array) input data\n :param labels: (numpy array) corresponding label\n :return: average loss\n '''\n test_dataset = MyDataset(datasets, labels)\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=self.batch_size)\n loss_list = []\n for i, (datas, labels) in enumerate(test_loader):\n datas = self.Variable(torch.FloatTensor(np.float32(datas)))\n labels = self.Variable(torch.FloatTensor(np.float32(labels)))\n outputs = self.model(datas)\n loss = self.criterion(outputs, labels)\n loss_list.append(loss.item())\n loss_avr = np.average(loss_list)\n return loss_avr\n\n def save_figure(self, epoch, loss_epochs,loss_this_epoch):\n '''\n Save the loss figures\n '''\n plt.clf()\n plt.close(\"all\")\n plt.figure(figsize=(12, 5))\n plt.subplot(121)\n plt.title('Loss Trend with %s Epochs' % (epoch))\n plt.plot(loss_epochs)\n plt.subplot(122)\n plt.title('Loss Trend in the latest Epoch')\n 
plt.plot(loss_this_epoch)\n plt.savefig(\"storage/loss-\" + str(self.exp_number) + \".png\")\n\n def model_validation(self,env, horizon=40, n_sample=200, mpc=[]):\n '''\n Validate the model in the environment\n\n :param env: OpenAI gym style environment\n :param horizon: The prediction horizon\n :param n_sample:\n :param mpc: whether to use the mpc to generate action\n :return: the errors along the horizon\n '''\n n_state = env.observation_space.shape[0]\n errors = np.zeros([n_sample, horizon, n_state])\n for i in range(n_sample):\n state = env.reset()\n state_pred = state.copy()\n state_real = state.copy()\n for j in range(horizon): # predicted results\n if mpc != []:\n action = mpc.act(state_pred, self)\n action = np.array([action])\n else:\n action = env.action_space.sample()\n input_data = np.concatenate((state_pred, action))\n state_dt = self.predict(input_data)\n state_pred = state_pred + state_dt[0]\n state_real, reward, done, info = env.step(action)\n error_tmp = state_real - state_pred\n errors[i, j] = abs(error_tmp)\n errors_mean = np.mean(errors, axis=0)\n errors_max = np.max(errors, axis=0)\n errors_min = np.min(errors, axis=0)\n errors_std = np.min(errors, axis=0)\n return errors_mean, errors_max, errors_min, errors_std\n\n def plot_model_validation(self, env, horizon=40, n_sample=200, mpc=[], mode=\"mean\"):\n ''' Plot the model validation in the simulation environment'''\n if mode == \"mean\":\n errors = self.model_validation(env, horizon, n_sample, mpc)[0]\n elif mode == \"max\":\n errors = self.model_validation(env, horizon, n_sample, mpc)[1]\n elif mode == \"min\":\n errors = self.model_validation(env, horizon, n_sample, mpc)[2]\n elif mode == \"std\":\n errors = self.model_validation(env, horizon, n_sample, mpc)[3]\n else:\n return 0\n plt.close(\"all\")\n plt.ioff()\n plt.figure(figsize=[12, 6])\n plt.title(mode + \" state error between the predictive model and real world along different horizons\")\n plt.xlabel(\"horizon\")\n plt.ylabel(\"error\")\n for i in range(errors.shape[1]):\n plt.plot(errors[:, i], label='state ' + str(i))\n plt.legend()\n plt.savefig(\"storage/model_error_exp_\"+str(self.exp_number)+\".png\")\n plt.show()\n\nclass DatasetFactory(object):\n '''Manage all the dataset'''\n def __init__(self, env, config):\n self.env = env\n dataset_config = config[\"dataset_config\"]\n self.load_flag = dataset_config[\"load_flag\"]\n self.load_path = dataset_config[\"load_path\"]\n self.n_max_steps = dataset_config[\"n_max_steps\"]\n self.n_random_episodes = dataset_config[\"n_random_episodes\"]\n self.testset_split = dataset_config[\"testset_split\"]\n self.n_mpc_episodes = dataset_config[\"n_mpc_episodes\"]\n self.mpc_dataset_split = dataset_config[\"mpc_dataset_split\"]\n self.n_mpc_itrs = dataset_config[\"n_mpc_itrs\"]\n self.save_flag = dataset_config[\"save_flag\"]\n self.save_path = dataset_config[\"save_path\"]\n self.min_train_samples = dataset_config[\"min_train_samples\"]\n self.random_dataset = []\n self.random_trainset = []\n self.random_testset = []\n self.mpc_dataset = []\n self.mpc_dataset_len = 0\n self.trainset = []\n if self.load_flag:\n self.all_dataset = self.load_dataset()\n else:\n self.all_dataset = []\n\n def collect_random_dataset(self):\n '''\n Collect n_random_episodes data (numpy array) with maximum n_max_steps steps per episode\n '''\n datasets = []\n labels = []\n for i in range(self.n_random_episodes):\n data_tmp = []\n label_tmp = []\n state_old = self.env.reset()\n for j in range(self.n_max_steps):\n action = 
self.env.action_space.sample()\n data_tmp.append(np.concatenate((state_old, action)))\n state_new, reward, done, info = self.env.step(action)\n label_tmp.append(state_new - state_old)\n if done:\n break\n state_old = state_new\n data_tmp = np.array(data_tmp)\n label_tmp = np.array(label_tmp)\n if datasets == []:\n datasets = data_tmp\n else:\n datasets = np.concatenate((datasets, data_tmp))\n if labels == []:\n labels = label_tmp\n else:\n labels = np.concatenate((labels, label_tmp))\n data_and_label = np.concatenate((datasets, labels), axis=1)\n # Merge the data and label into one array and then shuffle\n np.random.shuffle(data_and_label)\n print(\"Collect random dataset shape: \", datasets.shape)\n testset_len = int(datasets.shape[0] * self.testset_split)\n data_len = datasets.shape[1]\n self.random_testset = {\"data\": data_and_label[:testset_len, :data_len],\n \"label\": data_and_label[:testset_len, data_len:]}\n self.random_trainset = {\"data\": data_and_label[testset_len:, :data_len],\n \"label\": data_and_label[testset_len:, data_len:]}\n self.random_dataset = {\"data\": datasets, \"label\": labels}\n self.all_dataset = self.random_dataset\n\n def collect_mpc_dataset(self, mpc, dynamic_model, render = False):\n '''\n Collect reinforced dataset by model predictive control\n\n :param mpc: MPC controller\n :param dynamic_model: System dynamic model\n :param render: Whether render the environment\n :return: list of reward of each episodes\n '''\n datasets = []\n labels = []\n reward_episodes = []\n # n_mpc_episodes = 4: how many episodes data sampled with the MPC controller\n # 使用 MPC 控制器采集了多少集数据\n for i in range(self.n_mpc_episodes):\n data_tmp = []\n label_tmp = []\n reward_episode = 0\n state_old = self.env.reset()\n for j in range(self.n_max_steps):\n # n_max_steps = 500: 如果这么多步之后仍然没有到达 done 状态就停止\n if render:\n self.env.render()\n action = mpc.act(state_old, dynamic_model)\n action = np.array([action])\n data_tmp.append(np.concatenate((state_old, action)))\n state_new, reward, done, info = self.env.step(action)\n reward_episode += reward\n label_tmp.append(state_new - state_old)\n if done:\n break\n state_old = state_new\n data_tmp = np.array(data_tmp)\n label_tmp = np.array(label_tmp)\n if datasets == []:\n datasets = data_tmp\n else:\n datasets = np.concatenate((datasets, data_tmp))\n if labels == []:\n labels = label_tmp\n else:\n labels = np.concatenate((labels, label_tmp))\n reward_episodes.append(reward_episode)\n print(f\"Episode [{i}/{self.n_mpc_episodes}], Reward: {reward_episode:.8f}, Step: [{j}/{self.n_max_steps}]\")\n self.mpc_dataset = {\"data\": datasets, \"label\": labels}\n self.mpc_dataset_len = datasets.shape[0]\n print(\"Totally collect %s data based on MPC\" % self.mpc_dataset_len)\n all_datasets = np.concatenate((datasets, self.all_dataset[\"data\"]))\n all_labels = np.concatenate((labels, self.all_dataset[\"label\"]))\n self.all_dataset = {\"data\": all_datasets, \"label\": all_labels}\n if self.save_flag:\n self.save_datasets(self.all_dataset)\n return reward_episodes\n\n def make_dataset(self):\n '''\n Sample the training dataset from MPC-based data and previous data\n :return: (numpy array) trainingset and testset\n '''\n # calculate how many samples needed from the all datasets\n all_length = max(int(self.mpc_dataset_len / self.mpc_dataset_split), self.min_train_samples)\n sample_length = all_length - self.mpc_dataset_len\n sample_length = min(self.all_dataset[\"data\"].shape[0], sample_length)\n print(\"Sample %s training data from all previous dataset, 
total training sample: %s\" % (\n sample_length, all_length))\n data_and_label = np.concatenate((self.all_dataset[\"data\"], self.all_dataset[\"label\"]), axis=1)\n # Merge the data and label into one array and then shuffle\n np.random.shuffle(data_and_label)\n testset_len = min(int(all_length * self.testset_split), self.all_dataset[\"data\"].shape[0])\n data_len = self.mpc_dataset[\"data\"].shape[1]\n\n trainset_data = np.concatenate((self.mpc_dataset[\"data\"], data_and_label[:sample_length, :data_len]))\n trainset_label = np.concatenate((self.mpc_dataset[\"label\"], data_and_label[:sample_length, data_len:]))\n testset_data = data_and_label[testset_len:, :data_len]\n testset_label = data_and_label[testset_len:, data_len:]\n trainset = {\"data\": trainset_data, \"label\": trainset_label}\n testset = {\"data\": testset_data, \"label\": testset_label}\n return trainset, testset\n\n def save_datasets(self,data):\n '''Save the collected dataset (dictionary)'''\n print(\"Saving all datas to %s\" % self.save_path)\n with open(self.save_path, 'wb') as f: # open file with write-mode\n pickle.dump(data, f, -1) # serialize and save object\n\n def load_dataset(self):\n '''Load the dataset (dictionary)'''\n print(\"Load datas from %s\" % self.load_path)\n with open(self.load_path, 'rb') as f:\n dataset = pickle.load(f)\n return dataset\n" ]
[ [ "torch.nn.init.uniform_", "torch.load", "torch.nn.ModuleList", "torch.utils.data.DataLoader", "torch.nn.Tanh", "torch.nn.Linear", "torch.save", "torch.FloatTensor", "torch.nn.ReLU", "torch.nn.MSELoss", "torch.autograd.Variable" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
50417/SLGPT
[ "df29217d20d6bd35d7841b90cc92f784da95b0af" ]
[ "code-process/calculatemetrics.py" ]
[ "import networkx as nx\nfrom Graph_Info import Graph_Info\nfrom math import isinf\nimport numpy as np\nimport os\nimport json\ndef calculate_graph_metric( using_json=False,simulink_name=None, adjList=None, source=None, sinks=None, blocks=None):\n import json\n DG = nx.DiGraph()\n gi = Graph_Info(simulink_name)\n if not using_json:\n #Deprecated. TODO : Add this feature\n tmp_output, model_info = extract_system_blk()\n\n for nodes in model_info.blk_info.keys():\n DG.add_node(nodes)\n for blk in model_info.graph.keys():\n\n for tup in model_info.graph[blk]:\n dst_lst = tup[0]\n for dst in dst_lst:\n DG.add_edge(blk, dst)\n else:\n for nodes in blocks:\n DG.add_node(nodes)\n\n for src, dst in adjList.items():\n for d in dst:\n # print(src)\n DG.add_edge(src, d)\n\n # import matplotlib.pyplot as plt\n # nx.draw(DG,with_labels=True, font_weight='bold')\n # plt.show()\n '''\n source =[]\n dst = []\n for node, deg in DG.in_degree():\n if(deg==0):\n source.append(node)\n\n for node, deg in DG.out_degree():\n if(deg==0):\n dst.append(node)\n #print(dst)\n '''\n\n for src in source:\n\n for d in sinks:\n\n for path in nx.all_simple_paths(DG, src, d):\n gi.addpath(path)\n\n # print(path)\n\n UG = DG.to_undirected()\n sub_graphs = nx.connected_components(UG)\n\n for c in sub_graphs:\n gi.add_subgraph(list(c))\n # print(c)\n gi.add_subgraph([])\n gi.subgraphs_size.sort()\n\n return gi # json.dumps(gi.__dict__)\n\ndef run(model_name,train_data,dir,dbfile):\n saveImageIn ='../Experiments/Plots'\n if not os.path.isdir(saveImageIn):\n os.makedirs(saveImageIn)\n ans = []\n file= 'adjList-'+model_name+'From'+train_data+'Compiled.json'\n inputfile = os.path.join(dir,file)\n\n with open(inputfile) as f:\n dict = json.load(f)\n for d in dict:\n #print(d['adjList'])\n ans.append(calculate_graph_metric(True,d['simulink_name'], d['adjList'],d['sources'],d['sinks'],d['blocks']))\n\n\n\n outputfile = 'test.json'\n with open(outputfile, 'w', encoding='utf') as f:\n f.write('[')\n for k in range(len(ans)-1):\n f.write(json.dumps(ans[k].__dict__,indent=4)+\",\\n\")\n f.write(json.dumps(ans[len(ans)-1].__dict__, indent=4) + \"\\n\")\n f.write(']')\n\n\n ans = []\n counter = 0\n no_of_subgraphs = []\n source_destination_path = []\n max_source_destination_path_length = []\n min_source_destination_path_length = []\n max_sub_graph_size = []\n x = []\n with open(outputfile) as f:\n dict = json.load(f)\n\n for d in dict:\n x.append(counter)\n counter += 1\n no_of_subgraphs.append(d['no_of_subgraphs'])\n source_destination_path.append(d['source_destination_path'])\n max_source_destination_path_length.append(d['max_source_destination_path_length'])\n max_sub_graph_size.append(d['max_sub_graph_size'])\n if isinf(d['min_source_destination_path_length']):\n min_source_destination_path_length.append(0)\n else:\n min_source_destination_path_length.append(d['min_source_destination_path_length'])\n #print(d['adjList'])\n #ans.append(processor.calculate_graph_metric(True, d['adjList']))\n no_of_subgraphs.sort()\n source_destination_path.sort()\n max_source_destination_path_length.sort()\n min_source_destination_path_length.sort()\n max_sub_graph_size.sort()\n\n from sqlalchemy import create_engine\n from sqlalchemy.sql import text\n\n\n engine = create_engine('sqlite:///'+dbfile, echo=True) \n blk_count = []\n conn_count = []\n with engine.connect() as con:\n\n statement = text(\"Select SCHK_Block_count from \"+model_name+\"_From_\"+train_data+\"_Metric\") \n st2 = text(\"Select total_connH_cnt from 
\"+model_name+\"_From_\"+train_data+\"_Metric\")\n\n r2 = con.execute(st2)\n result = con.execute(statement)\n for row in result:\n for key, val in row.items():\n\n blk_count.append(val)\n for row in r2:\n for key, val in row.items():\n conn_count.append(val)\n\n blk_count.sort()\n conn_count.sort()\n import matplotlib.ticker as mtick\n import matplotlib.pyplot as plt\n plt.rcParams.update({'font.size': 18})\n\n plt.plot(x,no_of_subgraphs, color=\"black\")\n plt.xticks(np.linspace(1,len(no_of_subgraphs),5), ('0%', '20%', '40%', '60%', '100%'))\n ax = plt.gca()\n plt.text(0.15, 0.9, 'N='+str(len(x)), ha='center', va='center', transform=ax.transAxes, style='italic' )\n plt.savefig(os.path.join(saveImageIn,model_name+\"_From_\"+train_data+\"_no_of_subgraphs.png\"))\n plt.close()\n\n plt.plot(x,max_sub_graph_size,color=\"black\")\n plt.xticks(np.linspace(1,len(no_of_subgraphs),5), ('0%', '20%', '40%', '60%', '100%'))\n ax = plt.gca()\n plt.text(0.15, 0.9, 'N='+str(len(x)), ha='center', va='center', transform=ax.transAxes, style='italic' )\n plt.savefig(os.path.join(saveImageIn,model_name+\"_From_\"+train_data+\"_max_sub_graph_size.png\"))\n plt.close()\n\n plt.plot(max_source_destination_path_length,color=\"black\")\n plt.xticks(np.linspace(1,len(no_of_subgraphs),5), ('0%', '20%', '40%', '60%', '100%'))\n import matplotlib.ticker as ticker\n ax = plt.gca()\n ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))\n ax = plt.gca()\n plt.text(0.15, 0.9, 'N='+str(len(x)), ha='center', va='center', transform=ax.transAxes, style='italic' )\n plt.savefig(os.path.join(saveImageIn,model_name+\"_From_\"+train_data+\"_max_source_destination_path_length.png\"))\n plt.close()\n\n plt.plot(conn_count,color=\"black\")\n plt.xticks(np.linspace(1,len(conn_count),5), ('0%', '20%', '40%', '60%', '100%'))\n ax = plt.gca()\n plt.text(0.15, 0.9, 'N='+str(len(x)), ha='center', va='center', transform=ax.transAxes, style='italic' )\n plt.savefig(os.path.join(saveImageIn,model_name+\"_From_\"+train_data+\"_conn_count.png\"))\n plt.close()\n\n plt.plot(x,blk_count,color=\"black\")\n plt.xticks(np.linspace(1,len(blk_count),5), ('0%', '20%', '40%', '60%', '100%'))\n ax = plt.gca()\n plt.text(0.15, 0.9, 'N='+str(len(x)), ha='center', va='center', transform=ax.transAxes, style='italic' )\n plt.savefig(os.path.join(saveImageIn,model_name+\"_From_\"+train_data+\"_blk_count.png\"))\n plt.close()\nmodels = [\"DeepFuzzSL\",\"SLGPT\",\"RealWorld\",\"SLforge\"]\ndata_source = [\"RealWorld\",\"SLforge\"]\n\ndir = ''\ndbfile = \"\"\n\nfor data in data_source:\n for model in models:\n try:\n run(model,data,dir,dbfile)\n except Exception as e:\n continue\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.plot", "matplotlib.ticker.MaxNLocator", "matplotlib.pyplot.close", "matplotlib.pyplot.rcParams.update" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JediLuke/rufus
[ "177c6012ecdaeaab42f45f76e478b14b5610c6b3" ]
[ "donkeycar/parts/lidar.py" ]
[ "\"\"\"\nLidar\n\"\"\"\n\nimport time\nimport numpy as np\n\n\nclass Ultrasonic():\n def __init__(self):\n # self.sensor = someThing()\n self.dist = 1000\n\n def run_threaded(self):\n return self.dist\n\n def update(self):\n #self.dist = self.sensor.getReading()\n print(\"uSonic dist \" + str(self.dist))\n self.dist = self.dist - 10\n\n\nclass RPLidar():\n def __init__(self, port='/dev/ttyUSB0'):\n from rplidar import RPLidar\n self.port = port\n self.frame = np.zeros(shape=365)\n self.lidar = RPLidar(self.port)\n self.lidar.clear_input()\n time.sleep(1)\n self.on = True\n\n\n def update(self):\n self.measurements = self.lidar.iter_measurments(500)\n for new_scan, quality, angle, distance in self.measurements:\n angle = int(angle)\n self.frame[angle] = 2*distance/3 + self.frame[angle]/3\n if not self.on: \n break\n \n def run_threaded(self):\n return self.frame" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jurafish/Paddle
[ "15724e745409cf6af3df99ae3eec90511e482cbc" ]
[ "python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_reduce_sum.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons\nfrom program_config import TensorConfig, ProgramConfig\nimport unittest\nimport numpy as np\nimport paddle.inference as paddle_infer\nfrom functools import partial\nfrom typing import Optional, List, Callable, Dict, Any, Set\nimport unittest\n\n\nclass TrtConvertReduceSumTest(TrtLayerAutoScanTest):\n def is_program_valid(self, program_config: ProgramConfig) -> bool:\n inputs = program_config.inputs\n attrs = [\n program_config.ops[i].attrs\n for i in range(len(program_config.ops))\n ]\n\n ## dim should be in (-rank, rank), and not NONE\n rank = len(inputs['input_data'].shape)\n for x in attrs[0][\"dim\"]:\n if x >= rank or x <= -rank:\n return False\n if len(attrs[0][\"dim\"]) == 0:\n return False\n ## skip not use \n if attrs[0][\"out_dtype\"] != -1:\n return False\n\n return True\n\n def sample_program_configs(self):\n def generate_input1(attrs: List[Dict[str, Any]]):\n return np.random.random([1, 3, 64, 64]).astype(np.float32)\n\n for keep_dim in [False, True]:\n for dim in [[], [1], [0], [0, 1], [1, 2, 3], [-2, 0, 3], [-3],\n [-4, 1], [3, 4, 5]]:\n for reduce_all in [False, True]:\n for out_dtype in [-1, 0, 1]:\n dics = [{\n \"keep_dim\": keep_dim,\n \"dim\": dim,\n \"reduce_all\": reduce_all,\n \"out_dtype\": out_dtype\n }, {}]\n\n ops_config = [{\n \"op_type\": \"reduce_sum\",\n \"op_inputs\": {\n \"X\": [\"input_data\"]\n },\n \"op_outputs\": {\n \"Out\": [\"reduce_output_data\"]\n },\n \"op_attrs\": dics[0]\n }]\n ops = self.generate_op_config(ops_config)\n\n program_config = ProgramConfig(\n ops=ops,\n weights={},\n inputs={\n \"input_data\": TensorConfig(data_gen=partial(\n generate_input1, dics))\n },\n outputs=[\"reduce_output_data\"])\n\n if not self.is_program_valid(program_config):\n continue\n\n yield program_config\n\n def sample_predictor_configs(self, program_config):\n def generate_dynamic_shape(attrs):\n self.dynamic_shape.min_input_shape = {\"input_data\": [1, 3, 32, 32]}\n self.dynamic_shape.max_input_shape = {\"input_data\": [4, 3, 64, 64]}\n self.dynamic_shape.opt_input_shape = {\"input_data\": [1, 3, 64, 64]}\n\n def clear_dynamic_shape():\n self.dynamic_shape.min_input_shape = {}\n self.dynamic_shape.max_input_shape = {}\n self.dynamic_shape.opt_input_shape = {}\n\n def generate_trt_nodes_num(attrs, dynamic_shape):\n if dynamic_shape:\n if (not attrs[0]['keep_dim']) and attrs[0]['reduce_all']:\n return 0, 3\n else:\n return 1, 2\n else:\n if 0 in attrs[0]['dim'] or attrs[0]['reduce_all']:\n return 0, 3\n else:\n return 1, 2\n\n attrs = [\n program_config.ops[i].attrs\n for i in range(len(program_config.ops))\n ]\n\n # for static_shape\n clear_dynamic_shape()\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, False), (1e-5, 1e-5)\n self.trt_param.precision = 
paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, False), (1e-5, 1e-5)\n\n # for dynamic_shape\n generate_dynamic_shape(attrs)\n self.trt_param.precision = paddle_infer.PrecisionType.Float32\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, True), (1e-5, 1e-5)\n self.trt_param.precision = paddle_infer.PrecisionType.Half\n yield self.create_inference_config(), generate_trt_nodes_num(\n attrs, True), (1e-5, 1e-5)\n\n pass\n\n def add_skip_trt_case(self):\n def teller1(program_config, predictor_config):\n if program_config.ops[0].attrs['out_dtype'] != -1:\n return True\n return False\n\n self.add_skip_case(\n teller1, SkipReasons.TRT_NOT_IMPLEMENTED,\n \"NOT Implemented: we will add out_dtype not equal to -1 in the future\"\n )\n\n pass\n\n def test(self):\n self.add_skip_trt_case()\n self.run_test()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.random.random" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tt8000/Paddle
[ "17b660c0827bb7c2a24d1a17fe18349ad6702513" ]
[ "python/paddle/tests/test_pretrained_model.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport tempfile\nimport shutil\nimport numpy as np\n\nimport paddle\nfrom paddle.static import InputSpec\nimport paddle.vision.models as models\n\n\n# test the predicted resutls of static graph and dynamic graph are equal\n# when used pretrained model\nclass TestPretrainedModel(unittest.TestCase):\n def infer(self, arch):\n path = tempfile.mkdtemp()\n x = np.array(np.random.random((2, 3, 224, 224)), dtype=np.float32)\n res = {}\n for dygraph in [True, False]:\n if not dygraph:\n paddle.enable_static()\n\n net = models.__dict__[arch](pretrained=True)\n inputs = [InputSpec([None, 3, 224, 224], 'float32', 'image')]\n model = paddle.Model(network=net, inputs=inputs)\n model.prepare()\n\n if dygraph:\n model.save(path)\n res['dygraph'] = model.predict_batch(x)\n else:\n model.load(path)\n res['static'] = model.predict_batch(x)\n\n if not dygraph:\n paddle.disable_static()\n\n shutil.rmtree(path)\n np.testing.assert_allclose(res['dygraph'], res['static'])\n\n def test_models(self):\n arches = [\n 'mobilenet_v1', 'mobilenet_v2', 'resnet18', 'vgg16',\n 'shufflenetv2_swish'\n ]\n for arch in arches:\n self.infer(arch)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.random.random", "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
explorer2326/Stance-Detection-Fake-News-Challenge
[ "3eb473aa5d7973cb9901abc1f91f50d58a235200" ]
[ "language_model.py" ]
[ "#%%\r\nimport os\r\nimport sys\r\nsys.path.append(os.getcwd())\r\nimport warnings\r\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\r\nimport gensim \r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nimport numpy as np\r\nimport calculation\r\nfrom dataset import DataSet\r\nfrom gensim import models\r\n\r\ndataset = DataSet()\r\ncorpus = []\r\n#load article body into corpus\r\nfor ID in dataset.articles:\r\n # raw data loading\r\n raw_sentences = dataset.articles[ID]\r\n # tokenization\r\n word_tokens = word_tokenize(raw_sentences)\r\n #stop words removal\r\n stop_words = set(stopwords.words('english'))\r\n filtered_sentence = [w for w in word_tokens if not w in stop_words]\r\n '''\r\n #stemming\r\n ps = PorterStemmer()\r\n stemmed_sentences =[]\r\n for w in filtered_sentence: \r\n stemmed_sentences.append(ps.stem(w))\r\n '''\r\n \r\n corpus.append(filtered_sentence)\r\n\r\n# word2vec for the purpose of vocab construction and word count\r\n\r\nmodel = gensim.models.Word2Vec(corpus, size=100, window=1, min_count=10, workers=4)\r\n#%% \r\n''' \r\n#count total word in collection\r\ncollection_word_count = 0\r\nfor index in range(len(corpus)):\r\n collection_word_count = collection_word_count + len(corpus[index])\r\n \r\nprint(collection_word_count) ... 993904\r\n\r\n#count vocab size\r\nprint(len(model.wv.vocab)) ... 5883\r\n'''\r\ncollection_size = 993904\r\n# P(w|D) with dirichlet smoothing (u = 1000)\r\nu = 1000 \r\n# LM for article body\r\nlm_body = dict.fromkeys([0])\r\nfor ID in dataset.articles:\r\n # raw data loading\r\n raw_sentences = dataset.articles[ID]\r\n # constructing language model with dirichlet smoothing\r\n lm = []\r\n for word, vocab_obj in model.wv.vocab.items():\r\n p = (len(raw_sentences)/(u + len(raw_sentences)))*raw_sentences.count(word)/len(raw_sentences)+(u/(u + len(raw_sentences)))*vocab_obj.count/collection_size\r\n lm.append(p)\r\n \r\n lm_body[ID] = lm\r\nnp.save('data/lm_body.npy', lm_body) \r\n#%% \r\n#LM for headline\r\nlm_headline = dict.fromkeys([0])\r\ncount = 0\r\nfor s in dataset.stances:\r\n \r\n count = count+1\r\n # raw data loading\r\n raw_sentences = s['Headline']\r\n # constructing language model with dirichlet smoothing\r\n lm = []\r\n for word, vocab_obj in model.wv.vocab.items():\r\n p = (len(raw_sentences)/(u + len(raw_sentences)))*raw_sentences.count(word)/len(raw_sentences)+(u/(u + len(raw_sentences)))*vocab_obj.count/collection_size\r\n lm.append(p)\r\n \r\n lm_headline[count] = lm\r\nnp.save('data/lm_headline.npy', lm_headline)\r\n#%%\r\n# calculate KL divergence\r\nlm_kl_divergence = dict.fromkeys([0])\r\ncounter = 0\r\nfor s in dataset.stances:\r\n s['Body ID'] = int(s['Body ID'])\r\n counter = counter+1\r\n kl = calculation.get_kl(lm_headline.get(counter), lm_body.get(s['Body ID']))\r\n lm_kl_divergence[counter] = kl\r\nnp.save('data/lm_kl_divergence.npy', lm_kl_divergence)" ]
[ [ "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yexijoe/tinyms
[ "16ca574886cb0a8f22280fe400928d4f40e3e285" ]
[ "tinyms/vision/transforms.py" ]
[ "# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\nimport numpy as np\nimport tinyms as ts\nfrom PIL import Image\nfrom tinyms.primitives import Softmax\n\nfrom . import _transform_ops\nfrom ._transform_ops import *\nfrom .utils import ssd_bboxes_encode, ssd_bboxes_filter, jaccard_numpy\nfrom ..data import MnistDataset, Cifar10Dataset, ImageFolderDataset, VOCDataset, GeneratorDataset\n\n__all__ = [\n 'mnist_transform', 'MnistTransform',\n 'cifar10_transform', 'Cifar10Transform',\n 'imagefolder_transform', 'ImageFolderTransform',\n 'voc_transform', 'VOCTransform',\n 'cyclegan_transform', 'CycleGanDatasetTransform',\n]\n__all__.extend(_transform_ops.__all__)\n\n\nclass DatasetTransform():\n r'''\n Base class for all dataset transforms.\n '''\n\n def __init__(self, labels=None):\n self.labels = labels\n self.transform_strategy = ['TOP1_CLASS', 'TOP5_CLASS']\n\n def apply_ds(self, ds, trans_func=None, repeat_size=1, batch_size=32,\n num_parallel_workers=None):\n if not isinstance(trans_func, list):\n raise TypeError('trans_func must be list')\n\n # apply map operations on datasets\n ds = ds.map(operations=TypeCast(ts.int32), input_columns=\"label\",\n num_parallel_workers=num_parallel_workers)\n ds = ds.map(operations=trans_func, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n # apply batch operations\n ds = ds.batch(batch_size, drop_remainder=True)\n # apply repeat operations\n ds = ds.repeat(repeat_size)\n\n return ds\n\n def postprocess(self, input, strategy='TOP1_CLASS'):\n r'''\n Apply postprocess operation for prediction result.\n\n Args:\n input (numpy.ndarray): Prediction result.\n strategy (str): Specifies the postprocess strategy. 
Default: TOP1_CLASS.\n\n Returns:\n str, the postprocess result.\n '''\n if not isinstance(input, np.ndarray):\n raise TypeError(\"Input should be NumPy, got {}.\".format(type(input)))\n if not input.ndim == 2:\n raise TypeError(\"Input should be 2-D Numpy, got {}.\".format(input.ndim))\n if strategy not in self.transform_strategy:\n raise ValueError(\"Strategy should be one of {}, got {}.\".format(self.transform_strategy, strategy))\n\n softmax = Softmax()\n score_list = softmax(ts.array(input)).asnumpy()\n if strategy == 'TOP1_CLASS':\n score = max(score_list[0])\n return ('TOP1: ' + str(self.labels[input[0].argmax()]) + ', score: ' + str(format(score, '.20f')))\n else:\n label_index = np.argsort(input[0])[::-1]\n score_index = np.sort(score_list[0])[::-1]\n top5_labels = []\n res = ''\n top5_scores = score_index[:5].tolist()\n for i in range(5):\n top5_labels.append(self.labels[label_index[i]])\n res += 'TOP' + str(i+1) + \": \" + str(top5_labels[i]) + \\\n \", score: \" + str(format(top5_scores[i], '.20f')) + '\\n'\n return res\n\n\nclass MnistTransform(DatasetTransform):\n r'''\n Mnist dataset transform class.\n\n Inputs:\n img (Union[numpy.ndarray, PIL.Image]): Image to be transformed in Mnist-style.\n\n Outputs:\n numpy.ndarray, transformed image.\n\n Examples:\n >>> from PIL import Image\n >>> from tinyms.vision import MnistTransform\n >>>\n >>> mnist_transform = MnistTransform()\n >>> img = Image.open('example.jpg')\n >>> img = mnist_transform(img)\n '''\n\n def __init__(self):\n labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n super().__init__(labels=labels)\n self.grayscale = Grayscale()\n self.resize = Resize((32, 32))\n self.normalize = Rescale(1 / 0.3081, -1 * 0.1307 / 0.3081)\n self.rescale = Rescale(1.0 / 255.0, 0.0)\n\n def __call__(self, img):\n if not isinstance(img, (np.ndarray, Image.Image)):\n raise TypeError(\"Input type should be numpy.ndarray or PIL.Image, got {}.\".format(type(img)))\n if isinstance(img, np.ndarray):\n img = Image.fromarray(img, mode='RGB')\n img = np.asarray(self.grayscale(img), dtype=np.float32)\n img = np.expand_dims(img, 2)\n img = self.resize(img)\n img = self.normalize(img)\n img = self.rescale(img)\n img = hwc2chw(img)\n\n return img\n\n def apply_ds(self, mnist_ds, repeat_size=1, batch_size=32, num_parallel_workers=None):\n r'''\n Apply preprocess operation on MnistDataset instance.\n\n Args:\n mnist_ds (data.MnistDataset): MnistDataset instance.\n repeat_size (int): The repeat size of dataset. Default: 1.\n batch_size (int): Batch size. Default: 32.\n num_parallel_workers (int): The number of concurrent workers. 
Default: None.\n\n Returns:\n data.MnistDataset, the preprocessed MnistDataset instance.\n\n Examples:\n >>> from tinyms.vision import MnistTransform\n >>>\n >>> mnist_transform = MnistTransform()\n >>> mnist_ds = mnist_transform.apply_ds(mnist_ds)\n '''\n if not isinstance(mnist_ds, MnistDataset):\n raise TypeError(\"Input type should be MnistDataset, got {}.\".format(type(mnist_ds)))\n\n trans_func = [self.resize, self.normalize, self.rescale, hwc2chw]\n # apply transform functions on mnist dataset\n mnist_ds = super().apply_ds(mnist_ds, trans_func=trans_func, repeat_size=repeat_size,\n batch_size=batch_size, num_parallel_workers=num_parallel_workers)\n\n return mnist_ds\n\n\nclass Cifar10Transform(DatasetTransform):\n r'''\n Cifar10 dataset transform class.\n\n Inputs:\n img (Union[numpy.ndarray, PIL.Image]): Image to be transformed in Cifar10-style.\n\n Outputs:\n numpy.ndarray, Transformed image.\n\n Examples:\n >>> from PIL import Image\n >>> from tinyms.vision import Cifar10Transform\n >>>\n >>> cifar10_transform = Cifar10Transform()\n >>> img = Image.open('example.jpg')\n >>> img = cifar10_transform(img)\n \"\"\"\n '''\n\n def __init__(self):\n labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',\n 'dog', 'frog', 'horse', 'ship', 'truck']\n super().__init__(labels=labels)\n self.random_crop = RandomCrop((32, 32), (4, 4, 4, 4))\n self.random_horizontal_flip = RandomHorizontalFlip(prob=0.5)\n self.resize = Resize((224, 224))\n self.rescale = Rescale(1.0 / 255.0, 0.0)\n self.normalize = Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])\n\n def __call__(self, img):\n if not isinstance(img, (np.ndarray, Image.Image)):\n raise TypeError(\"Input type should be numpy.ndarray or PIL.Image, got {}.\".format(type(img)))\n img = self.resize(img)\n img = self.rescale(img)\n img = self.normalize(img)\n img = hwc2chw(img)\n\n return img\n\n def apply_ds(self, cifar10_ds, repeat_size=1, batch_size=32,\n num_parallel_workers=None, is_training=True):\n r'''\n Apply preprocess operation on Cifar10Dataset instance.\n\n Args:\n cifar10_ds (data.Cifar10Dataset): Cifar10Dataset instance.\n repeat_size (int): The repeat size of dataset. Default: 1.\n batch_size (int): Batch size. Default: 32.\n num_parallel_workers (int): The number of concurrent workers. Default: None.\n is_training (bool): Specifies if is in training step. 
Default: True.\n\n Returns:\n data.Cifar10Dataset, the preprocessed Cifar10Dataset instance.\n\n Examples:\n >>> from tinyms.vision import Cifar10Transform\n >>>\n >>> cifar10_transform = Cifar10Transform()\n >>> cifar10_ds = cifar10_transform.apply_ds(cifar10_ds)\n '''\n if not isinstance(cifar10_ds, Cifar10Dataset):\n raise TypeError(\"Input type should be Cifar10Dataset, got {}.\".format(type(cifar10_ds)))\n\n trans_func = []\n if is_training:\n trans_func += [self.random_crop, self.random_horizontal_flip]\n trans_func += [self.resize, self.rescale, self.normalize, hwc2chw]\n # apply transform functions on cifar10 dataset\n cifar10_ds = super().apply_ds(cifar10_ds, trans_func=trans_func, repeat_size=repeat_size,\n batch_size=batch_size, num_parallel_workers=num_parallel_workers)\n\n return cifar10_ds\n\n\nclass ImageFolderTransform(DatasetTransform):\n r'''\n ImageFolder dataset transform class.\n\n Inputs:\n img(Union[numpy.ndarray, PIL.Image]): Image to be transformed in ImageFolder-style.\n\n Outputs:\n numpy.ndarray, transformed image.\n\n Examples:\n >>> from PIL import Image\n >>> from tinyms.vision import ImageFolderTransform\n >>>\n >>> imagefolder_transform = ImageFolderTransform()\n >>> img = Image.open('example.jpg')\n >>> img = imagefolder_transform(img)\n '''\n\n def __init__(self):\n labels = [\"Agaricus双孢蘑菇,伞菌目,蘑菇科,蘑菇属,广泛分布于北半球温带,无毒\",\n \"Amanita毒蝇伞,伞菌目,鹅膏菌科,鹅膏菌属,主要分布于我国黑龙江、吉林、四川、西藏、云南等地,有毒\",\n \"Boletus丽柄牛肝菌,伞菌目,牛肝菌科,牛肝菌属,分布于云南、陕西、甘肃、西藏等地,有毒\",\n \"Cortinarius掷丝膜菌,伞菌目,丝膜菌科,丝膜菌属,分布于湖南等地(夏秋季在山毛等阔叶林地上生长)\",\n \"Entoloma霍氏粉褶菌,伞菌目,粉褶菌科,粉褶菌属,主要分布于新西兰北岛和南岛西部,有毒\",\n \"Hygrocybe浅黄褐湿伞,伞菌目,蜡伞科,湿伞属,分布于香港(见于松仔园),有毒\",\n \"Lactarius松乳菇,红菇目,红菇科,乳菇属,广泛分布于亚热带松林地,无毒\",\n \"Russula褪色红菇,伞菌目,红菇科,红菇属,分布于河北、吉林、四川、江苏、西藏等地,无毒\",\n \"Suillus乳牛肝菌,牛肝菌目,乳牛肝菌科,乳牛肝菌属,分布于吉林、辽宁、山西、安徽、江西、浙江、湖南、四川、贵州等地,无毒\",\n ]\n super().__init__(labels=labels)\n self.random_crop_decode_resize = RandomCropDecodeResize((224, 224), scale=(0.08, 1.0), ratio=(0.75, 1.333))\n self.random_horizontal_flip = RandomHorizontalFlip(prob=0.5)\n self.resize = Resize((256, 256))\n self.center_crop = CenterCrop((224, 224))\n self.normalize = Normalize([0.485 * 255, 0.456 * 255, 0.406 * 255],\n [0.229 * 255, 0.224 * 255, 0.225 * 255])\n\n def _center_crop(self, img):\n y, x, _ = img.shape\n startx = x // 2 - (224 // 2)\n starty = y // 2 - (224 // 2)\n return img[starty:starty + 224, startx:startx + 224, :]\n\n def __call__(self, img):\n if not isinstance(img, (np.ndarray, Image.Image)):\n raise TypeError(\"Input type should be numpy.ndarray or PIL.Image, got {}.\".format(type(img)))\n img = self.resize(img)\n img = self._center_crop(img)\n img = self.normalize(img)\n img = hwc2chw(img)\n\n return img\n\n def apply_ds(self, imagefolder_ds, repeat_size=1, batch_size=32,\n num_parallel_workers=None, is_training=True):\n r'''\n Apply preprocess operation on ImageFolderDataset instance.\n\n Args:\n cifar10_ds (data.ImageFolderDataset): ImageFolderDataset instance.\n repeat_size (int): The repeat size of dataset. Default: 1.\n batch_size (int): Batch size. Default: 32.\n num_parallel_workers (int): The number of concurrent workers. Default: None.\n is_training (bool): Specifies if is in training step. 
Default: True.\n\n Returns:\n data.ImageFolderDataset, the preprocessed ImageFolderDataset instance.\n\n Examples:\n >>> from tinyms.vision import ImageFolderTransform\n >>>\n >>> imagefolder_transform = ImageFolderTransform()\n >>> imagefolder_ds = imagefolder_transform.apply_ds(imagefolder_ds)\n '''\n if not isinstance(imagefolder_ds, ImageFolderDataset):\n raise TypeError(\"Input type should be ImageFolderDataset, got {}.\".format(type(imagefolder_ds)))\n\n if is_training:\n trans_func = [self.random_crop_decode_resize, self.random_horizontal_flip]\n else:\n trans_func = [decode, self.resize, self.center_crop]\n trans_func += [self.normalize, hwc2chw]\n # apply transform functions on imagefolder dataset\n imagefolder_ds = super().apply_ds(imagefolder_ds, trans_func=trans_func, repeat_size=repeat_size,\n batch_size=batch_size, num_parallel_workers=num_parallel_workers)\n\n return imagefolder_ds\n\n\ndef _rand(a=0., b=1.):\n \"\"\"Generate random.\"\"\"\n return np.random.rand() * (b - a) + a\n\n\nclass VOCTransform(DatasetTransform):\n r'''\n VOC dataset transform class.\n\n Inputs:\n img(Union[numpy.ndarray, PIL.Image]): Image to be transformed in VOC-style.\n\n Outputs:\n numpy.ndarray, transformed image.\n\n Examples:\n >>> from PIL import Image\n >>> from tinyms.vision import VOCTransform\n >>>\n >>> voc_transform = VOCTransform()\n >>> img = Image.open('example.jpg')\n >>> img = voc_transform(img)\n '''\n\n def __init__(self):\n labels = ['background',\n 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',\n 'bus', 'car', 'cat', 'chair', 'cow',\n 'diningtable', 'dog', 'horse', 'motorbike', 'person',\n 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n super().__init__(labels=labels)\n self.resize = Resize((300, 300))\n self.horizontal_flip = PILRandomHorizontalFlip(1.0)\n self.normalize = Normalize(mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],\n std=[0.229 * 255, 0.224 * 255, 0.225 * 255])\n self.random_color_adjust = RandomColorAdjust(brightness=0.4, contrast=0.4, saturation=0.4)\n\n def _preprocess_fn(self, image, boxes, labels, is_training=True):\n \"\"\"Preprocess function for voc dataset.\"\"\"\n def _random_sample_crop(image, boxes):\n \"\"\"Random Crop the image and boxes\"\"\"\n height, width, _ = image.shape\n min_iou = np.random.choice([None, 0.1, 0.3, 0.5, 0.7, 0.9])\n if min_iou is None:\n return image, boxes\n # max trails (50)\n for _ in range(50):\n image_t = image\n w = _rand(0.3, 1.0) * width\n h = _rand(0.3, 1.0) * height\n # aspect ratio constraint b/t .5 & 2\n if h / w < 0.5 or h / w > 2:\n continue\n left = _rand() * (width - w)\n top = _rand() * (height - h)\n rect = np.array([int(top), int(left), int(top + h), int(left + w)])\n overlap = jaccard_numpy(boxes, rect)\n # dropout some boxes\n drop_mask = overlap > 0\n if not drop_mask.any():\n continue\n if overlap[drop_mask].min() < min_iou and overlap[drop_mask].max() > (min_iou + 0.2):\n continue\n image_t = image_t[rect[0]:rect[2], rect[1]:rect[3], :]\n centers = (boxes[:, :2] + boxes[:, 2:4]) / 2.0\n m1 = (rect[0] < centers[:, 0]) * (rect[1] < centers[:, 1])\n m2 = (rect[2] > centers[:, 0]) * (rect[3] > centers[:, 1])\n # mask in that both m1 and m2 are true\n mask = m1 * m2 * drop_mask\n # have any valid boxes? 
try again if not\n if not mask.any():\n continue\n # take only matching gt boxes\n boxes_t = boxes[mask, :].copy()\n boxes_t[:, :2] = np.maximum(boxes_t[:, :2], rect[:2])\n boxes_t[:, :2] -= rect[:2]\n boxes_t[:, 2:4] = np.minimum(boxes_t[:, 2:4], rect[2:4])\n boxes_t[:, 2:4] -= rect[:2]\n return image_t, boxes_t\n return image, boxes\n\n # Only perform resize operation of data evaluation step\n if not is_training:\n img_h, img_w, _ = image.shape\n image = self.resize(image)\n return image, np.array((img_h, img_w), dtype=np.float32), labels\n # Merge [x, y, w, h] and cls to [x, y, w, h, cls]\n boxes = np.hstack((boxes, labels)).astype(np.float32)\n # Change [x, y, w, h, cls] to [ymin, xmin, ymax, xmax, cls]\n boxes_yxyx = np.zeros_like(boxes)\n boxes_yxyx[:, 4] = boxes[:, 4]\n boxes_yxyx[:, [1, 0]] = boxes[:, [0, 1]]\n boxes_yxyx[:, [3, 2]] = boxes[:, [0, 1]] + boxes[:, [2, 3]]\n # Random crop image and bbox\n image, boxes_yxyx = _random_sample_crop(image, boxes_yxyx)\n # Resize image and bbox\n ih, iw, _ = image.shape\n image = self.resize(image)\n boxes_yxyx[:, [0, 2]] = boxes_yxyx[:, [0, 2]] / ih\n boxes_yxyx[:, [1, 3]] = boxes_yxyx[:, [1, 3]] / iw\n # Flip image and bbox or not\n flip = _rand() < .5\n if flip:\n image = np.asarray(self.horizontal_flip(Image.fromarray(image, mode='RGB')))\n boxes_yxyx[:, [1, 3]] = 1 - boxes_yxyx[:, [3, 1]]\n # When the channels of image is 1\n if len(image.shape) == 2:\n image = np.expand_dims(image, axis=-1)\n image = np.concatenate([image, image, image], axis=-1)\n\n boxes_yxyx, label, num_match = ssd_bboxes_encode(boxes_yxyx)\n return image, boxes_yxyx, label, num_match\n\n def __call__(self, img):\n if not isinstance(img, (np.ndarray, Image.Image)):\n raise TypeError(\"Input type should be numpy.ndarray or PIL.Image, got {}.\".format(type(img)))\n img = self.resize(img)\n img = self.normalize(img)\n img = hwc2chw(img)\n\n return img\n\n def apply_ds(self, voc_ds, repeat_size=1, batch_size=32,\n num_parallel_workers=None, is_training=True):\n r'''\n Apply preprocess operation on VOCDataset instance.\n\n Args:\n cifar10_ds (data.VOCDataset): VOCDataset instance.\n repeat_size (int): The repeat size of dataset. Default: 1.\n batch_size (int): Batch size. Default: 32.\n num_parallel_workers (int): The number of concurrent workers. Default: None.\n is_training (bool): Specifies if is in training step. 
Default: True.\n\n Returns:\n data.VOCDataset, the preprocessed VOCDataset instance.\n\n Examples:\n >>> from tinyms.vision import VOCTransform\n >>>\n >>> VOC_transform = VOCTransform()\n >>> voc_ds = voc_transform.apply_ds(voc_ds)\n '''\n if not isinstance(voc_ds, VOCDataset):\n raise TypeError(\"Input type should be VOCDataset, got {}.\".format(type(voc_ds)))\n\n compose_map_func = (lambda image, boxes, labels: self._preprocess_fn(image, boxes, labels, is_training))\n if is_training:\n output_columns = [\"image\", \"bbox\", \"label\", \"num_match\"]\n trans_func = [self.random_color_adjust, self.normalize, hwc2chw]\n else:\n output_columns = [\"image\", \"image_shape\", \"label\"]\n trans_func = [self.normalize, hwc2chw]\n # apply transform functions on voc dataset\n voc_ds = voc_ds.map(operations=compose_map_func,\n input_columns=[\"image\", \"bbox\", \"label\"],\n output_columns=output_columns,\n column_order=output_columns,\n num_parallel_workers=num_parallel_workers)\n voc_ds = super().apply_ds(voc_ds, trans_func=trans_func, repeat_size=repeat_size,\n batch_size=batch_size, num_parallel_workers=num_parallel_workers)\n\n return voc_ds\n\n def postprocess(self, input, image_shape, strategy='TOP1_CLASS'):\n r'''\n Apply postprocess operation for prediction result.\n\n Args:\n input (numpy.ndarray): Prediction result.\n image_shape (tuple): Image shape.\n strategy (str): Specifies the postprocess strategy. Default: TOP1_CLASS.\n\n Returns:\n dict, the postprocess result.\n '''\n if not isinstance(input, np.ndarray):\n raise TypeError(\"Input type should be numpy.ndarray, got {}.\".format(type(input)))\n if not input.ndim == 3:\n raise TypeError(\"Input should be 3-D Numpy, got {}.\".format(input.ndim))\n if not strategy == 'TOP1_CLASS':\n raise ValueError(\"Currently VOC transform only supports 'TOP1_CLASS' strategy!\")\n\n pred_res = []\n pred_loc, pred_cls, pred_label = ssd_bboxes_filter(input[0, :, :4], input[0, :, 4:], image_shape)\n for loc, score, label in zip(pred_loc, pred_cls, pred_label):\n pred_res.append({\n 'bbox': [loc[1], loc[0], loc[3] - loc[1], loc[2] - loc[0]],\n 'score': score,\n 'category_id': self.labels[label],\n })\n\n return pred_res\n\n\nclass CycleGanDatasetTransform():\n r'''\n CycleGan dataset transform class.\n\n Inputs:\n img(Union[numpy.ndarray, PIL.Image]): Image to be transformed in city_scape.\n\n Outputs:\n numpy.ndarray, transformed image.\n\n Examples:\n >>> from PIL import Image\n >>> from tinyms.vision import CycleGanDatasetTransform\n >>>\n >>> cyclegan_transform = CycleGanDatasetTransform()\n >>> img = Image.open('example.jpg')\n >>> img = cyclegan_transform(img)\n '''\n\n def __init__(self):\n self.random_resized_crop = RandomResizedCrop(256, scale=(0.5, 1.0), ratio=(0.75, 1.333))\n self.random_horizontal_flip = RandomHorizontalFlip(prob=0.5)\n self.resize = Resize((256, 256))\n self.normalize = Normalize(mean=[0.5 * 255] * 3, std=[0.5 * 255] * 3)\n\n def __call__(self, img):\n if not isinstance(img, (np.ndarray, Image.Image)):\n raise TypeError(\"Input type should be numpy.ndarray or PIL.Image, got {}.\".format(type(img)))\n img = self.resize(img)\n img = self.normalize(img)\n img = hwc2chw(img)\n\n return img\n\n def apply_ds(self, gan_generator_ds, repeat_size=1, batch_size=1,\n num_parallel_workers=1, shuffle=True, phase='train'):\n r'''\n Apply preprocess operation on GeneratorDataset instance.\n\n Args:\n gan_generator_ds (data.GeneratorDataset): GeneratorDataset instance.\n repeat_size (int): The repeat size of dataset. 
Default: 1.\n batch_size (int): Batch size. Default: 32.\n num_parallel_workers (int): The number of concurrent workers. Default: 1.\n shuffle (bool): Specifies if applying shuffle operation. Default: True.\n phase (str): Specifies the current phase. Default: train.\n\n Returns:\n data.GeneratorDataset, the preprocessed GeneratorDataset instance.\n\n Examples:\n >>> from tinyms.vision import CycleGanDatasetTransform\n >>>\n >>> cyclegan_transform = CycleGanDatasetTransform()\n >>> gan_generator_ds = cyclegan_transform.apply_ds(gan_generator_ds)\n\n Raises:\n TypeError: If `gan_generator_ds` is not instance of GeneratorDataset.\n '''\n if not isinstance(gan_generator_ds, GeneratorDataset):\n raise TypeError(\"Input type should be GeneratorDataset, got {}.\".format(type(gan_generator_ds)))\n\n trans_func = []\n if phase == 'train':\n if shuffle:\n trans_func += [self.random_resized_crop, self.random_horizontal_flip, self.normalize, hwc2chw]\n else:\n trans_func += [self.resize, self.normalize, hwc2chw]\n\n # apply transform functions on gan_generator_ds dataset\n gan_generator_ds = gan_generator_ds.map(operations=trans_func,\n input_columns=[\"image_A\"],\n num_parallel_workers=num_parallel_workers)\n gan_generator_ds = gan_generator_ds.map(operations=trans_func,\n input_columns=[\"image_B\"],\n num_parallel_workers=num_parallel_workers)\n else:\n trans_func += [self.resize, self.normalize, hwc2chw]\n gan_generator_ds = gan_generator_ds.map(operations=trans_func,\n input_columns=[\"image\"],\n num_parallel_workers=num_parallel_workers)\n gan_generator_ds = gan_generator_ds.batch(batch_size, drop_remainder=True)\n gan_generator_ds = gan_generator_ds.repeat(repeat_size)\n return gan_generator_ds\n\n\nmnist_transform = MnistTransform()\ncifar10_transform = Cifar10Transform()\nimagefolder_transform = ImageFolderTransform()\nvoc_transform = VOCTransform()\ncyclegan_transform = CycleGanDatasetTransform()\n" ]
[ [ "numpy.hstack", "numpy.expand_dims", "numpy.maximum", "numpy.minimum", "numpy.random.choice", "numpy.sort", "numpy.concatenate", "numpy.zeros_like", "numpy.random.rand", "numpy.argsort", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NicolasDurrande/GPflow
[ "ba8b7a58bb5f695dc48242a31c949ee23148e555" ]
[ "gpflow/mean_functions.py" ]
[ "# Copyright 2016 James Hensman, alexggmatthews, PabloLeon, Valentine Svensson\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom . import settings\nfrom .params import Parameter\nfrom .params import Parameterized\nfrom .params import ParamList\nfrom .decors import params_as_tensors\n\n\nclass MeanFunction(Parameterized):\n \"\"\"\n The base mean function class.\n To implement a mean function, write the __call__ method. This takes a\n tensor X and returns a tensor m(X). In accordance with the GPflow\n standard, each row of X represents one datum, and each row of Y is computed\n independently for each row of X.\n\n MeanFunction classes can have parameters, see the Linear class for an\n example.\n \"\"\"\n def __call__(self, X):\n raise NotImplementedError(\"Implement the __call__ method for this mean function\")\n\n def __add__(self, other):\n return Additive(self, other)\n\n def __mul__(self, other):\n return Product(self, other)\n\n\nclass Linear(MeanFunction):\n \"\"\"\n y_i = A x_i + b\n \"\"\"\n def __init__(self, A=None, b=None):\n \"\"\"\n A is a matrix which maps each element of X to Y, b is an additive\n constant.\n\n If X has N rows and D columns, and Y is intended to have Q columns,\n then A must be D x Q, b must be a vector of length Q.\n \"\"\"\n A = np.ones((1, 1)) if A is None else A\n b = np.zeros(1) if b is None else b\n MeanFunction.__init__(self)\n self.A = Parameter(np.atleast_2d(A), dtype=settings.float_type)\n self.b = Parameter(b, dtype=settings.float_type)\n\n @params_as_tensors\n def __call__(self, X):\n return tf.matmul(X, self.A) + self.b\n\n\nclass Identity(Linear):\n \"\"\"\n y_i = x_i\n \"\"\"\n def __init__(self, input_dim=None):\n Linear.__init__(self)\n self.input_dim = input_dim\n\n def __call__(self, X):\n return X\n\n @property\n def A(self):\n if self.input_dim is None:\n raise ValueError(\"An input_dim needs to be specified when using the \"\n \"`Identity` mean function in combination with expectations.\")\n\n return tf.eye(self.input_dim, dtype=settings.float_type)\n\n @property\n def b(self):\n if self.input_dim is None:\n raise ValueError(\"An input_dim needs to be specified when using the \"\n \"`Identity` mean function in combination with expectations.\")\n\n return tf.zeros(self.input_dim, dtype=settings.float_type)\n\n @A.setter\n def A(self, A):\n pass\n\n @b.setter\n def b(self, b):\n pass\n\n\nclass Constant(MeanFunction):\n \"\"\"\n y_i = c,,\n \"\"\"\n def __init__(self, c=None):\n MeanFunction.__init__(self)\n c = np.zeros(1) if c is None else c\n c = np.reshape(c, (1, -1))\n self.c = Parameter(c)\n\n @params_as_tensors\n def __call__(self, X):\n shape = tf.stack([tf.shape(X)[0], 1])\n return tf.tile(self.c, shape)\n\n\nclass Zero(Constant):\n def __init__(self, output_dim=1):\n Constant.__init__(self)\n self.output_dim = output_dim\n del self.c\n\n def __call__(self, X):\n shape = tf.concat([tf.shape(X)[:-1], [self.output_dim]], 0)\n return tf.zeros(shape, 
dtype=settings.float_type)\n\n\nclass SwitchedMeanFunction(MeanFunction):\n \"\"\"\n This class enables to use different (independent) mean_functions respective\n to the data 'label'.\n We assume the 'label' is stored in the extra column of X.\n \"\"\"\n def __init__(self, meanfunction_list):\n MeanFunction.__init__(self)\n for m in meanfunction_list:\n assert isinstance(m, MeanFunction)\n self.meanfunction_list = ParamList(meanfunction_list)\n\n @params_as_tensors\n def __call__(self, X):\n ind = tf.gather(tf.transpose(X), tf.shape(X)[1]-1) # ind = X[:,-1]\n ind = tf.cast(ind, tf.int32)\n X = tf.transpose(tf.gather(tf.transpose(X), tf.range(0, tf.shape(X)[1]-1))) # X = X[:,:-1]\n\n # split up X into chunks corresponding to the relevant likelihoods\n x_list = tf.dynamic_partition(X, ind, len(self.meanfunction_list))\n # apply the likelihood-function to each section of the data\n results = [m(x) for x, m in zip(x_list, self.meanfunction_list)]\n # stitch the results back together\n partitions = tf.dynamic_partition(tf.range(0, tf.size(ind)), ind, len(self.meanfunction_list))\n return tf.dynamic_stitch(partitions, results)\n\n\nclass Additive(MeanFunction):\n def __init__(self, first_part, second_part):\n MeanFunction.__init__(self)\n self.add_1 = first_part\n self.add_2 = second_part\n\n def __call__(self, X):\n return tf.add(self.add_1(X), self.add_2(X))\n\n\nclass Product(MeanFunction):\n def __init__(self, first_part, second_part):\n MeanFunction.__init__(self)\n\n self.prod_1 = first_part\n self.prod_2 = second_part\n\n def __call__(self, X):\n return tf.multiply(self.prod_1(X), self.prod_2(X))\n" ]
[ [ "tensorflow.matmul", "tensorflow.transpose", "tensorflow.zeros", "numpy.reshape", "tensorflow.shape", "tensorflow.cast", "tensorflow.size", "tensorflow.eye", "numpy.ones", "tensorflow.dynamic_stitch", "numpy.atleast_2d", "numpy.zeros", "tensorflow.tile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
tunisij/mask-rcnn
[ "2e80edf45613dfccd5ae47110c16dc006c6fac67" ]
[ "mrcnn/visualize.py" ]
[ "\"\"\"\nMask R-CNN\nDisplay and Visualization Functions.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport random\nimport itertools\nimport colorsys\n\nimport numpy as np\nfrom skimage.measure import find_contours\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches, lines\nfrom matplotlib.patches import Polygon\nimport IPython.display\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\n\n\n############################################################\n# Visualization\n############################################################\n\ndef display_images(images, titles=None, cols=4, cmap=None, norm=None,\n interpolation=None):\n \"\"\"Display the given set of images, optionally with titles.\n images: list or array of image tensors in HWC format.\n titles: optional. A list of titles to display with each image.\n cols: number of images per row\n cmap: Optional. Color map to use. For example, \"Blues\".\n norm: Optional. A Normalize instance to map values to colors.\n interpolation: Optional. Image interpolation to use for display.\n \"\"\"\n titles = titles if titles is not None else [\"\"] * len(images)\n rows = len(images) // cols + 1\n plt.figure(figsize=(14, 14 * rows // cols))\n i = 1\n for image, title in zip(images, titles):\n plt.subplot(rows, cols, i)\n plt.title(title, fontsize=9)\n plt.axis('off')\n plt.imshow(image.astype(np.uint8), cmap=cmap,\n norm=norm, interpolation=interpolation)\n i += 1\n plt.show()\n\n\ndef random_colors(N, bright=True):\n \"\"\"\n Generate random colors.\n To get visually distinct colors, generate them in HSV space then\n convert to RGB.\n \"\"\"\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"Apply the given mask to the image.\n \"\"\"\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n return image\n\n\ndef display_instances(image, boxes, masks, class_ids, class_names,\n scores=None, title=\"\",\n figsize=(16, 16), ax=None,\n show_mask=True, show_bbox=True,\n colors=None, captions=None):\n \"\"\"\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n masks: [height, width, num_instances]\n class_ids: [num_instances]\n class_names: list of class names of the dataset\n scores: (optional) confidence scores for each box\n title: (optional) Figure title\n show_mask, show_bbox: To show masks and bounding boxes or not\n figsize: (optional) the size of the image\n colors: (optional) An array or colors to use with each object\n captions: (optional) A list of strings to use as captions for each object\n \"\"\"\n # Number of instances\n N = boxes.shape[0]\n if not N:\n print(\"\\n*** No instances to display *** \\n\")\n else:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n # If no axis is passed, create one and automatically call show()\n auto_show = False\n if not ax:\n _, ax = plt.subplots(1, figsize=figsize)\n # auto_show = True\n\n # Generate random colors\n colors = colors or random_colors(N)\n\n # Show area outside image boundaries.\n height, width = image.shape[:2]\n 
ax.set_ylim(height + 10, -10)\n ax.set_xlim(-10, width + 10)\n ax.axis('off')\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n color = colors[i]\n\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n if show_bbox:\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=0.7, linestyle=\"dashed\",\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Label\n if not captions:\n class_id = class_ids[i]\n score = scores[i] if scores is not None else None\n label = class_names[class_id]\n caption = \"{} {:.3f}\".format(label, score) if score else label\n else:\n caption = captions[i]\n ax.text(x1, y1 + 8, caption,\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n mask = masks[:, :, i]\n if show_mask:\n masked_image = apply_mask(masked_image, mask, color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n if auto_show:\n plt.show()\n\n\ndef display_differences(image,\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n class_names, title=\"\", ax=None,\n show_mask=True, show_box=True,\n iou_threshold=0.5, score_threshold=0.5):\n \"\"\"Display ground truth and prediction instances on the same image.\"\"\"\n # Match predictions to ground truth\n gt_match, pred_match, overlaps = utils.compute_matches(\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_threshold=iou_threshold, score_threshold=score_threshold)\n # Ground truth = green. 
Predictions = red\n colors = [(0, 1, 0, .8)] * len(gt_match)\\\n + [(1, 0, 0, 1)] * len(pred_match)\n # Concatenate GT and predictions\n class_ids = np.concatenate([gt_class_id, pred_class_id])\n scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])\n boxes = np.concatenate([gt_box, pred_box])\n masks = np.concatenate([gt_mask, pred_mask], axis=-1)\n # Captions per instance show score/IoU\n captions = [\"\" for m in gt_match] + [\"{:.2f} / {:.2f}\".format(\n pred_score[i],\n (overlaps[i, int(pred_match[i])]\n if pred_match[i] > -1 else overlaps[i].max()))\n for i in range(len(pred_match))]\n # Set title if not provided\n title = title or \"Ground Truth and Detections\\n GT=green, pred=red, captions: score/IoU\"\n # Display\n display_instances(\n image,\n boxes, masks, class_ids,\n class_names, scores, ax=ax,\n show_bbox=show_box, show_mask=show_mask,\n colors=colors, captions=captions,\n title=title)\n\n\ndef draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):\n \"\"\"\n anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.\n proposals: [n, 4] the same anchors but refined to fit objects better.\n \"\"\"\n masked_image = image.copy()\n\n # Pick random anchors in case there are too many.\n ids = np.arange(rois.shape[0], dtype=np.int32)\n ids = np.random.choice(\n ids, limit, replace=False) if ids.shape[0] > limit else ids\n\n fig, ax = plt.subplots(1, figsize=(12, 12))\n if rois.shape[0] > limit:\n plt.title(\"Showing {} random ROIs out of {}\".format(\n len(ids), rois.shape[0]))\n else:\n plt.title(\"{} ROIs\".format(len(ids)))\n\n # Show area outside image boundaries.\n ax.set_ylim(image.shape[0] + 20, -20)\n ax.set_xlim(-50, image.shape[1] + 20)\n ax.axis('off')\n\n for i, id in enumerate(ids):\n color = np.random.rand(3)\n class_id = class_ids[id]\n # ROI\n y1, x1, y2, x2 = rois[id]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n edgecolor=color if class_id else \"gray\",\n facecolor='none', linestyle=\"dashed\")\n ax.add_patch(p)\n # Refined ROI\n if class_id:\n ry1, rx1, ry2, rx2 = refined_rois[id]\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal for easy visualization\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Label\n label = class_names[class_id]\n ax.text(rx1, ry1 + 8, \"{}\".format(label),\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n m = utils.unmold_mask(mask[id], rois[id]\n [:4].astype(np.int32), image.shape)\n masked_image = apply_mask(masked_image, m, color)\n\n ax.imshow(masked_image)\n\n # Print stats\n print(\"Positive ROIs: \", class_ids[class_ids > 0].shape[0])\n print(\"Negative ROIs: \", class_ids[class_ids == 0].shape[0])\n print(\"Positive Ratio: {:.2f}\".format(\n class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))\n\n\n# TODO: Replace with matplotlib equivalent?\ndef draw_box(image, box, color):\n \"\"\"Draw 3-pixel width bounding boxes on the given image array.\n color: list of 3 int values for RGB.\n \"\"\"\n y1, x1, y2, x2 = box\n image[y1:y1 + 2, x1:x2] = color\n image[y2:y2 + 2, x1:x2] = color\n image[y1:y2, x1:x1 + 2] = color\n image[y1:y2, x2:x2 + 2] = color\n return image\n\n\ndef display_top_masks(image, mask, class_ids, class_names, limit=4):\n \"\"\"Display the given image and the top few class masks.\"\"\"\n to_display = []\n titles = []\n to_display.append(image)\n titles.append(\"H x 
W={}x{}\".format(image.shape[0], image.shape[1]))\n # Pick top prominent classes in this image\n unique_class_ids = np.unique(class_ids)\n mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])\n for i in unique_class_ids]\n top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),\n key=lambda r: r[1], reverse=True) if v[1] > 0]\n # Generate images and titles\n for i in range(limit):\n class_id = top_ids[i] if i < len(top_ids) else -1\n # Pull masks of instances belonging to the same class.\n m = mask[:, :, np.where(class_ids == class_id)[0]]\n m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)\n to_display.append(m)\n titles.append(class_names[class_id] if class_id != -1 else \"-\")\n display_images(to_display, titles=titles, cols=limit + 1, cmap=\"Blues_r\")\n\n\ndef plot_precision_recall(AP, precisions, recalls):\n \"\"\"Draw the precision-recall curve.\n\n AP: Average precision at IoU >= 0.5\n precisions: list of precision values\n recalls: list of recall values\n \"\"\"\n # Plot the Precision-Recall curve\n _, ax = plt.subplots(1)\n ax.set_title(\"Precision-Recall Curve. AP@50 = {:.3f}\".format(AP))\n ax.set_ylim(0, 1.1)\n ax.set_xlim(0, 1.1)\n _ = ax.plot(recalls, precisions)\n\n\ndef plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,\n overlaps, class_names, threshold=0.5):\n \"\"\"Draw a grid showing how ground truth objects are classified.\n gt_class_ids: [N] int. Ground truth class IDs\n pred_class_id: [N] int. Predicted class IDs\n pred_scores: [N] float. The probability scores of predicted classes\n overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.\n class_names: list of all class names in the dataset\n threshold: Float. The prediction probability required to predict a class\n \"\"\"\n gt_class_ids = gt_class_ids[gt_class_ids != 0]\n pred_class_ids = pred_class_ids[pred_class_ids != 0]\n\n plt.figure(figsize=(12, 10))\n plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)\n plt.yticks(np.arange(len(pred_class_ids)),\n [\"{} ({:.2f})\".format(class_names[int(id)], pred_scores[i])\n for i, id in enumerate(pred_class_ids)])\n plt.xticks(np.arange(len(gt_class_ids)),\n [class_names[int(id)] for id in gt_class_ids], rotation=90)\n\n thresh = overlaps.max() / 2.\n for i, j in itertools.product(range(overlaps.shape[0]),\n range(overlaps.shape[1])):\n text = \"\"\n if overlaps[i, j] > threshold:\n text = \"match\" if gt_class_ids[j] == pred_class_ids[i] else \"wrong\"\n color = (\"white\" if overlaps[i, j] > thresh\n else \"black\" if overlaps[i, j] > 0\n else \"grey\")\n plt.text(j, i, \"{:.3f}\\n{}\".format(overlaps[i, j], text),\n horizontalalignment=\"center\", verticalalignment=\"center\",\n fontsize=9, color=color)\n\n plt.tight_layout()\n plt.xlabel(\"Ground Truth\")\n plt.ylabel(\"Predictions\")\n\n\ndef draw_boxes(image, boxes=None, refined_boxes=None,\n masks=None, captions=None, visibilities=None,\n title=\"\", ax=None):\n \"\"\"Draw bounding boxes and segmentation masks with different\n customizations.\n\n boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.\n refined_boxes: Like boxes, but draw with solid lines to show\n that they're the result of refining 'boxes'.\n masks: [N, height, width]\n captions: List of N titles to display on each box\n visibilities: (optional) List of values of 0, 1, or 2. 
Determine how\n prominent each bounding box should be.\n title: An optional title to show over the image\n ax: (optional) Matplotlib axis to draw on.\n \"\"\"\n # Number of boxes\n assert boxes is not None or refined_boxes is not None\n N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]\n\n # Matplotlib Axis\n if not ax:\n _, ax = plt.subplots(1, figsize=(12, 12))\n\n # Generate random colors\n colors = random_colors(N)\n\n # Show area outside image boundaries.\n margin = image.shape[0] // 10\n ax.set_ylim(image.shape[0] + margin, -margin)\n ax.set_xlim(-margin, image.shape[1] + margin)\n ax.axis('off')\n\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n # Box visibility\n visibility = visibilities[i] if visibilities is not None else 1\n if visibility == 0:\n color = \"gray\"\n style = \"dotted\"\n alpha = 0.5\n elif visibility == 1:\n color = colors[i]\n style = \"dotted\"\n alpha = 1\n elif visibility == 2:\n color = colors[i]\n style = \"solid\"\n alpha = 1\n\n # Boxes\n if boxes is not None:\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=alpha, linestyle=style,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Refined boxes\n if refined_boxes is not None and visibility > 0:\n ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal\n if boxes is not None:\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Captions\n if captions is not None:\n caption = captions[i]\n # If there are refined boxes, display captions on them\n if refined_boxes is not None:\n y1, x1, y2, x2 = ry1, rx1, ry2, rx2\n ax.text(x1, y1, caption, size=11, verticalalignment='top',\n color='w', backgroundcolor=\"none\",\n bbox={'facecolor': color, 'alpha': 0.5,\n 'pad': 2, 'edgecolor': 'none'})\n\n # Masks\n if masks is not None:\n mask = masks[:, :, i]\n masked_image = apply_mask(masked_image, mask, color)\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n\n\ndef display_table(table):\n \"\"\"Display values in a table format.\n table: an iterable of rows, and each row is an iterable of values.\n \"\"\"\n html = \"\"\n for row in table:\n row_html = \"\"\n for col in row:\n row_html += \"<td>{:40}</td>\".format(str(col))\n html += \"<tr>\" + row_html + \"</tr>\"\n html = \"<table>\" + html + \"</table>\"\n IPython.display.display(IPython.display.HTML(html))\n\n\ndef display_weight_stats(model):\n \"\"\"Scans all the weights in the model and returns a list of tuples\n that contain stats about each weight.\n \"\"\"\n layers = model.get_trainable_layers()\n table = [[\"WEIGHT NAME\", \"SHAPE\", \"MIN\", \"MAX\", \"STD\"]]\n for l in layers:\n weight_values = l.get_weights() # list of Numpy arrays\n weight_tensors = l.weights # list of TF tensors\n for i, w in 
enumerate(weight_values):\n weight_name = weight_tensors[i].name\n # Detect problematic layers. Exclude biases of conv layers.\n alert = \"\"\n if w.min() == w.max() and not (l.__class__.__name__ == \"Conv2D\" and i == 1):\n alert += \"<span style='color:red'>*** dead?</span>\"\n if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:\n alert += \"<span style='color:red'>*** Overflow?</span>\"\n # Add row\n table.append([\n weight_name + alert,\n str(w.shape),\n \"{:+9.4f}\".format(w.min()),\n \"{:+10.4f}\".format(w.max()),\n \"{:+9.4f}\".format(w.std()),\n ])\n display_table(table)\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.concatenate", "numpy.any", "numpy.where", "matplotlib.patches.Polygon", "matplotlib.pyplot.tight_layout", "numpy.unique", "numpy.fliplr", "numpy.arange", "matplotlib.pyplot.subplot", "matplotlib.pyplot.axis", "numpy.zeros", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.random.choice", "matplotlib.patches.Rectangle", "numpy.random.rand", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.lines.Line2D", "matplotlib.pyplot.subplots", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Blackdevil132/machineLearning
[ "de048bb1473994052f8ed1afb11a15b7833b506d" ]
[ "src/old/QtableEnemy.py" ]
[ "import numpy as np\nfrom src.qrl.Qtable import Qtable\n\n\n# Qtable for 2-dim storing\nclass QtableEnemy(Qtable):\n def __init__(self, action_space, observation_space_1, observation_space_2):\n Qtable.__init__(self)\n self.action_space = action_space\n self.observation_space = (observation_space_1, observation_space_2)\n\n self.table = [{j: np.zeros(action_space) for j in range(observation_space_2)} for i in range(observation_space_1)]\n for i in range(self.observation_space[0]):\n self.table[i][255] = np.zeros(action_space)\n\n def get(self, state, action=None):\n if action is None:\n return self.table[state[0]][state[1]][:]\n\n return self.table[state[0]][state[1]][action]\n\n def update(self, state, action, newValue):\n self.table[state[0]][state[1]][action] = newValue\n\n def show(self):\n for dim1 in range(self.observation_space[0]):\n print(\"%i \" % dim1, end='')\n for key in self.table[dim1].keys():\n print(\"\\t%i: \" % key, end='')\n for action in self.table[dim1][key]:\n print(\"\\t%.3f, \" % action, end='')\n print()\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stu314159/HPC_Introduction_with_LBM
[ "cbba81460513166b4814f3028807020be9b5c234" ]
[ "python/tlbm/prolate_spheroid/tLBM_partition.py" ]
[ "#!/usr/bin/env python3\n##!/home/users/sblair/anaconda2/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 26 14:23:52 2017\n\n@author: stu\n\"\"\"\n\nimport sys\nsys.path.insert(1,'.')\n\nimport pyPartition as pp\n#from pymetis import part_graph #<-- requires that the PrgEnv-intel module be selected\nimport numpy as np\nimport scipy.io\nimport math\nimport argparse\n\nparser = argparse.ArgumentParser(prog='pyNFC_partition.py',\n description='lattice partitioning script for pyNFC')\n\nparser.add_argument('geom_filename',type=str)\nparser.add_argument('lattice_type',type=str)\nparser.add_argument('partition_style',type=str)\nparser.add_argument('numProcs',type=int)\n\n# parse input arguments\nargs = parser.parse_args()\n\n# assign to required variables\ngeom_filename = args.geom_filename\nlattice_type = args.lattice_type\npartition_style = args.partition_style\nnumProcs = args.numProcs\n\ngeom_input = scipy.io.loadmat(geom_filename)\n# overall domain dimensions\nLx_p = float(geom_input['Lx_p'])\nLy_p = float(geom_input['Ly_p'])\nLz_p = float(geom_input['Lz_p'])\nLo = float(geom_input['Lo'])\nNy_divs = int(geom_input['Ny_divs'])\nrho_p = float(geom_input['rho_p'])\nnu_p = float(geom_input['nu_p'])\n\n\nNy = math.ceil((Ny_divs-1)*(Ly_p/Lo))+1\nNx = math.ceil((Ny_divs-1)*(Lx_p/Lo))+1\nNz = math.ceil((Ny_divs-1)*(Lz_p/Lo))+1\nnnodes = Nx*Ny*Nz\n\n# compute geometric data only once\nx = np.linspace(0.,Lx_p,Nx).astype(np.float32);\ny = np.linspace(0.,Ly_p,Ny).astype(np.float32);\nz = np.linspace(0.,Lz_p,Nz).astype(np.float32);\nnumEl = Nx*Ny*Nz\nY,Z,X = np.meshgrid(y,z,x);\n\nXX = np.reshape(X,int(numEl))\nYY = np.reshape(Y,int(numEl))\nZZ = np.reshape(Z,int(numEl))\n\n\n\n\nif lattice_type == 'D3Q15':\n lat = pp.D3Q15Lattice(int(Nx),int(Ny),int(Nz))\nelif lattice_type == 'D3Q19':\n lat = pp.D3Q19Lattice(int(Nx),int(Ny),int(Nz))\nelse:\n lat = pp.D3Q27Lattice(int(Nx),int(Ny),int(Nz))\n\n\nprint(\"initializing the adjacency list\")\nlat.initialize_adjDict();\nprint(\"creating %s partition for %d processes\" % (partition_style, numProcs))\nlat.set_Partition(numParts= numProcs, style = partition_style)\nlat.compute_cutSize()\nprint(\"cut size for %s partition = %g\" % (partition_style, lat.get_cutSize()))\nprint(\"writing vtk file for %s partition\" % partition_style)\npartition_vtk_filename = \"partition_%s.vtk\" % partition_style\nlat.partition.write_vtk(partition_vtk_filename)\nprint(\"writing %s partition to disk\" % partition_style)\nlat.partition.write_partition()\n" ]
[ [ "numpy.meshgrid", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zacksoliman/conditional-image-generation
[ "9f6b25d1e9dc2a3134e41ae57dc42bbe2196ed63" ]
[ "imagegen/data.py" ]
[ "import os, sys\nimport glob\nimport pickle as pkl\nimport numpy as np\nimport PIL.Image as Image\nfrom skimage.transform import resize\n\ndef resize_mscoco():\n '''\n function used to create the dataset,\n Resize original MS_COCO Image into 64x64 images\n '''\n\n ### PATH need to be fixed\n data_path = os.path.join(os.path.expanduser(\"~\"), \n \"development/conditional-image-generation/datasets/coco/train2014\")\n save_dir = os.path.join(os.path.expanduser(\"~\"), \n \"development/conditional-image-generation/datasets/coco/Tmp/64_64/train2014/\")\n\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n preserve_ratio = True\n image_size = (64, 64)\n crop_size = (32, 32)\n\n imgs = glob.glob(data_path+\"/*.jpg\")\n\n\n for i, img_path in enumerate(imgs):\n img = Image.open(img_path)\n print(i, len(imgs), img_path)\n\n if img.size[0] != image_size[0] or img.size[1] != image_size[1] :\n if not preserve_ratio:\n img = img.resize((image_size), Image.ANTIALIAS)\n else:\n ### Resize based on the smallest dimension\n scale = image_size[0] / float(np.min(img.size))\n new_size = (int(np.floor(scale * img.size[0]))+1, int(np.floor(scale * img.size[1])+1))\n img = img.resize((new_size), Image.ANTIALIAS)\n\n ### Crop the 64/64 center\n tocrop = np.array(img)\n center = (int(np.floor(tocrop.shape[0] / 2.)), int(np.floor(tocrop.shape[1] / 2.)))\n print(tocrop.shape, center, (center[0]-32,center[0]+32), (center[1]-32,center[1]+32))\n if len(tocrop.shape) == 3:\n tocrop = tocrop[center[0]-32:center[0]+32, center[1] - 32:center[1]+32, :]\n else:\n tocrop = tocrop[center[0]-32:center[0]+32, center[1] - 32:center[1]+32]\n img = Image.fromarray(tocrop)\n\n img.save(save_dir + os.path.basename(img_path))\n\ndef show_examples(batch_idx, batch_size,\n ### PATH need to be fixed\n mscoco=os.path.join(os.path.expanduser(\"~\"), \"development/conditional-image-generation/datasets/\"), \n split=\"coco/train2014\", \n caption_path=\"dict_key_imgID_value_caps_train_and_valid.pkl\"):\n '''\n Show an example of how to read the dataset\n '''\n\n data_path = os.path.join(mscoco, split)\n caption_path = os.path.join(mscoco, caption_path)\n with open(caption_path, 'rb') as fd:\n caption_dict = pkl.load(fd)\n\n print(data_path + \"/*.jpg\")\n imgs = glob.glob(data_path + \"/*.jpg\")\n batch_imgs = imgs[batch_idx*batch_size:(batch_idx+1)*batch_size]\n\n for i, img_path in enumerate(batch_imgs):\n img = Image.open(img_path)\n img_array = np.array(img)\n\n cap_id = os.path.basename(img_path)[:-4]\n\n ### Get input/target from the images\n center = (int(np.floor(img_array.shape[0] / 2.)), int(np.floor(img_array.shape[1] / 2.)))\n if len(img_array.shape) == 3:\n input_img = np.copy(img_array)\n input_img[center[0]-16:center[0]+16, center[1]-16:center[1]+16, :] = 0\n target = img_array[center[0]-16:center[0]+16, center[1] - 16:center[1]+16, :]\n else:\n input_img = np.copy(img_array)\n input_img[center[0]-16:center[0]+16, center[1]-16:center[1]+16, :] = 0\n target = img_array[center[0]-16:center[0]+16, center[1] - 16:center[1]+16]\n\n\n #Image.fromarray(img_array).show()\n Image.fromarray(input_img).show()\n Image.fromarray(target).show()\n print(i, caption_dict[cap_id])\n" ]
[ [ "numpy.copy", "numpy.array", "numpy.floor", "numpy.min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wblumberg/atm-py
[ "253fc427fb366667da2d46a9af4a5d6550a13a6d" ]
[ "atmPy/aerosols/physics/aerosol.py" ]
[ "# -*- coding: utf-8 -*-\nfrom math import pi,exp,log10,sqrt,log\n\nfrom scipy.optimize import fsolve\n\nfrom atmPy.general import constants\n\n\ndef z(d, gas, n):\n \"\"\"\n Calculate electric mobility of particle with diameter D. \n \n Parameters\n -----------\n d: float\n diameter in nm\n gas: object of type gas\n Object that defines the gas calculations\n n: int\n number of charges\n \n Returns\n --------- \n Electrical mobility in m2/V*s as defined in Hinds (1999), p. 322, eq. 15.21.\n \"\"\"\n \n try:\n return n * constants.e * cc(d, gas) / (3 * pi * gas.mu() * d * 1e-9)\n except AttributeError:\n print('Incorrect type selected for attribute \"gas\".')\n return 0\n\n\ndef z2d(zin, gas, n=1):\n \"\"\"\n Retrieve particle diameter from the electrical mobility\n \n Call this using a roots or fsolve function.\n \n Parameters\n -----------\n gas: gas object\n Gas object defining properties of variables related to gases\n zin: float\n Electrical mobility in m2/Vs\n n: float, optional, default = 1\n Number of charges\n \n Returns\n -------\n Diameter of particle in nanometers.\n \"\"\"\n \n # Inline function use with fsolve\n f = lambda d: d*1e-9- n * constants.e * cc(d, gas) / (3 * pi * gas.mu() * zin)\n d0 = 1e-9\n return fsolve(f, d0)[0]\n\n\ndef cc(d, gas):\n \"\"\"\n Calculate Cunningham correction factor.\n\n Parameters\n -----------\n d: float\n Particle diameter in nanometers.\n gas: Gas object\n gas object from the atmosphere package\n\n Returns\n --------\n Cunningham correction factor as a function of diameter and mean free path.\n\n Notes\n -------\n This is from Hinds (1999); p49, eq 3.20\n \"\"\"\n \n # Convert diameter to microns.\n d = float(d)*1e-3\n # Get the mean free path\n try:\n\n mfp = gas.l()\n return (1.05*exp(-0.39*d/mfp)+2.34)*mfp/d+1\n \n except AttributeError:\n print('Invalid type entered for \"gas\". Should be of type atmosphere.gas\".')\n return 0\n\n\ndef kn(dp, gas):\n \"\"\"\n Calculate the Knudsen number of a particle.\n\n The Knudsen number determines the appropriateness of the continuum assumption. If Kn >~1, then the continuum\n assumption is not appropriate for the problem solution.\n\n Parameters\n ----------\n dp: float\n particle diameter in nm\n gas: gas object\n Gas object used to determine the mean free path of the gas.\n\n Returns\n -------\n float\n Knudsen number\n\n \"\"\"\n return 2*gas.l/dp\n\n\ndef ndistr(dp, n=-1, t=20):\n \"\"\"\n Bipolar charge distribution.\n\n Parameters\n -----------\n dp: float\n diameter of particle in nm\n n: int\n number of charges\n t: float\n temperature in degree C\n\n Returns\n --------\n Charging efficiency\n\n Notes\n ------\n * For particles smaller than 1 micron, uses Wiedensohler (1988), J. Aerosol Sci., 19, 3.\n * For particles larger than 1 micron, uses Gunn (1956), J. 
Colloid Sci., 11, 661.\n \"\"\"\n \n dp = float(dp)\n a = [0, 0, 0, 0, 0]\n if (abs(n) > 1 and dp < 20) or (dp <= 70 and abs(n) > 2):\n\n # Particles less than 20 nm can carry at most 1 charge.\n # Particles less than 70 nm can carry at most 2 charges.\n return 0\n\n # Use Wiedensohler if the particle size is less than a micron and the number of\n # charges is less than or equal to 2.\n elif dp <= 1000 and abs(n) <= 2:\n if n == -2:\n a = [-26.3328, 35.9044, -21.4608, 7.0867, -1.3088, 0.1051]\n\n elif n == -1:\n a = [-2.3197, 0.6175, 0.6201, -0.1105, -0.1260, 0.0297]\n elif n == 0:\n a = [-0.0003, -0.1014, 0.3073, -0.3372, 0.1023, -0.0105]\n elif n == 1:\n\n # a[4] has been modified from the original publication\n a = [-2.3484, 0.6044, 0.4800, 0.0013, -0.1553, 0.0320]\n elif n == 2:\n\n # a[5] has been modified from original publication\n a = [-44.4756, 79.3772, -62.8900, 26.4492, -5.7480, 0.5049]\n\n power = 0\n\n for i, e in enumerate(a):\n power += e*log10(dp)**i\n\n return 10**power\n\n # Use Gunn if the particle size is > 1 micron or the number of charges is > 2\n else:\n\n # convert [°C] to [K]\n t += 273.15\n\n # convert [nm] to [m]\n dp *= 1e-9\n\n # ratio of positive and negative ion concentrations\n ionconcrat = 1\n\n # ratio of positive and negative ion mobilities\n ionmobrat = 0.875\n\n f1 = constants.e / sqrt(4 * pi ** 2 * constants.eps0 * dp * constants.k * t)\n f2 = 2*pi * constants.eps0 * dp * constants.k * t / constants.e ** 2\n return f1*exp(-1*(n-f2*log(ionconcrat*ionmobrat))**2/(2*f2))\n\n\ndef d50(n, rhop, q, gas, dj):\n \"\"\"\n Find the impactor cutpoint.\n\n Parameters\n ----------\n N: int\n number of jets\n rhop: float\n particle density in kg/m^3\n Q: float\n volumetric flow rate in lpm\n gas: Gas object\n gas object that utilize a child of the Gas class.\n dj: int\n jet diameter in meters\n\n Returns\n -------\n Impactor cutpoint in microns.\n\n Notes\n ------\n Calculations are those of Hinds (1999)\n \n \"\"\"\n\n # Convert the volumetric flow rate into m^3/s\n q = float(q)/60*1000/100**3\n\n # From Hinds, this is the Stoke's number for the 50% collections\n # efficiency (Table 5.4)\n stk50 = 0.24\n\n # Equation 5.29 in Hinds (1999)\n d50cc = sqrt(9*gas.mu()*pi*n*dj**3*stk50/(4.0*float(rhop)*q))\n\n f = lambda x: (d50cc/float(x))**2-cc(float(x*1e-6), gas)\n \n # Find the D50 of the impactor\n return fsolve(f, 0.1)" ]
[ [ "scipy.optimize.fsolve" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
0liu/Hyperactive
[ "cb1df3b6dcfbd6bc238439a63b490eaa50e37fce" ]
[ "tests/test_memory/test_shared_memory.py" ]
[ "import time\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.tree import DecisionTreeRegressor\n\n\nfrom hyperactive import Hyperactive\n\n\ndata = load_boston()\nX, y = data.data, data.target\n\n\ncv = 10\n\n\ndef model(opt):\n gbr = DecisionTreeRegressor(\n min_samples_split=opt[\"min_samples_split\"],\n )\n scores = cross_val_score(gbr, X, y, cv=cv)\n\n return scores.mean()\n\n\ndef model1(opt):\n gbr = DecisionTreeRegressor(\n min_samples_split=opt[\"min_samples_split\"],\n )\n scores = cross_val_score(gbr, X, y, cv=cv)\n\n return scores.mean()\n\n\ndef model2(opt):\n gbr = DecisionTreeRegressor(\n min_samples_split=opt[\"min_samples_split\"],\n )\n scores = cross_val_score(gbr, X, y, cv=cv)\n\n return scores.mean()\n\n\ndef model3(opt):\n gbr = DecisionTreeRegressor(\n min_samples_split=opt[\"min_samples_split\"],\n )\n scores = cross_val_score(gbr, X, y, cv=cv)\n\n return scores.mean()\n\n\ndef model4(opt):\n gbr = DecisionTreeRegressor(\n min_samples_split=opt[\"min_samples_split\"],\n )\n scores = cross_val_score(gbr, X, y, cv=cv)\n\n return scores.mean()\n\n\nsearch_space = {\n \"min_samples_split\": list(range(2, 100)),\n}\n\n\ndef test_shared_memory_0():\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory=True,\n )\n hyper.run()\n d_time_1 = time.time() - c_time\n\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=4,\n memory=True,\n )\n hyper.run()\n d_time_2 = time.time() - c_time\n\n print(\"\\n d_time_1 \\n\", d_time_1)\n print(\"\\n d_time_2 \\n\", d_time_2)\n\n d_time_2 = d_time_2 / 2\n\n assert d_time_2 - d_time_1 < 0\n\n\ndef test_shared_memory_1():\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_1 = time.time() - c_time\n\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=4,\n )\n hyper.run()\n d_time_2 = time.time() - c_time\n\n print(\"\\n d_time_1 \\n\", d_time_1)\n print(\"\\n d_time_2 \\n\", d_time_2)\n\n d_time_2 = d_time_2 / 2\n\n assert d_time_1 / d_time_2 > 1.1\n\n\ndef test_shared_memory_2():\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_1 = time.time() - c_time\n\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_2 = time.time() - c_time\n\n print(\"\\n d_time_1 \\n\", d_time_1)\n print(\"\\n d_time_2 \\n\", d_time_2)\n\n d_time_2 = d_time_2 / 2\n\n assert d_time_1 / d_time_2 > 1.1\n\n\ndef test_shared_memory_3():\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model1,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model2,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_1 = time.time() - c_time\n\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n 
n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model1,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model2,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model3,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model4,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_2 = time.time() - c_time\n\n print(\"\\n d_time_1 \\n\", d_time_1)\n print(\"\\n d_time_2 \\n\", d_time_2)\n\n d_time_2 = d_time_2 / 2\n\n assert d_time_1 / d_time_2 < 1.2\n\n\ndef test_shared_memory_warm_start_0():\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_1 = time.time() - c_time\n\n search_data0 = hyper.results(model)\n\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=4,\n memory_warm_start=search_data0,\n )\n hyper.run()\n d_time_2 = time.time() - c_time\n\n print(\"\\n d_time_1 \\n\", d_time_1)\n print(\"\\n d_time_2 \\n\", d_time_2)\n\n d_time_2 = d_time_2 / 2\n\n assert d_time_2 * 1.4 - d_time_1 < 0\n\n\ndef test_shared_memory_warm_start_1():\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_1 = time.time() - c_time\n\n search_data0 = hyper.results(model)\n\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=4,\n memory_warm_start=search_data0,\n )\n hyper.run()\n d_time_2 = time.time() - c_time\n\n print(\"\\n d_time_1 \\n\", d_time_1)\n print(\"\\n d_time_2 \\n\", d_time_2)\n\n d_time_2 = d_time_2 / 2\n\n assert d_time_1 / d_time_2 > 1.1\n\n\ndef test_shared_memory_warm_start_2():\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_1 = time.time() - c_time\n\n search_data0 = hyper.results(model)\n\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.run()\n d_time_2 = time.time() - c_time\n\n print(\"\\n d_time_1 \\n\", d_time_1)\n print(\"\\n d_time_2 \\n\", d_time_2)\n\n d_time_2 = d_time_2 / 2\n\n assert d_time_1 / d_time_2 > 1.1\n\n\ndef test_shared_memory_warm_start_3():\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model1,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model2,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model3,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.add_search(\n model4,\n search_space,\n n_iter=100,\n n_jobs=1,\n )\n hyper.run()\n d_time_1 = time.time() - c_time\n\n search_data0 = hyper.results(model1)\n\n c_time = time.time()\n hyper = Hyperactive(n_processes=1)\n hyper.add_search(\n model,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.add_search(\n model1,\n search_space,\n 
n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.add_search(\n model2,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.add_search(\n model3,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.add_search(\n model4,\n search_space,\n n_iter=100,\n n_jobs=1,\n memory_warm_start=search_data0,\n )\n hyper.run()\n d_time_2 = time.time() - c_time\n\n print(\"\\n d_time_1 \\n\", d_time_1)\n print(\"\\n d_time_2 \\n\", d_time_2)\n\n d_time_2 = d_time_2 / 2\n\n assert d_time_1 / d_time_2 > 1.4\n" ]
[ [ "sklearn.tree.DecisionTreeRegressor", "sklearn.model_selection.cross_val_score", "sklearn.datasets.load_boston" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yt605155624/Parakeet
[ "8ce8254adad55df07288df86cecdbf0f608b73fb" ]
[ "examples/ge2e/inference.py" ]
[ "# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nfrom pathlib import Path\n\nimport tqdm\nimport paddle\nimport numpy as np\n\nfrom parakeet.models.lstm_speaker_encoder import LSTMSpeakerEncoder\n\nfrom audio_processor import SpeakerVerificationPreprocessor\nfrom config import get_cfg_defaults\n\n\ndef embed_utterance(processor, model, fpath_or_wav):\n # audio processor\n wav = processor.preprocess_wav(fpath_or_wav)\n mel_partials = processor.extract_mel_partials(wav)\n\n model.eval()\n # speaker encoder\n with paddle.no_grad():\n mel_partials = paddle.to_tensor(mel_partials)\n with paddle.no_grad():\n embed = model.embed_utterance(mel_partials)\n embed = embed.numpy()\n return embed\n\n\ndef _process_utterance(ifpath: Path,\n input_dir: Path,\n output_dir: Path,\n processor: SpeakerVerificationPreprocessor,\n model: LSTMSpeakerEncoder):\n rel_path = ifpath.relative_to(input_dir)\n ofpath = (output_dir / rel_path).with_suffix(\".npy\")\n ofpath.parent.mkdir(parents=True, exist_ok=True)\n embed = embed_utterance(processor, model, ifpath)\n np.save(ofpath, embed)\n\n\ndef main(config, args):\n paddle.set_device(args.device)\n\n # load model\n model = LSTMSpeakerEncoder(config.data.n_mels, config.model.num_layers,\n config.model.hidden_size,\n config.model.embedding_size)\n weights_fpath = str(Path(args.checkpoint_path).expanduser())\n model_state_dict = paddle.load(weights_fpath + \".pdparams\")\n model.set_state_dict(model_state_dict)\n model.eval()\n print(f\"Loaded encoder {weights_fpath}\")\n\n # create audio processor\n c = config.data\n processor = SpeakerVerificationPreprocessor(\n sampling_rate=c.sampling_rate,\n audio_norm_target_dBFS=c.audio_norm_target_dBFS,\n vad_window_length=c.vad_window_length,\n vad_moving_average_width=c.vad_moving_average_width,\n vad_max_silence_length=c.vad_max_silence_length,\n mel_window_length=c.mel_window_length,\n mel_window_step=c.mel_window_step,\n n_mels=c.n_mels,\n partial_n_frames=c.partial_n_frames,\n min_pad_coverage=c.min_pad_coverage,\n partial_overlap_ratio=c.min_pad_coverage, )\n\n # input output preparation\n input_dir = Path(args.input).expanduser()\n ifpaths = list(input_dir.rglob(args.pattern))\n print(f\"{len(ifpaths)} utterances in total\")\n output_dir = Path(args.output).expanduser()\n output_dir.mkdir(parents=True, exist_ok=True)\n\n for ifpath in tqdm.tqdm(ifpaths, unit=\"utterance\"):\n _process_utterance(ifpath, input_dir, output_dir, processor, model)\n\n\nif __name__ == \"__main__\":\n config = get_cfg_defaults()\n parser = argparse.ArgumentParser(description=\"compute utterance embed.\")\n parser.add_argument(\n \"--config\",\n metavar=\"FILE\",\n help=\"path of the config file to overwrite to default config with.\")\n parser.add_argument(\n \"--input\", type=str, help=\"path of the audio_file folder.\")\n parser.add_argument(\n \"--pattern\",\n type=str,\n default=\"*.wav\",\n help=\"pattern to filter audio files.\")\n 
parser.add_argument(\n \"--output\",\n metavar=\"OUTPUT_DIR\",\n help=\"path to save checkpoint and logs.\")\n\n # load from saved checkpoint\n parser.add_argument(\n \"--checkpoint_path\", type=str, help=\"path of the checkpoint to load\")\n\n # running\n parser.add_argument(\n \"--device\",\n type=str,\n choices=[\"cpu\", \"gpu\"],\n help=\"device type to use, cpu and gpu are supported.\")\n\n # overwrite extra config and default config\n parser.add_argument(\n \"--opts\",\n nargs=argparse.REMAINDER,\n help=\"options to overwrite --config file and the default config, passing in KEY VALUE pairs\"\n )\n\n args = parser.parse_args()\n if args.config:\n config.merge_from_file(args.config)\n if args.opts:\n config.merge_from_list(args.opts)\n config.freeze()\n print(config)\n print(args)\n\n main(config, args)\n" ]
[ [ "numpy.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cecivieira/concatenador-tabelas
[ "7f904eac4944b4184770b009b1b1ec1be092afff" ]
[ "concatenar-tabelas.py" ]
[ "import streamlit as st\nimport pandas as pd\nimport base64\n\ndef get_table_download_link(df):\n '''Essa função foi uma solução encontrada no fórum do StreamLit. Você pode encontrar a discussão aqui: https://discuss.streamlit.io/t/file-download-workaround-added-to-awesome-streamlit-org/1244\n '''\n csv = df.to_csv(index=False)\n b64 = base64.b64encode(csv.encode()).decode()\n href = f'<a href=\"data:file/csv;base64,{b64}\">Download CSV File</a>'\n\n return href\n\ndef main():\n st.set_option('deprecation.showfileUploaderEncoding', False)\n st.image('https://media.giphy.com/media/IboGSjkXaOre0/giphy.gif', caption='imagem animada de uma esteira com mesas e uma boneca virando-as', use_column_width=True, format='gif')\n # Seleção tabelas\n st.title('Concatenador de tabelas')\n st.subheader('')\n st.subheader('Selecione as tabelas que você deseja concatenar')\n\n csv_1 = st.file_uploader('Tabela 1', type='csv')\n csv_2 = st.file_uploader('Tabela 2', type='csv')\n\n df_1 = pd.DataFrame()\n df_2 = pd.DataFrame()\n df_final = pd.DataFrame()\n\n if csv_1 is not None:\n df_1 = pd.read_csv(csv_1)\n\n if csv_2 is not None:\n df_2 = pd.read_csv(csv_2)\n\n # Tipo de concatenação\n st.subheader('')\n st.subheader('Tipo de concatenação')\n add_selectbox = st.selectbox(\n 'Selecione o tipo de concatenação que você deseja realizar:', (['', 'Linhas no final'])\n )\n\n if add_selectbox == 'Linhas no final':\n df_final = pd.concat([df_1, df_2])\n st.subheader('')\n st.subheader('Faça download da tabela concatenada')\n st.markdown(get_table_download_link(df_final), unsafe_allow_html=True)\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "pandas.concat", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
outstandingcandy/pytorchvideo
[ "c71b3c21fd670a813948b6ddf12a1ca1d01b3bc7" ]
[ "pytorchvideo/data/youcook2.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport os\nimport json\nfrom collections import defaultdict\nfrom typing import Any, Callable, Dict, Optional, Set, Tuple, Type\nimport gensim\n\nimport torch\nfrom iopath.common.file_io import g_pathmgr\nfrom pytorchvideo.data.clip_sampling import ClipInfo, ClipSampler\nfrom pytorchvideo.data.labeled_video_dataset import LabeledVideoDataset\n\nimport numpy as np\nimport re\n\nclass YouCook2LabeledVideoPaths:\n \"\"\"\n Pre-processor YouCook2 Dataset\n \"\"\"\n def __init__(\n self,\n word2vec,\n we_dim: Optional[int] = 300,\n max_words: Optional[int] = 20\n ) -> None:\n self.we = word2vec\n self.we_dim = we_dim\n self.max_words = max_words\n\n def from_json(\n self,\n file_path: str,\n video_path_prefix: str,\n label_map_file: Optional[str] = None\n ) -> YouCook2LabeledVideoPaths:\n \"\"\"\n JSON Format:\n 'database':\n {\n 'video_id': {\n 'duration':241.62\n 'subset':'training'\n 'recipe_type':'113'\n 'annotations':[\n 0:{'segment': [90, 102], 'id': 0, 'sentence': 'spread margarine on ...hite bread'}, \n 1:{'segment': [90, 102], 'id': 1, 'sentence': 'spread margarine on ...hite bread'}\n ]\n }\n }\n Args:\n file_path (str): The path to the file to be read.\n video_path_prefix (str): Path to be augumented to the each relative frame\n path to get the global frame path.\n label_map_file (str): Path to a .pbtxt containing class id's and class names.\n If not set, label_map is not loaded and bbox labels are not pruned\n based on allowable class_id's in label_map.\n \"\"\"\n\n assert g_pathmgr.exists(file_path), f\"{file_path} not found.\"\n video_paths_and_label = []\n json_data = json.load(open(file_path))\n for video, info in json_data[\"database\"].items():\n file_path = os.path.join(video_path_prefix, \"{}.mp4\".format(video))\n is_file = g_pathmgr.isfile(file_path)\n if is_file:\n if os.path.getsize(file_path) < 7 * 1024 * 1024:\n print(\"selected video {}\".format(file_path))\n for label in info[\"annotations\"]:\n if self.we is not None:\n words = self._tokenize_text(label[\"sentence\"])\n text_emb = self._words_to_we(words)\n label[\"text_emb\"] = text_emb\n video_paths_and_label.append((file_path, label))\n assert (\n len(video_paths_and_label) > 0\n ), f\"Failed to load dataset from {file_path}.\"\n return video_paths_and_label\n\n def _zero_pad_tensor(self, tensor, size):\n if len(tensor) >= size:\n return tensor[:size]\n else:\n zero = np.zeros((size - len(tensor), self.we_dim), dtype=np.float32)\n return np.concatenate((tensor, zero), axis=0)\n\n def _tokenize_text(self, sentence):\n w = re.findall(r\"[\\w']+\", str(sentence))\n return w\n\n def _words_to_we(self, words):\n words = [word for word in words if word in self.we.key_to_index]\n if words:\n we = self._zero_pad_tensor(self.we[words], self.max_words)\n return torch.from_numpy(we)\n else:\n return torch.zeros(self.max_words, self.we_dim)\n\n\n\n\nclass TimeStampClipSampler:\n \"\"\"\n A sepcialized clip sampler for sampling video clips around specific\n timestamps. 
This is particularly used in datasets like Ava wherein only\n a specific subset of clips in the video have annotations\n \"\"\"\n\n def __init__(self, clip_sampler: ClipSampler) -> None:\n \"\"\"\n Args:\n clip_sampler (`pytorchvideo.data.ClipSampler`): Strategy used for sampling\n between the untrimmed clip boundary.\n \"\"\"\n self.clip_sampler = clip_sampler\n\n def __call__(\n self, last_clip_time: float, video_duration: float, annotation: Dict[str, Any]\n ) -> ClipInfo:\n \"\"\"\n Args:\n last_clip_time (float): Not used for TimeStampClipSampler.\n video_duration: (float): Not used for TimeStampClipSampler.\n annotation (Dict): Dict containing time step to sample aroud.\n Returns:\n clip_info (ClipInfo): includes the clip information of (clip_start_time,\n clip_end_time, clip_index, aug_index, is_last_clip). The times are in seconds.\n clip_index, aux_index and is_last_clip are always 0, 0 and True, respectively.\n \"\"\"\n clip_start_sec = annotation[\"segment\"][0]\n clip_end_sec = annotation[\"segment\"][1]\n clip_index = annotation[\"id\"]\n return ClipInfo(\n clip_start_sec,\n clip_end_sec,\n clip_index,\n 0,\n True,\n )\n\n\ndef YouCook2(\n json_file: str,\n word2vec: KeyedVectors,\n video_path_prefix: str = \"\",\n label_map_file: Optional[str] = None,\n clip_sampler: Callable = ClipSampler,\n video_sampler: Type[torch.utils.data.Sampler] = torch.utils.data.RandomSampler,\n transform: Optional[Callable[[dict], Any]] = None,\n) -> None:\n youcook2_labeled_video_paths = YouCook2LabeledVideoPaths(word2vec)\n labeled_video_paths = youcook2_labeled_video_paths.from_json(\n json_file,\n video_path_prefix,\n label_map_file,\n )\n return LabeledVideoDataset(\n labeled_video_paths=labeled_video_paths,\n clip_sampler=TimeStampClipSampler(clip_sampler),\n transform=transform,\n video_sampler=video_sampler,\n decode_audio=False,\n )\n\ndef main():\n dataset = YouCook2(\"/Users/tangjie/Downloads/youcookii_annotations_trainval.json\", \"\", \"/Users/tangjie/RedPanda/crawler/youcook/video\")\n for data in dataset:\n print(data)\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.concatenate", "torch.from_numpy", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pferreirafabricio/graphic-computation
[ "28f64673eda8796e233e8bb8de49225e6f6c5c35" ]
[ "draws/drawing-sky.py" ]
[ "import cv2\r\nimport numpy as np\r\n\r\nwidth = 400\r\nheight = 400\r\n\r\n# Colors\r\nblue = (255, 0, 0)\r\ngreen = (20, 180, 0)\r\nblack = (0, 0, 0)\r\nred = (0, 0, 255)\r\n\r\nscreen = np.ones((400, 400, 3))\r\n\r\n# cv2.line(screen, (0, 0), (width, height), blue)\r\n# cv2.line(screen, (int(width / 2), 0), (int(width / 2), height), green, 5)\r\n\r\n\r\n\"\"\"\r\nExercise \r\nA mountain, sky, clouds and a building.\r\nThe elements should appear in perspective.\r\n\"\"\"\r\n\r\ndef draw_clouds():\r\n cloud_min_height = 20\r\n cloud_max_height = 180\r\n\r\n cloud_min_width = 30\r\n cloud_max_width = 380\r\n\r\n cv2.circle(screen, (cloud_min_width, 60), 20, blue, -1)\r\n cv2.circle(screen, (cloud_min_width + 20, 70), 20, blue, -1)\r\n cv2.circle(screen, (cloud_min_width + 25, 40), 20, blue, -1)\r\n cv2.circle(screen, (cloud_min_width + 40, 50), 20, blue, -1)\r\n\r\n # cv2.line(screen, (cloud_min_width, 60), (cloud_min_width, 80), black, 2)\r\n # cv2.line(screen, (cloud_min_width, 80), (cloud_min_width + 40, 80), black, 2)\r\n # cv2.line(screen, (cloud_min_width + 40, 100), (cloud_min_width + 70, 100), black, 2)\r\n\r\ndef draw_building():\r\n building_min_height = 40\r\n building_max_height = 200\r\n\r\n building_min_width = 100\r\n building_max_width = 380\r\n\r\n cv2.rectangle(screen, (building_min_width, building_max_height), (building_min_width + 40, building_max_height - 50), green, 2)\r\n cv2.rectangle(screen, (building_min_width + 20, building_max_height - 20), (building_min_width + 30, building_max_height - 30), green, 2)\r\n cv2.rectangle(screen, (building_min_width + 40, building_max_height), (building_min_width + 70, building_max_height - 80), green, 2)\r\n cv2.rectangle(screen, (building_min_width + 60, building_max_height - 40), (building_min_width + 50, building_max_height - 50), green, 2)\r\n cv2.rectangle(screen, (building_min_width + 70, building_max_height), (building_min_width + 100, building_max_height - 30), green, 2)\r\n cv2.rectangle(screen, (building_min_width + 80, building_max_height - 15), (building_min_width + 90, building_max_height - 25), green, 2)\r\n\r\ndef draw_mountain():\r\n mountain_min_height = 40\r\n mountain_max_height = 200\r\n\r\n mountain_min_width = 240\r\n mountain_max_width = 380\r\n\r\n points = np.array(\r\n [\r\n [mountain_min_width, mountain_max_height],\r\n [mountain_min_width + 30, mountain_max_height - 40],\r\n [mountain_min_width + 50, mountain_max_height - 40],\r\n [mountain_min_width + 70, mountain_max_height - 100],\r\n [mountain_max_width, mountain_max_height]\r\n ],\r\n np.int32\r\n )\r\n # points = points.reshape((-1,1,2))\r\n cv2.polylines(screen, [points], False, red)\r\n\r\n# Sky\r\ncv2.line(screen, (0, int(height / 2)), (width, int(height / 2)), black, 2)\r\n\r\n# Clouds\r\ndraw_clouds()\r\n\r\n# Mountain\r\ndraw_mountain()\r\n\r\n# Building\r\ndraw_building()\r\n\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\ncv2.putText(screen, 'Graphics Computing', (int(width / 10), int(height / 2) + 110), font, 1, black, 2, cv2.LINE_AA)\r\n\r\ncv2.imshow(\"Canvas\", screen)\r\ncv2.waitKey(0)\r\n\r\n\r\n" ]
[ [ "numpy.array", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MiRudnik/quantum_optimization
[ "9c63c9164d9a8620d7610cc0576a1e3ee7319d98" ]
[ "histograms.py" ]
[ "# import pandas as pd\nimport random\n\nimport matplotlib.pyplot as plt\nimport plotly.graph_objects as go\nimport numpy as np\nimport plotly.express as px\nfrom pandas import DataFrame\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nres_8 = [45, 27, 15, 28, 20, 8, 19, 21, 9, 24, 3, 16, 7, 12, 6, 9, 1, 3, 17, 27, 19, 26, 24, 17, 10, 15, 8, 6, 9, 5, 6,\n 2, 4, 3, 8, 0, 2, 4, 3, 1, 2, 4, 2, 3, 0, 4, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n# res_8_normalized = [float(x)/float(sum(res_8)) for x in res_8]\ncorrect_8_number = 98\nres_10 = [143, 99, 55, 13, 33, 24, 12, 0, 0, 16, 15, 2, 0, 0, 2, 9, 15, 25, 23, 34, 57, 76, 36, 26, 18, 14, 11, 7, 7, 9,\n 6, 4, 4, 3, 10, 10, 20, 11, 8, 9, 18, 17, 14, 5, 5, 3, 4, 4, 0, 1, 2, 4, 1, 2, 1, 5, 1, 5, 0, 2, 1, 1, 1, 3,\n 2, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1]\n# res_10_normalized = [float(x)/float(sum(res_10)) for x in res_10]\ncorrect_10_numbers = 27\nres_15 = [217, 94, 64, 144, 258, 144, 100, 138, 155, 112, 71, 70, 71, 55, 44, 35, 28, 17, 24, 16, 16, 12, 14, 10, 6, 8,\n 6, 2, 2, 1, 1, 6, 1, 3, 0, 2, 0, 2, 2, 0, 1, 0, 1, 0, 0, 2, 1, 0, 0, 1]\n# res_15_normalized = [float(x)/float(sum(res_15)) for x in res_15]\ncorrect_15_numbers = 4\nres_18 = [124, 92, 42, 93, 126, 102, 83, 141, 96, 98, 109, 84, 76, 72, 90, 76, 63, 66, 40, 52, 45, 27, 29, 35, 24, 25,\n 13, 15, 10, 7, 10, 4, 5, 2, 5, 2, 3, 2, 3, 0, 2, 1, 0, 0, 0, 0, 1, 0, 1, 1]\n# res_18_normalized = [float(x)/float(sum(res_18)) for x in res_18]\ncorrect_18_numbers = 0\n\n\ndef get_correct_and_wrong_list(source, correct_number):\n tmp = correct_number\n elements_number = 0\n while tmp > 0:\n tmp -= source[elements_number]\n elements_number += 1\n correct_solutions = source[:elements_number]\n if len(correct_solutions) > 0:\n correct_solutions[-1] += tmp\n wrong_solutions = source[(elements_number - 1):]\n return correct_solutions, wrong_solutions\n\n\nres_8_correct, res_8_wrong = get_correct_and_wrong_list(res_8, correct_8_number)\nres_8_correct, res_8_wrong = [float(x)/float(sum(res_8)) for x in res_8_correct], [float(x)/float(sum(res_8)) for x in res_8_wrong]\nres_10_correct, res_10_wrong = get_correct_and_wrong_list(res_10, correct_10_numbers)\nres_10_correct, res_10_wrong = [float(x)/float(sum(res_8)) for x in res_10_correct], [float(x)/float(sum(res_10)) for x in res_10_wrong]\nres_15_correct, res_15_wrong = get_correct_and_wrong_list(res_15, correct_15_numbers)\nres_15_correct, res_15_wrong = [float(x)/float(sum(res_8)) for x in res_15_correct], [float(x)/float(sum(res_15)) for x in res_15_wrong]\n\n\n# res_18_correct, res_18_wrong = get_correct_and_wrong_list(res_18, correct_18_numbers)\n\n\ndef prepare_histogram(correct, wrong, ax, id):\n total_len = len(correct) + int(len(wrong)) - 1\n k = 2 if id <= 2 else 1\n xes = [''] + ['low energy'] + ['' for i in range(k)] + \\\n ['high energy'] + ['' for j in range(int(total_len - 45))]\n ax.bar(np.arange(len(correct) - 1, total_len), wrong, color='red', label='wrong solutions')\n ax.bar(np.arange(len(correct)), correct, color='green', label='correct solutions')\n # ax.xticks(range(total_len), xes)\n ax.legend(loc='best')\n ax.set_xticklabels(xes)\n ax.set_title(\"Problem {}\".format(id))\n # ax.set_axis_off()\n # ax.patch.set_visible(False)\n # ax.set_xlabel('ddd' + str(random.randint(5,156)))\n\n\nfig, axs = plt.subplots(2, 2)\nprepare_histogram(res_8_correct, res_8_wrong, axs[0,0], 1)\nprepare_histogram(res_10_correct, res_10_wrong, axs[0, 1], 
2)\nprepare_histogram(res_15_correct, res_15_wrong, axs[1, 0], 3)\nprepare_histogram([0], [float(x)/float(sum(res_18)) for x in res_18], axs[1, 1], 4)\nfor ax in axs.flat:\n ax.set(ylabel='Probability density')\n# plt.show()\n# plt.subplots_adjust(top=2)\nfig.tight_layout(pad=0.75)\n\npdf = PdfPages(\"histogram_dwave.pdf\")\npdf.savefig()\npdf.close()\n# y_pos = np.arange(len(res_8))\n# frame1 = plt.gca()\n# frame1.axes.get_xaxis().set_visible(False)\n# plt.xticks(y_pos, ['.' for i in range(len(res_8))])\n\n# res8dict = [{\"value\": val, \"color\": 'wrong'} for val in res_8]\n# for i in range(4):\n# res8dict[i][\"color\"] = \"correct\"\n# df = DataFrame(res8dict)\n# fig = px.bar(df, y=\"value\", color='color')\n# fig.show()\n\n# x0 = np.random.randn(500)\n# Add 1 to shift the mean of the Gaussian distribution\n# x1 = np.random.randn(500) + 1\n\n\n# range50 = list(range(10))\n# range50.extend(['.' for i in range(40)])\n# range80 = list(range(10))\n# range80.extend(['.' for i in range(70)])\n#\n# all = [res_8, res_10, res_15, res_18]\n#\n## print(list(map(lambda x: len(x), all)))\n#\n# df8 = pd.DataFrame({'number of boxes':range80, 'number of occurences':res_8})\n# ax8 = df8.plot.bar(x='number of boxes', y='number of occurences', rot=0)\n# plt.show()\n#\n# df10 = pd.DataFrame({'number of boxes':range80, 'number of occurences':res_10})\n# ax10 = df10.plot.bar(x='number of boxes', y='number of occurences', rot=0)\n# plt.show()\n#\n# df15 = pd.DataFrame({'number of boxes':range50, 'number of occurences':res_15})\n# ax15 = df15.plot.bar(x='number of boxes', y='number of occurences', rot=0)\n# plt.show()\n#\n# df18 = pd.DataFrame({'number of boxes':range50, 'number of occurences':res_18})\n# ax18 = df18.plot.bar(x='number of boxes', y='number of occurences', rot=0)\n# plt.show()\n" ]
[ [ "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wittawatj/cadgan
[ "92f2c99f4e6c58d3b49caad8f25330f22135dfa8", "92f2c99f4e6c58d3b49caad8f25330f22135dfa8" ]
[ "cadgan/gan/fashion_mnist/dcgan.py", "cadgan/kernel_tf.py" ]
[ "import math\nimport os\n\nimport cadgan.glo as glo\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision.utils import save_image\n\n# DCGAN code heavily based on\n# https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/dcgan/dcgan.py\n\n\nclass Generator(nn.Module):\n def __init__(self, latent_dim=100):\n super(Generator, self).__init__()\n\n img_size = 28\n channels = 1\n self.latent_dim = latent_dim\n self.init_size = img_size // 4\n\n self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))\n self.conv_blocks = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(64, channels, 3, stride=1, padding=1),\n nn.Tanh(),\n )\n\n def forward(self, z):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n return img\n\n def save(self, f):\n \"\"\"\n Save the state of this model to a file.\n \"\"\"\n torch.save(self, f)\n\n @staticmethod\n def load(f, **opt):\n \"\"\"\n Load a Generator from a file. To be used with save().\n \"\"\"\n return torch.load(f, **opt)\n\n\n# ---------\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n\n def discriminator_block(in_filters, out_filters, bn=True):\n block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block\n\n channels = 1\n self.model = nn.Sequential(\n *discriminator_block(channels, 16, bn=False), # size 28 -> 15\n *discriminator_block(16, 32), # 14 -> 7\n *discriminator_block(32, 64), # 7 -> 4\n *discriminator_block(64, 128), # 4 -> 2\n )\n\n # The height and width of downsampled image\n img_size = 28\n self.adv_layer = nn.Sequential(nn.Linear(128 * 2 * 2, 1), nn.Sigmoid())\n\n def forward(self, img):\n out = self.model(img)\n out = out.view(out.shape[0], -1)\n # print(out.shape)\n validity = self.adv_layer(out)\n return validity\n\n def save(self, f):\n \"\"\"\n Save the state of this model to a file.\n \"\"\"\n torch.save(self, f)\n\n @staticmethod\n def load(f):\n \"\"\"\n Load a Generator from a file. 
To be used with save().\n \"\"\"\n return torch.load(f)\n\n\n# ---------\n\n\ndef weights_init_normal(m):\n classname = m.__class__.__name__\n if classname.find(\"Conv\") != -1:\n torch.nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find(\"BatchNorm2d\") != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n\n\n# parser = argparse.ArgumentParser()\n# parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')\n# parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')\n# parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')\n# parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')\n# parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')\n# parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')\n# parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space')\n# parser.add_argument('--sample_interval', type=int, default=400, help='interval between image sampling')\n\n\nclass DCGAN(object):\n \"\"\"\n Class to manage training, model saving for DCGAN.\n \"\"\"\n\n def __init__(\n self,\n prob_model_dir=glo.prob_model_folder(\"fashion_dcgan\"),\n data_dir=glo.data_file(\"/home/wgondal/cadgan/data/fashion\"),\n use_cuda=True,\n n_epochs=30,\n batch_size=2 ** 6,\n lr=0.0002,\n b1=0.5,\n b2=0.999,\n n_cpu=4,\n latent_dim=100,\n sample_interval=400,\n ):\n \"\"\"\n n_epochs: number of epochs of training\n batch_size: size of the batches\n lr: adam: learning rate\n b1: adam: decay of first order momentum of gradient\n b2: adam: decay of first order momentum of gradient\n n_cpu: number of cpu threads to use during batch generation\n latent_dim: dimensionality of the latent space\n sample_interval: interval between image sampling\n \"\"\"\n print(prob_model_dir)\n os.makedirs(prob_model_dir, exist_ok=True)\n self.prob_model_dir = prob_model_dir\n self.data_dir = data_dir\n self.use_cuda = use_cuda\n self.n_epochs = n_epochs\n self.batch_size = batch_size\n self.lr = lr\n self.b1 = b1\n self.b2 = b2\n self.n_cpu = n_cpu\n self.latent_dim = latent_dim\n self.sample_interval = sample_interval\n\n def sample_noise(self, n):\n \"\"\"\n Draw n noise vectors (input to the generator).\n \"\"\"\n return torch.Tensor(np.random.normal(0, 1, (n, self.latent_dim))).float()\n\n def save_state(self, f):\n \"\"\"\n Save state of this object to a file.\n \"\"\"\n torch.save(self, f)\n\n def load_state(self, f):\n \"\"\"\n Load the state of a DCGAN object from a file. \n Return a DCGAN object.\n \"\"\"\n return torch.load(f)\n\n def train(self):\n \"\"\"\n Traing a DCGAN model with the training hyperparameters as specified in\n the constructor. 
Directly modify the state of this object to store all\n relevant variables.\n\n * self.generator stores the trained generator.\n \"\"\"\n\n # Loss function\n adversarial_loss = torch.nn.BCELoss()\n\n # Initialize generator and discriminator\n img_size = 28\n generator = Generator(latent_dim=self.latent_dim)\n discriminator = Discriminator()\n\n cuda = True if torch.cuda.is_available() else False\n\n if self.use_cuda and cuda:\n generator.cuda()\n discriminator.cuda()\n adversarial_loss.cuda()\n\n # Initialize weights\n generator.apply(weights_init_normal)\n discriminator.apply(weights_init_normal)\n\n print(self.data_dir)\n # Configure data loader\n os.makedirs(self.data_dir, exist_ok=True)\n dataloader = torch.utils.data.DataLoader(\n datasets.FashionMNIST(\n self.data_dir,\n train=True,\n download=True,\n transform=transforms.Compose(\n [\n # transforms.Resize(self.img_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]\n ),\n ),\n batch_size=self.batch_size,\n shuffle=True,\n )\n\n # Optimizers\n optimizer_G = torch.optim.Adam(generator.parameters(), lr=self.lr, betas=(self.b1, self.b2))\n optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=self.lr, betas=(self.b1, self.b2))\n\n Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\n # ----------\n # Training\n # ----------\n\n for epoch in range(self.n_epochs):\n for i, (imgs, _) in enumerate(dataloader):\n\n # Adversarial ground truths\n valid = Variable(Tensor(imgs.shape[0], 1).fill_(1.0), requires_grad=False)\n fake = Variable(Tensor(imgs.shape[0], 1).fill_(0.0), requires_grad=False)\n\n # Configure input\n real_imgs = Variable(imgs.type(Tensor))\n\n # -----------------\n # Train Generator\n # -----------------\n\n optimizer_G.zero_grad()\n\n # Sample noise as generator input\n z = Variable(self.sample_noise(imgs.shape[0]).type(Tensor))\n\n # Generate a batch of images\n gen_imgs = generator(z)\n\n # Loss measures generator's ability to fool the discriminator\n g_loss = adversarial_loss(discriminator(gen_imgs), valid)\n\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n\n optimizer_D.zero_grad()\n\n # Measure discriminator's ability to classify real from generated samples\n real_loss = adversarial_loss(discriminator(real_imgs), valid)\n fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)\n d_loss = (real_loss + fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n if i % 50 == 0:\n print(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % (epoch, self.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())\n )\n\n batches_done = epoch * len(dataloader) + i\n if batches_done % self.sample_interval == 0:\n save_image(\n gen_imgs.data[:25], \"%s/%d.png\" % (self.prob_model_dir, batches_done), nrow=5, normalize=True\n )\n\n # keep the state of the generator\n self.generator = generator\n if epoch % 5 == 0:\n # save the generator\n g = self.generator\n g.save(\"/home/wgondal/cadgan/prob_models/fashion_dcgan/fashion_dcgan_ep_%d_bs64.pt\" % epoch)\n\n\n# ---------\n", "\"\"\"Module containing kernel related classes\"\"\"\nfrom __future__ import division\n\nfrom abc import ABCMeta, abstractmethod\nfrom builtins import object, str\n\nimport numpy as np\nimport tensorflow as tf\nfrom future.utils import with_metaclass\nfrom past.utils import old_div\n\n__author__ = \"wittawat\"\n\n\nclass Kernel(with_metaclass(ABCMeta, object)):\n \"\"\"Abstract class for kernels. 
Inputs to all methods are numpy arrays.\"\"\"\n\n @abstractmethod\n def eval(self, X, Y):\n \"\"\"\n Evaluate the kernel on data X and Y\n X: nx x d where each row represents one point\n Y: ny x d\n return nx x ny Gram matrix\n \"\"\"\n pass\n\n @abstractmethod\n def pair_eval(self, X, Y):\n \"\"\"Evaluate k(x1, y1), k(x2, y2), ...\n X: n x d where each row represents one point\n Y: n x d\n return a 1d numpy array of length n.\n \"\"\"\n pass\n\n # @abstractmethod\n # def is_compatible(self, k):\n # \"\"\"\n # Return True if the given kernel k is \"compatible\" with this kernel.\n # The term compatible is vague. Ideally we want to be able to check that\n # the two kernels define the same RKHS. However, this is difficult to\n # implement without an elaborate type system.\n\n # Simply check whether the kernel has the same type and the same (or\n # approximately the same) parameters.\n # \"\"\"\n # pass\n\n def get_feature_map(self):\n \"\"\"\n Return the underlying feature map (an instance of FeatureMap) of the\n kernel. Return None if a closed-form feature map is not available\n e.g., the output of the underlying feature map is infinite-dimensional.\n \"\"\"\n return None\n\n def feature_map_available(self):\n \"\"\"\n Return True if an explicit feature map is available.\n \"\"\"\n return self.get_feature_map() is not None\n\n\nclass FeatureMap(with_metaclass(ABCMeta, object)):\n \"\"\"\n Abstract class for a feature map of a kernel.\n \"\"\"\n\n @abstractmethod\n def __call__(self, x):\n \"\"\"\n Return a feature vector for the input x.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def input_shape(self):\n \"\"\"\n Return the expected input shape of this feature map (excluding the\n batch dimension). For instance if each point is a 32x32 pixel image,\n then return (32, 32).\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def output_shape(self):\n \"\"\"\n Return the output shape of this feature map.\n \"\"\"\n raise NotImplementedError()\n\n\n# end class FeatureMap\n\n\nclass FuncFeatureMap(FeatureMap):\n def __init__(self, f, in_shape, out_shape):\n \"\"\"\n f: a callable object representing the feature map.\n in_shape: expected shape of the input\n out_shape: expected shape of the output\n \"\"\"\n self.f = f\n self.in_shape = in_shape\n self.out_shape = out_shape\n\n def __call__(self, x):\n f = self.f\n return f(x)\n\n def input_shape(self):\n return self.in_shape\n\n def output_shape(self):\n return self.out_shape\n\n\n# end of FuncFeatureMap\nclass TFKernel(Kernel):\n \"\"\"\n An abstract class for a kernel for Tensorflow.\n Subclasses implementing this should rely on only operations which are\n compatible with Tensorflow.\n \"\"\"\n\n pass\n\n\n# end PTKernel\n\n\nclass TFExplicitKernel(TFKernel):\n \"\"\"\n A class for kernel that is defined as \n k(x,y) = <f(x), f(y)> \n for a finite-output f (of type FeatureMap).\n \"\"\"\n\n def __init__(self, fm):\n \"\"\"\n fm: a FeatureMap parameterizing the kernel. 
This feature map is\n expected to take in a Pytorch tensor as the input.\n \"\"\"\n self.fm = fm\n\n @abstractmethod\n def eval(self, X, Y):\n \"\"\"\n Evaluate the kernel on Pytorch tensors X and Y\n X: nx x d where each row represents one point\n Y: ny x d\n return nx x ny Gram matrix\n \"\"\"\n f = self.fm\n FX = f(X)\n FY = f(Y)\n K = tf.matmul(FX, FY, transpose_b=True)\n return K\n\n def pair_eval(self, X, Y):\n \"\"\"Evaluate k(x1, y1), k(x2, y2), ...\n X: n x d where each row represents one point\n Y: n x d\n return a 1d Pytorch array of length n.\n \"\"\"\n f = self.fm\n FX = f(X)\n FY = f(Y)\n vec = tf.reduce_sum(FX * FY, 1)\n return vec\n\n # def is_compatible(self, k):\n # \"\"\"\n # This compatibility check is very weak.\n # \"\"\"\n # if isinstance(k, PTExplicitKernel):\n # fm1 = self.fm\n # fm2 = k.fm\n # return fm1.input_shape() == fm2.input_shape() and \\\n # fm1.output_shape() == fm2.output_shape()\n # return False\n\n def get_feature_map(self):\n return self.fm\n\n\n# end PTExplicitKernel\nclass TFKFuncCompose(TFKernel):\n \"\"\"\n A kernel given by k'(x,y) = k(f(x), f(y)), where f is the specified \n function, and k is the specified kernel.\n f has to be callable.\n \"\"\"\n\n def __init__(self, k, f):\n \"\"\"\n k: a PTKernel\n f: a callable object or a function\n \"\"\"\n self.k = k\n self.f = f\n\n def eval(self, X, Y):\n f = self.f\n k = self.k\n fx = f(X)\n fy = f(Y)\n return k.eval(fx, fy)\n\n def pair_eval(self, X, Y):\n f = self.f\n k = self.k\n fx = f(X)\n fy = f(Y)\n return k.pair_eval(fx, fy)\n\n\n# end class PTKFuncCompose\nclass TFKPoly(TFKernel):\n \"\"\"\n Polynomial kernel of the form\n k(x,y) = (x^T y + c)^d\n \"\"\"\n\n def __init__(self, c, d):\n if c < 0:\n raise ValueError(\"c has to be positive real. Was {}\".format(c))\n if d < 0:\n raise ValueError(\"d has to be positive integer. Was {}\".format(d))\n self.c = c\n self.d = d\n\n def eval(self, X, Y):\n return (tf.matmul(X, Y, transpose_b=True) + self.c) ** self.d\n\n def pair_eval(self, X, Y):\n return (tf.reduce_sum(X * Y, 1) + self.c) ** self.d\n\n\n# end class PTKPoly\nclass TFKLinear(TFKernel):\n \"\"\"\n Linear kernel. Pytorch implementation.\n \"\"\"\n\n def __init__(self):\n pass\n\n def eval(self, X, Y):\n return tf.matmul(X, Y, transpose_b=True)\n\n def pair_eval(self, X, Y):\n return tf.reduce_sum(X * Y, 1)\n\n\n# end class TFKLinear\nclass TFKIMQ(TFKernel):\n \"\"\"\n The inverse multiquadric (IMQ) kernel studied in \n\n Measure Sample Quality with Kernels \n Jackson Gorham, Lester Mackey\n\n k(x,y) = (c^2 + ||x-y||^2)^b \n where c > 0 and b < 0. Following a theorem in the paper, this kernel is \n convergence-determining only when -1 < b < 0. In the experiments, \n the paper sets b = -1/2 and c = 1.\n \"\"\"\n\n def __init__(self, b=-0.5, c=1.0):\n if not b < 0:\n raise ValueError(\"b has to be negative. Was {}\".format(b))\n if not c > 0:\n raise ValueError(\"c has to be positive. 
Was {}\".format(c))\n self.b = b\n self.c = c\n\n def eval(self, X, Y):\n b = self.b\n c = self.c\n sumx2 = tf.reshape(tf.reduce_sum(X ** 2, 1), [-1, 1])\n sumy2 = tf.reshape(tf.reduce_sum(Y ** 2, 1), [1, -1])\n with tf.control_dependencies(\n [tf.assert_non_negative(sumx2, name=\"sumx2_nonneg\"), tf.assert_non_negative(sumy2, name=\"sumy2_nonneg\")]\n ):\n D2 = sumx2 - 2.0 * tf.matmul(X, Y, transpose_b=True) + sumy2\n\n D2_no0 = tf.maximum(0.0, D2)\n with tf.control_dependencies([tf.assert_non_negative(D2_no0, name=\"D2_nonneg\")]):\n K = (c ** 2 + D2_no0) ** b\n return K\n\n def mean_eval(self, X, Y):\n return tf.reduce_mean(self.eval(X, Y))\n\n def pair_eval(self, X, Y):\n assert X.shape[0] == Y.shape[0]\n b = self.b\n c = self.c\n return (c ** 2 + tf.reduce_sum((X - Y) ** 2, 1)) ** b\n" ]
[ [ "torch.nn.Dropout2d", "torch.load", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.BCELoss", "torch.nn.Tanh", "torch.nn.Linear", "torch.nn.Sigmoid", "numpy.random.normal", "torch.nn.init.normal_", "torch.nn.Upsample", "torch.nn.LeakyReLU", "torch.nn.BatchNorm2d", "torch.cuda.is_available", "torch.save" ], [ "tensorflow.reduce_sum", "tensorflow.matmul", "tensorflow.maximum", "tensorflow.assert_non_negative" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
jackaranda/climatedash
[ "4fc279b2308d33ac64691629d063357d39cbe628" ]
[ "data/covid-19/update.py" ]
[ "import urllib.request\nimport datetime\n\ndt = datetime.datetime\n\nimport numpy as np\nimport pandas as pd\n\nBASE_URL = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/\"\n\nurls = {\n\t'cases': BASE_URL + \"time_series_covid19_confirmed_global.csv\",\n\t'deaths': BASE_URL + \"time_series_covid19_deaths_global.csv\",\n\t'recovered': BASE_URL + \"time_series_covid19_recovered_global.csv\"\n}\n\n\nfor variable in ['cases', 'deaths', 'recovered']:\n\t\n\traw = pd.read_csv(urls[variable], index_col=[0,1], na_filter=False).groupby(\"Country/Region\").sum()\n\tindex = list(map(lambda s: dt(int(s.split('/')[2])+2000, int(s.split('/')[0]), int(s.split('/')[1])), raw.columns[2:]))\n\tdf = pd.DataFrame(raw[raw.columns[2:]].T).reset_index(drop=True)\n\tdf['date'] = index\n\tdf.set_index('date', inplace=True)\n\tprint(df)\n\tdf.to_json(variable+'.json')\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
nancy-nayak/rethinking-bnn
[ "f7595c13bc56c69a9f1ed6d689a0bc569f4072bf" ]
[ "latentweights/mnistLENET5.py" ]
[ "import tensorflow as tf\nimport binary_layer \nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nfrom mnist import download_mnist\nimport pickle\nimport xlsxwriter \n\n\n\ndef conv_pool_bn(pre_layer, kernel_num, kernel_size, padding, pool_size, activation, training, epsilon=1e-4, alpha=.1, binary=True, stochastic=False, H=1., W_LR_scale=\"Glorot\"):\n\tconv = binary_layer.conv2d_binary(pre_layer, kernel_num, kernel_size, padding=padding, binary=binary, stochastic=stochastic, H=H, W_LR_scale=W_LR_scale)\n\tpool = tf.layers.max_pooling2d(conv, pool_size=pool_size, strides=pool_size)\n\tbn = binary_layer.batch_normalization(pool, epsilon=epsilon, momentum = 1-alpha, training=training)\n\toutput = activation(bn)\n\treturn output\n\ndef fully_connect_bn(pre_layer, output_dim, act, use_bias, training, epsilon=1e-4, alpha=.1, binary=True, stochastic=False, H=1., W_LR_scale=\"Glorot\"):\n\tpre_act = binary_layer.dense_binary(pre_layer, output_dim,\n\t\t\t\t\t\t\t\t\tuse_bias = use_bias,\n\t\t\t\t\t\t\t\t\tkernel_constraint = lambda w: tf.clip_by_value(w, -1.0, 1.0))\n\tbn = binary_layer.batch_normalization(pre_act, momentum=1-alpha, epsilon=epsilon, training=training)\n\tif act == None:\n\t\toutput = bn\n\telse:\n\t\toutput = act(bn)\n\treturn output\n\n\ndef conv_pool_latent(pre_layer, kernel_num, kernel_size, padding, pool_size, activation, training, epsilon=1e-4, alpha=.1, binary=True, stochastic=False, H=1., W_LR_scale=\"Glorot\"):\n\tconv = binary_layer.conv2d_latent(pre_layer, kernel_num, kernel_size, padding=padding, binary=binary, stochastic=stochastic, H=H, W_LR_scale=W_LR_scale)\n\tpool = tf.layers.max_pooling2d(conv, pool_size=pool_size, strides=pool_size)\n\tbn = binary_layer.batch_normalization(pool, epsilon=epsilon, momentum = 1-alpha, training=training)\n\toutput = activation(bn)\n\treturn output\n\ndef fully_connect_latent(pre_layer, output_dim, act, use_bias, training, epsilon=1e-4, alpha=.1, binary=True, stochastic=False, H=1., W_LR_scale=\"Glorot\"):\n\tpre_act = binary_layer.dense_latent(pre_layer, output_dim,\n\t\t\t\t\t\t\t\t\tuse_bias = use_bias,\n\t\t\t\t\t\t\t\t\tkernel_constraint = lambda w: tf.clip_by_value(w, -1.0, 1.0))\n\tbn = binary_layer.batch_normalization(pre_act, momentum=1-alpha, epsilon=epsilon, training=training)\n\tif act == None:\n\t\toutput = bn\n\telse:\n\t\toutput = act(bn)\n\treturn output\n\n# A function which shuffles a dataset\ndef shuffle(X,y):\n\tprint(len(X))\n\tshuffle_parts = 1\n\tchunk_size = int(len(X)/shuffle_parts)\n\tshuffled_range = np.arange(chunk_size)\n\n\tX_buffer = np.copy(X[0:chunk_size])\n\ty_buffer = np.copy(y[0:chunk_size])\n\n\tfor k in range(shuffle_parts):\n\n\t\tnp.random.shuffle(shuffled_range)\n\n\t\tfor i in range(chunk_size):\n\n\t\t\tX_buffer[i] = X[k*chunk_size+shuffled_range[i]]\n\t\t\ty_buffer[i] = y[k*chunk_size+shuffled_range[i]]\n\n\t\tX[k*chunk_size:(k+1)*chunk_size] = X_buffer\n\t\ty[k*chunk_size:(k+1)*chunk_size] = y_buffer\n\n\treturn X,y\n\n# This function trains the model a full epoch (on the whole dataset)\ndef train_epoch(X, y, sess, batch_size=100):\n\tbatches = int(len(X)/batch_size)\n\tfor i in range(batches):\n\t\tsess.run([train_kernel_op, train_other_op],\n\t\t\tfeed_dict={ input: X[i*batch_size:(i+1)*batch_size],\n\t\t\t\t\t\ttarget: y[i*batch_size:(i+1)*batch_size],\n\t\t\t\t\t\ttraining: True})\n\ndownload_mnist.maybe_download('./mnist/MNIST_data/')\nmnist = input_data.read_data_sets('./mnist/MNIST_data/', one_hot=True)\ntraindatashape = 
mnist.train.images.shape[0]\ntestdatashape = mnist.test.images.shape[0]\nmnisttrain = mnist.train.images.reshape(traindatashape, 28,28)\nmnisttest = mnist.test.images.reshape(testdatashape, 28,28)\n\n\n# convert class vectors to binary class vectors\nfor i in range(mnist.train.images.shape[0]):\n\tmnisttrain[i] = mnisttrain[i] * 2 - 1\nfor i in range(mnist.test.images.shape[0]):\n\tmnisttest[i] = mnisttest[i] * 2 - 1\nfor i in range(mnist.train.labels.shape[0]):\n\tmnist.train.labels[i] = mnist.train.labels[i] * 2 - 1 # -1 or 1 for hinge loss\nfor i in range(mnist.test.labels.shape[0]):\n\tmnist.test.labels[i] = mnist.test.labels[i] * 2 - 1\nprint(mnist.test.labels.shape)\nprint(mnisttest.shape)\n\n# BinaryOut\nactivation = binary_layer.binary_tanh_unit\nprint(\"activation = binary_net.binary_tanh_unit\")\n\n## Training for BNN=======================================================================================\ninput = tf.placeholder(tf.float32, shape=[None, 28, 28])\ntarget = tf.placeholder(tf.float32, shape=[None, 10])\ntraining = tf.placeholder(tf.bool)\n\n\n######### Build CNN ###########\nx = tf.expand_dims(input, 3)\ncnn = conv_pool_bn(x, 20, (5,5), padding='same', pool_size=(2,2), activation=activation, training=training)\n\ncnn = conv_pool_bn(cnn, 50, (5,5), padding='same', pool_size=(2,2), activation=activation, training=training)\n\ncnn = tf.layers.flatten(cnn)\n\ncnn = fully_connect_bn(cnn, 500, act=activation, use_bias=True, training=training)\ntrain_output = fully_connect_bn(cnn, 10, act=None, use_bias=True, training=training)\n\nloss = tf.keras.metrics.squared_hinge(target, train_output)\naccuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(train_output, 1), tf.argmax(target, 1)), tf.float32))\n\n\ntrain_epochs = 500\ntest_epochs = 1\nlr_start = 0.003\nlr_end = 0.0000003\nlr_decay = (lr_end / lr_start)**(1. 
/ train_epochs)\nglobal_step1 = tf.Variable(0, trainable=False)\nglobal_step2 = tf.Variable(0, trainable=False)\nlr1 = tf.train.exponential_decay(lr_start, global_step=global_step1, decay_steps=int(mnist.train.images.shape[0]/100), decay_rate=lr_decay)\nlr2 = tf.train.exponential_decay(lr_start, global_step=global_step2, decay_steps=int(mnist.train.images.shape[0]/100), decay_rate=lr_decay)\n\nsess = tf.Session()\nsaver = tf.train.Saver()\n\n\nother_var = [var for var in tf.trainable_variables() if not var.name.endswith('kernel:0')]\nopt = binary_layer.AdamOptimizer(binary_layer.get_all_LR_scale(), lr1)\nopt2 = tf.train.AdamOptimizer(lr2)\nupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\nwith tf.control_dependencies(update_ops): # when training, the moving_mean and moving_variance in the BN need to be updated.\n\ttrain_kernel_op = opt.apply_gradients(binary_layer.compute_grads(loss, opt), global_step=global_step1)\n\ttrain_other_op = opt2.minimize(loss, var_list=other_var, global_step=global_step2)\n\n\nsess.run(tf.global_variables_initializer())\n\n\n\nprint(\"Training started.....\")\n######train time =============================================================\nold_acc = 0.0\nX_train, y_train = shuffle(mnisttrain, mnist.train.labels)\ntr_acc = np.zeros(train_epochs)\nfor i in range(train_epochs):\n\tprint(\"train epoch:{}\".format(i))\n\ttrain_epoch(X_train, y_train, sess)\n\tX_train, y_train = shuffle(mnisttrain, mnist.train.labels)\n\ttrain_hist = sess.run([accuracy],\n\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\tinput: X_train,\n\t\t\t\t\t\ttarget: y_train,\n\t\t\t\t\t\ttraining: False\n\t\t\t\t\t})\n\ttr_acc[i]=train_hist[0]\n\tprint(train_hist[0])\n\n\n\tif train_hist[0] > old_acc:\n\t\told_acc = train_hist[0]\n\t\tsave_path = saver.save(sess, \"./mnist/modelLENET/model.ckpt\")\n\n\nprint(\"Variables are saved....\")\n\nkernel = [k for k in tf.trainable_variables() if k.name.endswith('kernel:0')]\nbias = [k for k in tf.trainable_variables() if k.name.endswith('bias:0')]\ngamma = [k for k in tf.trainable_variables() if k.name.endswith('gamma:0')]\nbeta = [k for k in tf.trainable_variables() if k.name.endswith('beta:0')]\nmoving_mean = [k for k in tf.global_variables() if k.name.endswith('moving_mean:0')]\nmoving_variance = [k for k in tf.global_variables() if k.name.endswith('moving_variance:0')]\nkernel_M = sess.run(kernel)\nbias_M = sess.run(bias)\ngamma_M = sess.run(gamma)\nbeta_M = sess.run(beta)\nmoving_mean_M = sess.run(moving_mean)\nmoving_variance_M = sess.run(moving_variance)\n\n\nwith open(__file__+'training_accuracy.pkl','w') as obj:\n\t\tpickle.dump( { 'acc':tr_acc,\n\t\t\t\t\t\t'kernel_M' : kernel_M,\n\t\t\t\t\t\t'bias_M' : bias_M,\n\t\t\t\t\t\t'gamma_M' : gamma_M,\n\t\t\t\t\t\t'beta_M' : beta_M,\n\t\t\t\t\t\t'moving_mean_M' : moving_mean_M,\n\t\t\t\t\t\t'moving_variance_M' : moving_variance_M,\n\t\t\t\t\t\t}, obj )\n\n# # # ###testing============================================================================\nsaver.restore(sess, \"./mnist/modelLENET/model.ckpt\")\n\nprint(\"Prining test accuracy with Binary weights...\")\ntest_acc =np.zeros(test_epochs)\nfor i in range(test_epochs):\n\tprint(\"test epoch:{}\".format(i))\n\n\ttest_hist = sess.run([accuracy],\n\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\tinput: mnisttest,\n\t\t\t\t\t\ttarget: mnist.test.labels,\n\t\t\t\t\t\ttraining: False\n\t\t\t\t\t})\n\ttest_acc[i]=test_hist[0]\n\tprint(test_hist[0])\n\nwith open(__file__+'testing_accuracy.pkl','w') as obj:\n\t\tpickle.dump( { 'acc': test_acc,\n\t\t\t\t\t\t}, obj )\n\n\n##### 
latent weights========================================================================================\nsess.close()\n\ntf.reset_default_graph()\n\n### Architecture when we want latent weights==========================\nprint(\"Making the architecture without binarization of weights...\")\ninput = tf.placeholder(tf.float32, shape=[None, 28, 28])\ntarget = tf.placeholder(tf.float32, shape=[None, 10])\ntraining = tf.placeholder(tf.bool)\n\n\n######### Build CNN ###########\nx = tf.expand_dims(input, 3)\ncnn = conv_pool_latent(x, 20, (5,5), padding='same', pool_size=(2,2), activation=activation, training=training)\n\ncnn = conv_pool_latent(cnn, 50, (5,5), padding='same', pool_size=(2,2), activation=activation, training=training)\n\ncnn = tf.layers.flatten(cnn)\n\ncnn = fully_connect_latent(cnn, 500, act=activation, use_bias=True, training=training)\ntrain_output = fully_connect_latent(cnn, 10, act=None, use_bias=True, training=training)\n\nloss = tf.keras.metrics.squared_hinge(target, train_output)\naccuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(train_output, 1), tf.argmax(target, 1)), tf.float32))\n\n\ntest_epochs = 1\nsess = tf.Session()\nsaver = tf.train.Saver()\n\nsess.run(tf.global_variables_initializer())\n\nparams = tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES )\n\n\nprint(\"Restoring latent weights in the model...\")\nwith open('mnistLENET5.pytraining_accuracy.pkl','rb') as obj:\n\t# gr = pickle.load(obj)\n\tdata = pickle.load(obj)\nacc = data['acc']\nkernel_M = data['kernel_M']\nbias_M = data['bias_M']\ngamma_M = data['gamma_M']\nbeta_M = data['beta_M']\nmoving_mean_M = data['moving_mean_M']\nmoving_variance_M = data['moving_variance_M']\n\n\nfor i,param in enumerate(params):\n\tif i<=11:\n\t\tif param.name.endswith('kernel:0'):\n\t\t\tparam.load(kernel_M[i/6], sess)\n\t\telif param.name.endswith('bias:0'):\n\t\t\tparam.load(bias_M[i/6], sess)\n\t\telif param.name.endswith('gamma:0'):\n\t\t\tparam.load(gamma_M[i/6], sess)\n\t\telif param.name.endswith('beta:0'):\n\t\t\tparam.load(beta_M[i/6], sess)\n\t\telif param.name.endswith('moving_mean:0'):\n\t\t\tparam.load(moving_mean_M[i/6], sess)\n\t\telif param.name.endswith('moving_variance:0'):\n\t\t\tparam.load(moving_variance_M[i/6], sess)\n\t\telse:\n\t\t\tpass\n\telse:\n\t\tif param.name.endswith('kernel:0'):\n\t\t\tparam.load(kernel_M[((i-12)+7*2)/7], sess)\n\t\telif param.name.endswith('bias:0'):\n\t\t\tparam.load(bias_M[((i-12)+7*2)/7], sess)\n\t\telif param.name.endswith('gamma:0'):\n\t\t\tparam.load(gamma_M[((i-12)+7*2)/7], sess)\n\t\telif param.name.endswith('beta:0'):\n\t\t\tparam.load(beta_M[((i-12)+7*2)/7], sess)\n\t\telif param.name.endswith('moving_mean:0'):\n\t\t\tparam.load(moving_mean_M[((i-12)+7*2)/7], sess)\n\t\telif param.name.endswith('moving_variance:0'):\n\t\t\tparam.load(moving_variance_M[((i-12)+7*2)/7], sess)\n\t\telse:\n\t\t\tpass\n\t\n\n##restore model =================================================================\n\nprint(\"Printing test accuracy with latent weights in BNN...\")\n\ntest_acc = np.zeros(test_epochs)\nfor i in range(test_epochs):\n\tprint(\"test epoch:{}\".format(i))\n\ttest_hist = sess.run([accuracy],\n\t\t\t\t\t\t feed_dict={\n\t\t\t\t\t\tinput: mnisttest,\n\t\t\t\t\t\ttarget: mnist.test.labels,\n\t\t\t\t\t\ttraining: False\n\t\t\t\t\t})\n\ttest_acc[i]=test_hist[0]\n\tprint(test_hist[0])\n\nwith open(__file__+'testing_accuracy.pkl','w') as obj:\n\t\tpickle.dump( { 'acc': test_acc,\n\t\t\t\t\t\t}, obj )\n" ]
[ [ "tensorflow.control_dependencies", "tensorflow.global_variables", "tensorflow.train.AdamOptimizer", "tensorflow.Variable", "tensorflow.get_collection", "numpy.arange", "numpy.copy", "tensorflow.reset_default_graph", "tensorflow.Session", "tensorflow.trainable_variables", "tensorflow.train.Saver", "tensorflow.argmax", "tensorflow.examples.tutorials.mnist.input_data.read_data_sets", "numpy.zeros", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.clip_by_value", "tensorflow.layers.flatten", "tensorflow.expand_dims", "tensorflow.layers.max_pooling2d", "numpy.random.shuffle", "tensorflow.keras.metrics.squared_hinge" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Pythonista-Haruka/awesomebook-master
[ "2060d9a16d3854ac11d43d48df32ba3bd98d8367" ]
[ "awesome_S3.py" ]
[ "import os\nimport pandas as pd\n\n# csvファイルからのダウンロード\n# 絶対パスを通すといい。ダメなら実行構成を見直すこと\nX = os.path.abspath(\"reserve.csv\")\nreserve_tb = pd.read_csv(X, encoding=\"utf-8\")\n\n# 3-1 データ数、種類数の算出\n# agg関数を用い、引数にdictionaryオブジェクトを取ることで集約処理をまとめて指定\n# result = reserve_tb\\\n# .groupby(\"hotel_id\")\\\n# .agg({\"reserve_id\": \"count\", \"customer_id\": \"nunique\"})\n# reset_index関数で、列番号を振り直す\n# result.reset_index(inplace=True)\n# result.columns = [\"hotel_id\", \"rsv_cnt\", \"cus_cnt\"]\n\n# 3-2 合計値の算出\n# 集約処理が一つのみのときはagg関数よりも良い関数がある\n# sum関数で合計を集約する\n# result = reserve_tb\\\n# .groupby([\"hotel_id\", \"people_num\"])[\"total_price\"]\\\n# .sum().reset_index()\n# 列名を\"price_sum\"に変更\n# result.rename(columns={\"total_price\": \"price_sum\"}, inplace=True)\n\n# 3-3 極値、代表値の算出\n# 最大、最小、平均、中央はそれぞれmax,min,mean,median関数を利用\n# パーセンタイルはnp.percentileを使用し、そのラムダ式をaggの集約処理に指定する\n# result = reserve_tb\\\n# .groupby(\"hotel_id\")\\\n# .agg({\"total_price\": [\"max\", \"min\", \"mean\", \"median\", lambda x: np.percentile(x, q=20)]})\\\n# .reset_index()\n# result.columns = [\"hotel_id\", \"price_max\", \"price_min\", \"price_mean\", \"price_median\", \"price_20%\"]\n\n# 3-4 ばらつき具合の算出\n# 分散、標準偏差はそれぞれvar,std関数を使用\n# n=1の場合はNAになってしまうので、fillna関数で0に置き換え\n# result = reserve_tb\\\n# .groupby(\"hotel_id\")\\\n# .agg({\"total_price\": [\"var\", \"std\"]}).reset_index()\n# result.columns = [\"hotel_id\", \"price_var\", \"price_std\"]\n# result.fillna(0, inplace=True)\n\n# 3-5 最頻値の算出\n# round関数で四捨五入した後に、mode関数で最頻値を算出\n# reserve_tb[\"total_price\"].round(-3).mode()\n\n# 3-6 順位の算出\n# rank関数により順位付け\n# strでは順位付けできないため、先にdatetime型に変換\n# reserve_tb[\"reserve_datetime\"] = pd.to_datetime(reserve_tb[\"reserve_datetime\"],\n# format=\"%Y-%m-%d %H:%M:%S\")\n# log_noを新たな列として追加\n# groupby関数で集約単位を指定\n# reserve_tb[\"log_no\"] = reserve_tb \\\n# .groupby(\"customer_id\")[\"reserve_datetime\"] \\\n# .rank(ascending=True, method=\"first\")\n# print(reserve_tb)\n\n# 予約回数を計算\n# rsv_cnt_tb = reserve_tb.groupby(\"hotel_id\").size().reset_index()\n# rsv_cnt_tb.columns = [\"hotel_id\", \"rsv_cnt\"]\n# 予約回数をもとに順位を計算\n# rsv_cnt_tb[\"rsv_cnt_rank\"] = rsv_cnt_tb[\"rsv_cnt\"] \\\n# .rank(ascending=False, method=\"min\")\n# 必要のない列を削除\n# rsv_cnt_tb.drop(\"rsv_cnt\", axis=1, inplace=True)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
chansigit/fbm
[ "431d984f4d775de1fafa2752db9a54c39d7ed8d0" ]
[ "fbm/fbm.py" ]
[ "\"\"\"Generate realizations of fractional Brownian motion.\"\"\"\nimport warnings\n\nimport numpy as np\n\n\nclass FBM(object):\n \"\"\"The FBM class.\n\n After instantiating with n = number of increments, hurst parameter, length\n of realization (default = 1) and method of generation\n (default daviesharte), call fbm() for fBm, fgn()\n for fGn, or times() to get corresponding time values.\n \"\"\"\n\n def __init__(self, n, hurst, length=1, method=\"daviesharte\"):\n \"\"\"Instantiate the FBM.\"\"\"\n self._methods = {'daviesharte': self._daviesharte,\n 'cholesky': self._cholesky,\n 'hosking': self._hosking}\n self.n = n\n self.hurst = hurst\n self.length = length\n self.method = method\n self._fgn = self._methods[self.method]\n # Some reusable values to speed up Monte Carlo.\n self._cov = None\n self._eigenvals = None\n self._C = None\n # Flag if some params get changed\n self._changed = False\n\n def __str__(self):\n return 'fBm (' + str(self.method) + ') on [0, ' + str(self.length) + \\\n '] with Hurst value ' + str(self.hurst) + ' and ' + str(self.n) + \\\n ' increments'\n\n def __repr__(self):\n return 'FBM(n=' + str(self.n) + ', hurst=' + str(self.hurst) + \\\n ', length=' + str(self.length) + ', method=\\'' + \\\n str(self.method) + '\\')'\n\n @property\n def n(self):\n return self._n\n\n @n.setter\n def n(self, value):\n if not isinstance(value, int) or value <= 0:\n raise TypeError('Number of increments must be a positive int.')\n self._n = value\n self._changed = True\n\n @property\n def hurst(self):\n return self._hurst\n\n @hurst.setter\n def hurst(self, value):\n if not isinstance(value, float) or value <= 0 or value >= 1:\n raise ValueError('Hurst parameter must be in interval (0, 1).')\n self._hurst = value\n self._changed = True\n\n @property\n def length(self):\n return self._length\n\n @length.setter\n def length(self, value):\n if not isinstance(value, (int, float)) or value <= 0:\n raise ValueError('Length of fbm must be greater than 0.')\n self._length = value\n self._changed = True\n\n @property\n def method(self):\n return self._method\n\n @method.setter\n def method(self, value):\n if value not in self._methods:\n raise ValueError('Method must be \\'daviesharte\\', \\'hosking\\' or \\\n \\'cholesky\\'')\n self._method = value\n self._fgn = self._methods[self.method]\n self._changed = True\n\n def fbm(self):\n \"\"\"Sample the fractional Brownian motion.\"\"\"\n return np.insert(self.fgn().cumsum(), [0], 0)\n\n def fgn(self):\n \"\"\"Sample the fractional Gaussian noise.\"\"\"\n scale = (1.0 * self.length / self.n) ** self.hurst\n gn = np.random.normal(0.0, 1.0, self.n)\n\n # If hurst == 1/2 then just return Gaussian noise\n if self.hurst == 0.5:\n return gn * scale\n else:\n fgn = self._fgn(gn)\n\n # Scale to interval [0, L]\n return fgn * scale\n\n def times(self):\n \"\"\"The times associated with the fbm/fgn samples.\"\"\"\n return np.linspace(0, self.length, self.n + 1)\n\n def _autocovariance(self, k):\n \"\"\"The autocovariance for fgn.\"\"\"\n return 0.5 * (abs(k - 1) ** (2 * self.hurst) -\n 2 * abs(k) ** (2 * self.hurst) +\n abs(k + 1) ** (2 * self.hurst))\n\n def _daviesharte(self, gn):\n \"\"\"Generate a fgn realization using Davies-Harte method.\n\n Uses Davies and Harte method (exact method) from:\n Davies, Robert B., and D. S. Harte. \"Tests for Hurst effect.\"\n Biometrika 74, no. 1 (1987): 95-101.\n\n Can fail if n is small and hurst close to 1. Falls back to Hosking\n method in that case. See:\n\n Wood, Andrew TA, and Grace Chan. 
\"Simulation of stationary Gaussian\n processes in [0, 1] d.\" Journal of computational and graphical\n statistics 3, no. 4 (1994): 409-432.\n \"\"\"\n # Monte carlo consideration\n if self._eigenvals is None or self._changed:\n # Generate the first row of the circulant matrix\n row_component = [self._autocovariance(i) for i in range(1, self.n)]\n reverse_component = list(reversed(row_component))\n row = [self._autocovariance(0)] + row_component + \\\n [0] + reverse_component\n\n # Get the eigenvalues of the circulant matrix\n # Discard the imaginary part (should all be zero in theory so\n # imaginary part will be very small)\n self._eigenvals = np.fft.fft(row).real\n self._changed = False\n\n # If any of the eigenvalues are negative, then the circulant matrix\n # is not positive definite, meaning we cannot use this method. This\n # occurs for situations where n is low and H is close to 1.\n # Fall back to using the Hosking method. See the following for a more\n # detailed explanation:\n #\n # Wood, Andrew TA, and Grace Chan. \"Simulation of stationary Gaussian\n # processes in [0, 1] d.\" Journal of computational and graphical\n # statistics 3, no. 4 (1994): 409-432.\n if np.any([ev < 0 for ev in self._eigenvals]):\n warnings.warn('Combination of increments n and Hurst value H '\n 'invalid for Davies-Harte method. Reverting to Hosking method.'\n ' Occurs when n is small and Hurst is close to 1. ')\n # Set method to hosking for future samples.\n self.method = 'hosking'\n # Don't need to store eigenvals anymore.\n self._eigenvals = None\n return self._hosking(gn)\n\n # Generate second sequence of i.i.d. standard normals\n gn2 = np.random.normal(0.0, 1.0, self.n)\n\n # Resulting sequence from matrix multiplication of positive definite\n # sqrt(C) matrix with fgn sample can be simulated in this way.\n w = np.zeros(2 * self.n, dtype=complex)\n for i in range(2 * self.n):\n if i == 0:\n w[i] = np.sqrt(self._eigenvals[i] / (2 * self.n)) * gn[i]\n elif i < self.n:\n w[i] = np.sqrt(self._eigenvals[i] / (4 * self.n)) * \\\n (gn[i] + 1j * gn2[i])\n elif i == self.n:\n w[i] = np.sqrt(self._eigenvals[i] / (2 * self.n)) * gn2[0]\n else:\n w[i] = np.sqrt(self._eigenvals[i] / (4 * self.n)) * \\\n (gn[2 * self.n - i] - 1j * gn2[2 * self.n - i])\n\n # Resulting z is fft of sequence w. Discard small imaginary part (z\n # should be real in theory).\n z = np.fft.fft(w)\n fgn = z[:self.n].real\n return fgn\n\n def _cholesky(self, gn):\n \"\"\"Generate a fgn realization using the Cholesky method.\n\n Uses Cholesky decomposition method (exact method) from:\n Asmussen, S. (1998). Stochastic simulation with a view towards\n stochastic processes. University of Aarhus. Centre for Mathematical\n Physics and Stochastics (MaPhySto)[MPS].\n \"\"\"\n # Monte carlo consideration\n if self._C is None or self._changed:\n # Generate covariance matrix\n G = np.matrix(np.zeros([self.n, self.n]))\n for i in range(self.n):\n for j in range(i + 1):\n G[i, j] = self._autocovariance(i - j)\n\n # Cholesky decomposition\n self._C = np.linalg.cholesky(G)\n self._changed = False\n\n # Generate fgn\n fgn = self._C * np.matrix(gn).T\n fgn = np.squeeze(np.asarray(fgn))\n return fgn\n\n\n def _hosking(self, gn):\n \"\"\"Generate a fgn realization using Hosking's method\n\n Method of generation is Hosking's method (exact method) from his paper:\n Hosking, J. R. (1984). Modeling persistence in hydrological time series\n using fractional differencing. 
Water resources research, 20(12),\n 1898-1908.\n \"\"\"\n fgn = np.zeros(self.n)\n phi = np.zeros(self.n)\n psi = np.zeros(self.n)\n # Monte carlo consideration\n if self._cov is None or self._changed:\n self._cov = np.array(\n [self._autocovariance(i) for i in range(self.n)])\n self._changed = False\n\n # First increment from stationary distribution\n fgn[0] = gn[0]\n v = 1\n phi[0] = 0\n\n # Generate fgn realization with n increments of size 1\n for i in range(1, self.n):\n phi[i - 1] = self._cov[i]\n for j in range(i - 1):\n psi[j] = phi[j]\n phi[i - 1] -= psi[j] * self._cov[i - j - 1]\n phi[i - 1] /= v\n for j in range(i - 1):\n phi[j] = psi[j] - phi[i - 1] * psi[i - j - 2]\n v *= (1 - phi[i - 1] * phi[i - 1])\n for j in range(i):\n fgn[i] += phi[j] * fgn[i - j - 1]\n fgn[i] += np.sqrt(v) * gn[i]\n\n return fgn\n\n\ndef fbm(n, hurst, length=1, method=\"daviesharte\"):\n \"\"\"One off sample of fbm.\"\"\"\n f = FBM(n, hurst, length, method)\n return f.fbm()\n\ndef fgn(n, hurst, length=1, method=\"daviesharte\"):\n \"\"\"One off sample of fgn.\"\"\"\n f = FBM(n, hurst, length, method)\n return f.fgn()\n\ndef times(n, length=1):\n \"\"\"Generate the times associated with increments n and length.\"\"\"\n return np.linspace(0, length, n + 1)\n" ]
[ [ "numpy.matrix", "numpy.sqrt", "numpy.linspace", "numpy.fft.fft", "numpy.asarray", "numpy.random.normal", "numpy.any", "numpy.linalg.cholesky", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Varat7v2/Person-Detection
[ "b8b33f1206839d94119f1aa7a6b7b62ec9c5048e" ]
[ "myFROZEN_GRAPH_v1.py" ]
[ "import sys\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport cv2\n\nclass FROZEN_GRAPH_INFERENCE:\n \n def __init__(self, frozen_model):\n \"\"\"Tensorflow detector\n \"\"\"\n self.inference_list = list()\n self.PATH_TO_CKPT = frozen_model\n self.count = 0\n\n self.detection_graph = tf.Graph()\n with self.detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(self.PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\n with self.detection_graph.as_default():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n self.sess = tf.Session(graph=self.detection_graph, config=config)\n self.windowNotSet = True\n\n def draw_bounding_box(self, image, scores, boxes, classes, im_width, im_height):\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n for score, box, name in zip(scores, boxes, classes):\n if name == 1 and score > 0.6:\n # ymin, xmin, ymax, xmax = box\n left = int(box[1]*im_width)\n top = int(box[0]*im_height)\n right = int(box[3]*im_width)\n bottom = int(box[2]*im_height)\n\n box_width = right-left\n box_height = bottom-top\n\n cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2, 8)\n cv2.putText(image, '{}: {:.3f}'.format('person', score),(left, top - 5),\n cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)\n\n return image\n\n\n def run_frozen_graph(self, image, im_width, im_height):\n \"\"\"image: bgr image\n return (boxes, scores, classes, num_detections)\n \"\"\"\n image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n # Actual detection.\n start_time = time.time()\n (boxes, scores, classes, num_detections) = self.sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n elapsed_time = time.time() - start_time\n self.inference_list.append(elapsed_time)\n self.count = self.count + 1\n average_inference = sum(self.inference_list)/self.count\n print('Average inference time: {}'.format(average_inference))\n\n # Draw bounding boxes on the image\n image = self.draw_bounding_box(image, scores, boxes, classes, im_width, im_height)\n\n return (image, boxes, scores, classes, num_detections)\n" ]
[ [ "tensorflow.Graph", "numpy.expand_dims", "tensorflow.import_graph_def", "tensorflow.gfile.GFile", "numpy.squeeze", "tensorflow.ConfigProto", "tensorflow.Session", "tensorflow.GraphDef" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
snudm-starlab/cnn-compression
[ "8f41ea2f14d640972aabbf074fa181078edc2d53" ]
[ "src/train_test/train.py" ]
[ "\"\"\"\nLicensed to the Apache Software Foundation (ASF) under one\nor more contributor license agreements. See the NOTICE file\ndistributed with this work for additional information\nregarding copyright ownership. The ASF licenses this file\nto you under the Apache License, Version 2.0 (the\n\"License\"); you may not use this file except in compliance\nwith the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing,\nsoftware distributed under the License is distributed on an\n\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\nKIND, either express or implied. See the License for the\nspecific language governing permissions and limitations\nunder the License.\n\nFALCON: Lightweight and Accurate Convolution\n\nFile: train_test/train.py\n - Contain training code for execution for model.\n\nVersion: 1.0\n\"\"\"\n\n# pylint: disable=C0103,R0912,R0913,R0914,R0915,R1704,C0200,W0621,E1101\nimport time\nimport sys\nimport copy\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nfrom utils.optimizer_option import get_optimizer\nfrom utils.load_data import load_cifar100, load_svhn\nfrom utils.lr_decay import adjust_lr\nfrom train_test.validation import validation\n\n\n\ndef train(net,\n lr,\n log=None,\n optimizer_option='SGD',\n data='cifar100',\n epochs=350,\n batch_size=128,\n is_train=True,\n net_st=None,\n beta=0.0,\n lrd=10):\n \"\"\"\n Train the model.\n \n :param net: model to be trained\n :param lr: learning rate\n :param optimizer_option: optimizer type\n :param data: datasets used to train\n :param epochs: number of training epochs\n :param batch_size: batch size\n :param is_train: whether it is a training process\n :param net_st: uncompressed model\n :param beta: transfer parameter\n :return: best_param: the parameters of the model that achieves the best accuracy\n \"\"\"\n\n net.train()\n if net_st is not None:\n net_st.eval()\n\n if data == 'cifar100':\n trainloader = load_cifar100(is_train, batch_size)\n valloader = load_cifar100(False, batch_size)\n elif data == 'svhn':\n trainloader = load_svhn(is_train, batch_size)\n valloader = load_svhn(False, batch_size)\n else:\n sys.exit()\n\n criterion = nn.CrossEntropyLoss()\n criterion_mse = nn.MSELoss()\n optimizer = get_optimizer(net, lr, optimizer_option)\n\n start_time = time.time()\n last_time = 0\n\n best_acc = 0\n best_param = net.state_dict()\n\n iteration = 0\n for epoch in range(epochs):\n print(\"****************** EPOCH = %d ******************\" % epoch)\n if log is not None:\n log.write(\"****************** EPOCH = %d ******************\\n\" % epoch)\n\n total = 0\n correct = 0\n loss_sum = 0\n\n # change learning rate\n if epoch in (150, 250):\n lr = adjust_lr(lr, lrd=lrd, log=log)\n optimizer = get_optimizer(net, lr, optimizer_option)\n\n for i, data in enumerate(trainloader, 0):\n iteration += 1\n\n # foward\n inputs, labels = data\n inputs_var, labels_var = Variable(inputs.cuda()), Variable(labels.cuda())\n outputs, outputs_conv = net(inputs_var)\n loss = criterion(outputs, labels_var)\n if net_st is not None:\n _, outputs_st_conv = net_st(inputs_var)\n for i in range(len(outputs_st_conv)):\n if i != (len(outputs_st_conv)-1):\n loss += beta / 50 * criterion_mse(outputs_conv[i], \\\n outputs_st_conv[i].detach())\n else:\n loss += beta * criterion_mse(outputs_conv[i], \\\n outputs_st_conv[i].detach())\n\n # backward\n optimizer.zero_grad()\n 
loss.backward()\n optimizer.step()\n\n _, predicted = torch.max(F.softmax(outputs, -1), 1)\n total += labels_var.size(0)\n correct += (predicted == labels_var).sum()\n loss_sum += loss\n\n if iteration % 100 == 99:\n now_time = time.time()\n print('accuracy: %f %%; loss: %f; time: %ds'\n % ((float(100) * float(correct) / float(total)), loss, \\\n (now_time - last_time)))\n if log is not None:\n log.write('accuracy: %f %%; loss: %f; time: %ds\\n'\n % ((float(100) * float(correct) / float(total)), loss, \\\n (now_time - last_time)))\n\n total = 0\n correct = 0\n loss_sum = 0\n last_time = now_time\n\n # validation\n net.eval()\n val_acc = validation(net, valloader, log)\n net.train()\n if val_acc > best_acc:\n best_acc = val_acc\n # Store the current parameters in the parameters of the best model\n best_param = copy.deepcopy(net.state_dict())\n\n print('Finished Training. It took %ds in total' % (time.time() - start_time))\n if log is not None:\n log.write('Finished Training. It took %ds in total\\n' % (time.time() - start_time))\n\n return best_param\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.MSELoss", "torch.nn.functional.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lraszkiewicz/trax
[ "a4aff810ed8f744ce8a59ef5623a21b8f268ec66" ]
[ "trax/supervised/callbacks.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Trax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"Loop callbacks.\n\nCallbacks can be used to customize the behavior of `supervised.training.Loop`\nto accomodate a variety of use-cases.\n\nExamples include:\n - custom evaluation schemes\n - logging metrics to external servers\n - sending model checkpoints to external servers\n - updating the target network in RL algorithms and other non-stationary\n problems\n\"\"\"\n\nimport collections\nimport os\n\nimport gin\nimport numpy as np\n\nfrom trax import jaxboard\nfrom trax import layers as tl\nfrom trax import shapes\nfrom trax.rl import serialization_utils\nfrom trax.supervised import decoding\n\n\nclass TrainingStepCallback:\n \"\"\"Callback triggered before and after a training step.\"\"\"\n\n def __init__(self, loop):\n \"\"\"Initializes the callback with a `supervised.training.Loop` instance.\"\"\"\n self._loop = loop\n\n def call_at(self, step):\n \"\"\"Returns whether the callback should be called at a given step.\"\"\"\n raise NotImplementedError\n\n def on_step_begin(self, step):\n \"\"\"Called by Loop before training steps, when call_at returned True.\"\"\"\n raise NotImplementedError\n\n def on_step_end(self, step):\n \"\"\"Called by Loop after training steps, when call_at returned True.\"\"\"\n raise NotImplementedError\n\n\[email protected]\nclass SerializedModelEvaluation(TrainingStepCallback):\n \"\"\"Evaluates serialized sequence prediction models.\n\n Example: time series prediction. We can serialize a time series into\n a sequence of discrete tokens and model this sequence using an autoregressive\n sequence model, such as Transformer - see\n `trax.rl.serialization_utils.SerializedModel`. Then we can use this callback\n to evaluate long-horizon predictions of such a model.\n \"\"\"\n\n def __init__(\n self,\n loop,\n model=None,\n eval_at=1000,\n eval_task=None,\n context_lengths=(1,),\n horizon_lengths=(1,),\n n_steps=1,\n accelerate_model=True,\n normalize_context=False,\n model_with_aux=False,\n ):\n \"\"\"Initializes SerializedModelEvaluation.\n\n Args:\n loop: Instance of `trax.supervised.training.Loop` or `None`. Can be set to\n `None` for testing - in such a case, `model` and `eval_task` must be\n provided.\n model: Instance of `trax.rl.serialization_utils.SerializedModel`. Not\n required if `loop` is provided.\n eval_at: When to evaluate. Either int (every how many steps to evaluate),\n or a list of ints (step numbers), or a function int -> bool (step\n predicate).\n eval_task: Instance of `trax.supervised.training.EvalTask` with the\n evaluation data, or None. 
If not provided, the task will be taken from\n `loop`.\n context_lengths: List of lengths of the context sequence fed into the\n model before starting prediction.\n horizon_lengths: List of lengths of the predicted sequence.\n n_steps: Number of batches to run evaluation for.\n accelerate_model (bool): Whether to wrap the model in `tl.Accelerate`.\n \"\"\"\n super().__init__(loop)\n\n if model is None:\n model = loop.model\n\n observation_serializer = model.observation_serializer\n action_serializer = model.action_serializer\n\n predict_model = model.make_predict_model()\n if accelerate_model:\n predict_model = tl.Accelerate(predict_model)\n self._predict_model = predict_model\n self._obs_serializer = observation_serializer\n self._act_serializer = action_serializer\n\n if isinstance(eval_at, int):\n self._eval_at = lambda step: step % eval_at == 1\n elif hasattr(eval_at, '__in__'):\n self._eval_at = lambda step: step in eval_at\n elif callable(eval_at):\n self._eval_at = eval_at\n else:\n raise TypeError(f'Unsupported type for eval_at: {type(eval_at)}.')\n\n if eval_task is None:\n if len(loop.eval_tasks) != 1:\n raise ValueError(\n 'If eval_task is not provided, the number of eval_tasks registered '\n 'in Loop must be exactly 1.'\n )\n eval_task = loop.eval_tasks[0]\n self._eval_task = eval_task\n\n self._context_lengths = list(sorted(context_lengths))\n self._horizon_lengths = list(sorted(horizon_lengths))\n self._n_steps = n_steps\n self._normalize_context = normalize_context\n self._model_with_aux = model_with_aux\n\n self._batch_size = eval_task.sample_batch[0].shape[0]\n if self._model_with_aux:\n (_, self._init_state) = predict_model.init(\n shapes.ShapeDtype((2, self._batch_size, 1), dtype=np.int32),\n )\n else:\n (_, self._init_state) = predict_model.init(\n shapes.ShapeDtype((self._batch_size, 1), dtype=np.int32)\n )\n\n @property\n def predict_model(self):\n return self._predict_model\n\n def call_at(self, step):\n return self._eval_at(step)\n\n def on_step_begin(self, step):\n pass\n\n def on_step_end(self, step):\n summary_writer = jaxboard.SummaryWriter(\n os.path.join(self._loop.output_dir, 'srl_eval')\n )\n try:\n weights = self._loop.eval_model.seq_model_weights\n metrics = self.evaluate(weights)\n self._loop.log_summary(metrics, summary_writer, '', 'srl_eval')\n finally:\n summary_writer.close()\n\n def evaluate(self, weights):\n \"\"\"Evaluates the model and returns the metrics.\"\"\"\n self._predict_model.weights = weights\n\n metrics = collections.defaultdict(list)\n for _ in range(self._n_steps):\n batch = self._eval_task.next_batch()\n step_metrics = self._evaluate_batch(batch)\n for (key, value) in step_metrics.items():\n metrics[key].append(value)\n\n metrics = {k: np.array(v) for (k, v) in metrics.items()}\n\n def metric_name(name, context, horizon):\n return f'pred_{name}/context_{context}/horizon_{horizon}'\n\n ret_metrics = {}\n for ((name, context, horizon), errors) in metrics.items():\n if name == 'error':\n value = np.sum(errors) / (np.sum(errors != 0) + 1e-6)\n elif name == 'smape':\n errors = 200.0 * errors / horizon\n value = np.sum(errors) / (np.sum(errors != 0) + 1e-6)\n errors = np.sum(errors, axis=1) / np.sum(errors != 0, axis=1)\n\n ret_metrics[metric_name(f'{name}_min', context, horizon)] = np.min(errors)\n ret_metrics[metric_name(f'{name}_max', context, horizon)] = np.max(errors)\n ret_metrics[metric_name(f'{name}_var', context, horizon)] = np.var(errors)\n\n ret_metrics[metric_name(name, context, horizon)] = value\n\n return ret_metrics\n\n def 
_evaluate_batch(self, batch):\n \"\"\"Performs evaluation on a single batch.\"\"\"\n (obs, act, _, mask) = batch\n act_repr = serialization_utils.Serialize(self._act_serializer)(act)\n\n metrics = {}\n last_context = 0\n last_state = self._init_state\n last_start_id = 0\n\n if self._normalize_context:\n assert len(self._context_lengths) == 1\n\n for context in self._context_lengths:\n self._predict_model.state = last_state\n start_id = last_start_id\n\n if self._normalize_context:\n obs_context = obs[:, last_context:context]\n obs_means = obs_context.mean(axis=1)\n obs_stds = obs_context.std(axis=1, ddof=1)\n obs_norm = (obs - obs_means[:, np.newaxis]) / obs_stds[:, np.newaxis]\n obs_norm = np.nan_to_num(obs_norm)\n obs_repr = serialization_utils.Serialize(self._obs_serializer)(obs_norm)\n else:\n obs_repr = serialization_utils.Serialize(self._obs_serializer)(obs)\n\n if context > last_context:\n context_seq = serialization_utils.Interleave()((\n obs_repr[:, last_context:context], act_repr[:, last_context:context]\n ))\n consume_sequence(self._predict_model, start_id, context_seq[:, :-1])\n last_start_id = start_id = context_seq[:, -1:]\n last_state = self._predict_model.state\n last_context = context\n\n for timestep in range(max(self._horizon_lengths)):\n pred_repr = decoding.autoregressive_sample(\n self._predict_model,\n start_id=start_id,\n eos_id=-1,\n batch_size=self._batch_size,\n max_length=self._obs_serializer.representation_length,\n accelerate=False,\n )\n pred = self._obs_serializer.deserialize(pred_repr)\n if self._normalize_context:\n pred = pred * obs_stds\n pred = pred + obs_means\n horizon = timestep + 1\n\n smape = self._calculate_smape(pred, obs[:, context + timestep])\n\n for hor_len in self._horizon_lengths:\n if horizon <= hor_len:\n key = ('smape', context, hor_len)\n if key not in metrics:\n metrics[key] = 0\n metrics[key] += smape * mask[:, context + timestep]\n\n if horizon in self._horizon_lengths:\n error = self._calculate_error(pred, obs[:, context + timestep])\n metrics['error', context, horizon] = \\\n error * mask[:, context + timestep]\n\n start_id = pred_repr[:, -1:]\n consume_sequence(\n self._predict_model, start_id, act_repr[:, context + timestep, :-1]\n )\n start_id = act_repr[:, context + timestep, -1:]\n\n return metrics\n\n def _calculate_error(self, prediction, ground_truth):\n return (prediction - ground_truth) ** 2\n\n def _calculate_smape(self, prediction, ground_truth):\n return np.abs(ground_truth - prediction) / (\n np.abs(ground_truth) + np.abs(prediction) + 1e-6)\n\n\ndef consume_sequence(model, start_id, sequence):\n decoding.autoregressive_sample(\n model,\n start_id=start_id,\n eos_id=-1,\n inputs=sequence,\n batch_size=sequence.shape[0],\n max_length=1,\n accelerate=False,\n )\n" ]
[ [ "numpy.abs", "numpy.min", "numpy.nan_to_num", "numpy.max", "numpy.var", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neuroelf/isic-archive
[ "3250009693bbfa1457a3df2d647a17a977af52dd" ]
[ "isicarchive/annotation.py" ]
[ "\"\"\"\nisicarchive.annotation (Annotation)\n\nThis module provides the Annotation object for the IsicApi to utilize.\n\nAnnotation objects are either returned from calls to\n\n >>> from isicarchive.api import IsicApi\n >>> api = IsicApi()\n >>> study = api.study(study_name)\n >>> annotation = study.load_annotation(annotation_id)\n\nor can be generated\n\n >>> from isicarchive.annotation import Annotation\n >>> annotation = Annotation(...)\n\"\"\"\n\n__version__ = '0.4.8'\n\n\n# imports (needed for majority of functions)\nimport datetime\nimport os\nfrom typing import Any\nimport warnings\n\nfrom . import func\nfrom .image import Image\nfrom .vars import ISIC_IMAGE_DISPLAY_SIZE_MAX\n\n_json_full_fields = [\n 'id',\n 'image',\n 'image_id',\n 'log',\n 'markups',\n 'responses',\n 'start_time',\n 'state',\n 'status',\n 'stop_time',\n 'study_id',\n 'user',\n 'user_id',\n]\n_mangling = {\n 'id': '_id',\n 'image_id': 'imageId',\n 'start_time': 'startTime',\n 'stop_time': 'stopTime',\n 'study_id': 'studyId',\n 'user_id': 'userId',\n}\n_repr_pretty_list = {\n 'id': 'id',\n 'study_id': 'study_id',\n 'study_name': '_study.name',\n 'image_id': 'image_id',\n 'image_name': 'image.name',\n 'user_id': 'user_id',\n 'user_name': 'user.name',\n 'user_lastname': 'user.lastName',\n 'features': 'features.{keys}',\n}\n\nclass Annotation(object):\n \"\"\"\n Annotation object. If the details are not filled in, only the `id`,\n `image_id`, `state`, `study_id`, and `user_id` fields will be set.\n\n To generate an annotation object for an existing study, please use\n the Study.load_annotation(...) method!\n\n To generate a new annotation object (for later storage), use\n\n >>> anno = Annotation(image=image_id, study=study_id, user=user_id)\n\n Attributes\n ----------\n features : dict\n List of features (fields: idx and msk)\n id : str\n mongodb objectId of the annotation\n image : dict\n Minimal dict of the image (fields: _id, name, updated)\n image_id : str\n mongodb objectId of the image the annotation pertains to\n log : list\n Log entries (can be missing!)\n markups : dict\n Features that are part of the annotation {'Feature name': bool, ...}\n (can be missing!)\n responses : dict\n List of responses to questions {'Question': 'Response', ...}\n (can be missing!)\n start_time : Date\n Time when annotation was started (can be missing!)\n state : str\n State of annotation (one of 'active', 'complete' [?])\n status : str\n Status of annotation (can be missing!)\n stop_time : Date\n Time when annotation was finished (can be missing!)\n study_id : str\n mongodb objectId of the study the annotation pertains to\n user : dict\n Minimal dict of the user (fields: _id, name)\n user_id : str\n mongodb objectId of the user who entered the annotation\n \n Methods\n -------\n \"\"\"\n\n\n def __init__(self,\n from_json:dict = None,\n annotation_id:str = None,\n image:str = None,\n study:str = None,\n user:str = None,\n api:object = None,\n load_data:bool = False,\n ):\n \"\"\"Annotation init.\"\"\"\n\n self._api = api\n self._image_obj = None\n self._in_archive = False\n self._model_type = 'annotation'\n self._study = None\n self.features = dict()\n self.id = ''\n self.image = None\n self.image_id = image\n self.log = []\n self.markups = dict()\n self.masks = dict()\n self.responses = dict()\n self.start_time = ''\n self.state = 'active'\n self.status = '?'\n self.stop_time = ''\n self.study_id = study\n self.user = None\n self.user_id = user\n\n # preference: JSON, id (in name), then name (lookup)\n if isinstance(from_json, 
dict):\n try:\n self._from_json(from_json, load_data)\n except:\n raise\n elif func.could_be_mongo_object_id(annotation_id) and self._api:\n try:\n self._from_json(self._api.get(\n 'annotation/' + annotation_id).json(), load_data)\n except:\n raise\n\n # parse JSON\n def _from_json(self,\n from_json:dict,\n load_data:bool = True,\n load_masks:bool = False,\n ):\n self.id = from_json['_id']\n self.state = from_json['state']\n self.study_id = from_json['studyId']\n if 'image' in from_json:\n self.image = from_json['image']\n self.image_id = self.image['_id']\n if self._api and self.image_id in self._api._image_objs:\n self._image_obj = self._api._image_objs[self.image_id]\n elif 'imageId' in from_json:\n self.image_id = from_json['imageId']\n if 'log' in from_json:\n self.log = from_json['log']\n if 'markups' in from_json:\n self.markups = from_json['markups']\n if 'responses' in from_json:\n self.responses = from_json['responses']\n if 'startTime' in from_json:\n self.start_time = from_json['startTime']\n if 'status' in from_json:\n self.status = from_json['status']\n if 'stopTime' in from_json:\n self.stop_time = from_json['stopTime']\n if 'user' in from_json:\n self.user = from_json['user']\n self.user_id = self.user['_id']\n elif 'userId' in from_json:\n self.user_id = from_json['userId']\n self._in_archive = True\n if self._api and self.study_id in self._api._studies:\n self._study = self._api._studies[self.study_id]\n if ('features' in from_json and \n isinstance(from_json['features'], dict)):\n self.features = from_json['features']\n if (not load_data) and (self.state != 'complete'):\n return\n if self._api:\n self.load_data(load_masks=load_masks)\n\n # JSON\n def __repr__(self):\n return 'isicarchive.annotation.Annotation(from_json=%s)' % (self.as_json())\n \n # formatted print\n def __str__(self):\n return 'ISIC Annotation (id={0:s}, image_id={1:s}, study_id={2:s})'.format(\n self.id, self.image_id, self.study_id)\n \n # pretty print\n def _repr_pretty_(self, p:object, cycle:bool = False):\n func.object_pretty(self, p, cycle, _repr_pretty_list)\n\n # JSON representation (without constructor):\n def as_json(self):\n\n # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT\n from json import dumps as json_dumps\n\n json_list = []\n for field in _json_full_fields:\n if field in _mangling:\n json_field = _mangling[field]\n else:\n json_field = field\n json_list.append('\"%s\": %s' % (json_field,\n json_dumps(getattr(self, field))))\n return '{' + ', '.join(json_list) + '}'\n\n # clear data\n def clear_data(self,\n clear_features:bool = False,\n clear_masks:bool = True,\n deref_image:bool = False):\n if deref_image:\n self._image_obj = None\n if clear_features:\n self.features = dict()\n if clear_masks:\n self.masks = dict()\n\n # compute areas\n def compute_areas(self):\n if not self._in_archive or not self._api:\n return\n try:\n if not self._image_obj:\n try:\n self._image_obj = self._api.image(self.image_id)\n except:\n raise\n if self._image_obj.superpixels['szs'] is None:\n try:\n self._image_obj.map_superpixels()\n except:\n raise\n spx = self._image_obj.superpixels\n szs = spx['szs']\n iarea = float(sum(szs))\n if self._image_obj._segmentation is None:\n try:\n self._image_obj.load_segmentation()\n except:\n pass\n try:\n szp = spx['szp']\n if szp is None:\n szp = [0.0] * len(szs)\n except:\n szp = [0.0] * len(szs)\n for fcont in self.features.values():\n idx = fcont['idx']\n fcont['area'] = [szs[i] for i in idx]\n fcont['area_pct'] = [szs[i] / iarea for i in idx]\n fcont['area_mpct'] = 
[szp[i] for i in idx]\n fcont['tarea'] = sum(fcont['area'])\n fcont['tarea_pct'] = sum(fcont['area_pct'])\n fcont['tarea_mpct'] = sum(fcont['area_mpct'])\n except:\n raise\n \n # load data\n def load_data(self, load_masks:bool=False):\n\n # IMPORT DONE HERE TO SAVE TIME AT MODULE IMPORT\n if load_masks:\n import imageio\n \n try:\n for (key, value) in self.markups.items():\n if not value:\n continue\n feat_uri = func.uri_encode(key)\n if not (key in self.features and\n isinstance(self.features[key], dict) and\n ('idx' in self.features[key]) and\n (len(self.features[key]['idx']) > 0)):\n feat_lst = self._api.get(\n 'annotation/' + self.id + '/' + feat_uri,\n parse_json=False)\n if not feat_lst.ok:\n raise ValueError('Error loading feature ' + key)\n feat_lst = feat_lst.json()\n feat_idx = [fidx for fidx in range(len(feat_lst))\n if feat_lst[fidx] > 0]\n self.features[key] = dict()\n self.features[key]['area'] = None\n self.features[key]['area_pct'] = None\n self.features[key]['area_mpct'] = None\n self.features[key]['idx'] = feat_idx\n self.features[key]['lst'] = [v for v in filter(\n lambda v: v > 0, feat_lst)]\n self.features[key]['num'] = len(feat_idx)\n self.features[key]['tarea'] = None\n self.features[key]['tarea_pct'] = None\n self.features[key]['tarea_mpct'] = None\n if not load_masks or key in self.masks:\n continue\n cache_filename = self._api.cache_filename(self.id,\n 'afmsk', 'png', self._api._feature_filepart[key])\n if cache_filename and os.path.exists(cache_filename):\n try:\n self.masks[key] = imageio.imread(cache_filename)\n except Exception as e:\n warnings.warn('Error loading feature mask: ' + str(e))\n os.remove(cache_filename)\n if not key in self.masks:\n feat_req = self._api.get('annotation/' + self.id +\n '/' + feat_uri + '/mask', parse_json=False)\n if not feat_req.ok:\n raise ValueError('Error loading feature mask ' + key)\n self.features[key]['msk'] = feat_req.content\n self.masks[key] = imageio.imread(feat_req.content)\n if cache_filename:\n try:\n with open(cache_filename, 'wb') as cache_file:\n cache_file.write(self.features[key]['msk'])\n except Exception as e:\n warnings.warn('Error writing feature mask: ' + str(e))\n except Exception as e:\n warnings.warn('Error loading annotation: ' + str(e))\n\n # overlap in features\n def overlap(self,\n feature:str,\n other:object,\n other_feature:str,\n measure:str = 'dice',\n smcc_fwhm:float = 0.05,\n cc_within_segmask:bool = True,\n ) -> float:\n\n # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT\n from . 
import imfunc\n\n # do not return anything unless complete\n if self.state != 'complete' or other.state != 'complete':\n raise ValueError('Requires complete annotations.')\n if not feature in self.markups or not self.markups[feature]:\n return 0.0\n if not other_feature in other.markups or not other.markups[other_feature]:\n return 0.0\n if measure == 'cc' or measure == 'smcc' or self.image['_id'] != other.image['_id']:\n load_masks = True\n else:\n load_masks = False\n if not feature in self.features or (load_masks and not feature in self.masks):\n self.load_data(load_masks=load_masks)\n if not other_feature in other.features or (load_masks and not other_feature in other.masks):\n other.load_data(load_masks=load_masks)\n if not feature in self.features or not other_feature in other.features:\n raise RuntimeError('Error loading features.')\n if load_masks:\n if not feature in self.masks or not other_feature in other.masks:\n raise RuntimeError('Error loading binary masks.')\n if self.image['_id'] == other.image['_id'] and measure == 'dice':\n return imfunc.superpixel_dice(self.features[feature]['idx'],\n other.features[other_feature]['idx'])\n\n # ANOTHER IMPORT DONE HERE AS IT'S NOT NEEDED OTHERWISE\n import numpy\n\n if not self._image_obj:\n if self._api and self.image_id in self._api._image_objs:\n self._image_obj = self._api._image_objs[self.image_id]\n elif not self._api:\n raise ValueError('API object required to load feature.')\n else:\n try:\n self._image_obj = self._api.image(self.image_id)\n except:\n raise ValueError('Could not load image object.')\n if not other._image_obj:\n if self._api and other.image_id in self._api._image_objs:\n other._image_obj = self._api._image_objs[other.image_id]\n elif not self._api:\n raise ValueError('API object required to load feature.')\n else:\n try:\n other._image_obj = self._api.image(other.image_id)\n except:\n raise ValueError('Could not load image object.')\n simage = self._image_obj\n simeta = simage.meta['acquisition']\n oimage = other._image_obj\n oimeta = oimage.meta['acquisition']\n simage_shape = (simeta['pixelsY'], simeta['pixelsX'])\n oimage_shape = (oimeta['pixelsY'], oimeta['pixelsX'])\n if cc_within_segmask:\n try:\n simage.load_segmentation()\n seg_obj = simage._segmentation\n seg_obj.load_mask_data()\n simage_mask = seg_obj.mask\n except:\n warnings.warn('Segmentation mask not available for source image.')\n cc_within_segmask = False\n if simage.id == oimage.id:\n oimage_mask = simage_mask\n else:\n try:\n oimage.load_segmentation()\n seg_obj = oimage._segmentation\n seg_obj.load_mask_data()\n oimage_mask = seg_obj.mask\n except:\n warnings.warn('Segmentation mask not available for other image.')\n cc_within_segmask = False\n if simage_shape != oimage_shape:\n if simage_shape[0] <= oimage_shape[0] and simage_shape[1] <= oimage_shape[1]:\n target = 's'\n elif simage_shape[0] >= oimage_shape[0] and simage_shape[1] >= oimage_shape[1]:\n target = 'o'\n elif (simage_shape[0] * simage_shape[1]) <= (oimage_shape[0] * oimage_shape[1]):\n target = 's'\n else:\n target = 'o'\n if target == 's':\n sdata = self.masks[feature]\n odata = imfunc.image_resample(other.masks[other_feature], simage_shape)\n if cc_within_segmask:\n oimage_mask = imfunc.image_resample(oimage_mask, simage_shape)\n else:\n sdata = imfunc.image_resample(self.masks[feature], oimage_shape)\n odata = other.masks[other_feature]\n if cc_within_segmask:\n simage_mask = imfunc.image_resample(simage_mask, oimage_shape)\n else:\n sdata = self.masks[feature]\n odata = 
other.masks[other_feature]\n if cc_within_segmask:\n simage_mask = numpy.logical_and(simage_mask > 0, oimage_mask > 0)\n else:\n simage_mask = None\n if measure == 'dice':\n return imfunc.image_dice(sdata, odata, simage_mask)\n elif measure == 'cc':\n return imfunc.image_corr(sdata, odata, simage_mask)\n else:\n return imfunc.image_corr(imfunc.image_smooth_fft(sdata, smcc_fwhm),\n imfunc.image_smooth_fft(odata, smcc_fwhm), simage_mask)\n\n # set data\n def set_data(self):\n\n # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT\n import numpy\n\n if not self._api:\n raise RuntimeError('Only valid with API object set.')\n if self._image_obj is None:\n try:\n self._image_obj = self._api.image(self.image_id)\n except:\n raise\n try:\n spmap = self._image_obj.superpixels['map']\n if spmap is None:\n self._image_obj.map_superpixels()\n spmap = self._image_obj.superpixels['map']\n spshape = self._image_obj.superpixels['idx'].shape\n except:\n raise\n self.masks = dict()\n for (key, val) in self.features.items():\n maskimg = numpy.zeros((spshape[0] * spshape[1]), numpy.uint8)\n for (idx, weight) in zip(val['idx'], val['lst']):\n if float(weight) == 1.0:\n maskimg[spmap[idx,0:spmap[idx,-1]]] = 255\n else:\n w = min(255, int(weight * 255.0))\n maskimg[spmap[idx,0:spmap[idx,-1]]] = w\n self.masks[key] = maskimg.reshape(spshape)\n\n # show image in notebook\n def show_in_notebook(self,\n features:Any = None,\n color_code:list = [255, 0, 0],\n alpha:float = 1.0,\n on_image:bool = True,\n max_size:int = None,\n call_display:bool = True,\n ) -> object:\n\n # IMPORT DONE HERE TO SAVE TIME AT MODULE INIT\n import numpy\n from .imfunc import color_superpixels, write_image\n\n try:\n from ipywidgets import Image as ImageWidget\n from IPython.display import display\n except:\n warnings.warn('ipywidgets.Image or IPython.display.display not available')\n return\n if not isinstance(color_code, list) or len(color_code) != 3:\n color_code = [255, 0, 0]\n if not isinstance(alpha, float) or (alpha < 0.0) or (alpha > 1.0):\n alpha = 1.0\n if features is None:\n features = {name: [self._api.feature_color(name), alpha] for\n name in self.features.keys()}\n elif isinstance(features, str):\n if not features in self.features:\n raise KeyError('Feature \"' + features + '\" not found.')\n features = {features: [color_code, alpha]}\n elif isinstance(features, list):\n features_list = features\n features = dict()\n for feature in features_list:\n if not feature in self.features:\n continue\n if feature == features[0]:\n features[feature] = [color_code, alpha]\n else:\n rand_color = numpy.random.randint(0, 255, 3).tolist()\n features[feature] = [rand_color, alpha]\n elif not isinstance(features, dict):\n raise ValueError('Invalid features')\n else:\n for (name, code) in features.items():\n if not isinstance(code, list) or not (\n (len(code) == 2) and (len(code[0]) == 3) and\n isinstance(code[1], float) and code[1] >= 0.0 and code[1] <= 1.0):\n rand_color = numpy.random.randint(0, 255, 3).tolist()\n features[name] = [rand_color, alpha]\n\n if max_size is None:\n max_size = ISIC_IMAGE_DISPLAY_SIZE_MAX\n \n try:\n image_id = self.image_id\n if self._image_obj is None:\n if image_id in self._api._image_objs:\n self._image_obj = self._api._image_objs[image_id]\n image_odata = self._image_obj.data\n image_osp = self._image_obj.superpixels\n elif image_id in self._api._image_cache:\n image_info = self._api._image_cache[image_id]\n self._image_obj = Image(image_info, api=self._api,\n load_image_data=True, load_superpixels=True)\n if 
self._api._store_objs:\n self._api._image_objs[image_id] = self._image_obj\n image_odata = self._image_obj.data\n image_osp = self._image_obj.superpixels\n else:\n self._image_obj = self._api.image(image_id,\n load_image_data=True, load_superpixels=True)\n image_odata = None\n image_osp = None\n else:\n image_odata = self._image_obj.data\n image_osp = self._image_obj.superpixels\n image_obj = self._image_obj\n if image_obj.data is None:\n image_obj.load_image_data()\n if image_obj.superpixels['map'] is None:\n image_obj.load_superpixels(map_superpixels=True)\n image_shape = image_obj.superpixels['shp']\n image_height = image_shape[0]\n image_width = image_shape[1]\n image_spmap = image_obj.superpixels['map']\n except Exception as e:\n warnings.warn('Problem with associated image: ' + str(e))\n if not self._image_obj is None:\n if not image_osp is None:\n self._image_obj.data = image_odata\n self._image_obj.superpixels = image_osp\n else:\n self._image_obj.clear_data()\n return\n if on_image:\n image_data = image_obj.data.copy()\n image_data_shape = image_data.shape\n if len(image_data_shape) < 3:\n planes = 1\n else:\n planes = image_data_shape[2]\n image_data.shape = (image_height * image_width, planes)\n else:\n planes = 3\n image_data = numpy.zeros((image_height * image_width, planes),\n dtype=numpy.uint8)\n planes = min(3, planes)\n for (feature, color_spec) in features.items():\n splist = numpy.asarray(self.features[feature]['idx'])\n spvals = numpy.asarray(self.features[feature]['lst'])\n color_superpixels(image_data,\n splist, image_spmap, color_spec[0], color_spec[1], spvals)\n image_data.shape = (image_height, image_width, planes)\n if not self._image_obj is None:\n if not image_osp is None:\n self._image_obj.data = image_odata\n self._image_obj.superpixels = image_osp\n else:\n self._image_obj.clear_data()\n if on_image:\n imformat = 'jpg'\n else:\n imformat = 'png'\n buffer_data = write_image(image_data, 'buffer', imformat)\n image_max_xy = max(image_width, image_height)\n shrink_factor = max(1.0, image_max_xy / max_size)\n image_width = int(image_width / shrink_factor)\n image_height = int(image_height / shrink_factor)\n try:\n image_out = ImageWidget(value=buffer_data,\n width=image_width, height=image_height)\n if call_display:\n display(image_out)\n return None\n return image_out\n except Exception as e:\n warnings.warn('Problem producing image for display: ' + str(e))\n return None\n" ]
[ [ "numpy.asarray", "numpy.logical_and", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AntonKorneev/classy_code
[ "fdc673938b3ee201d5d2fa924692a64a9bcaf466" ]
[ "Solution.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport json as js\nimport requests as req\nfrom tkinter import filedialog\nfrom tkinter import *\nimport sys\nimport pandas as pd\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport httpagentparser as pars\nprint(\"Пожалуйста загрузите файл test.csv\")\nTk().withdraw() #указываем путь к выборке\nTk().update()\ninit_csv = filedialog.askopenfilename(initialdir = \"/\",title = \"Укажите путь к файлу\",filetypes = ((\"csv files\",\"*.csv\"),(\"all files\",\"*.*\")))\nTk().destroy()\nif len(init_csv)==0:\n print(\"Не указан .csv файл, выход из программы\")\n sys.exit()\ndf = pd.read_csv(init_csv, sep=';') #превращаем выборку в DataFrame\n\n\n# In[2]:\n\n\ndef find_mask(df_x,column_name,column_value):\n #Данная функция предназначена для облегчения читаемости кода. Она строит выборку из полученного DataFrame по заданным условиям\n mask = df_x[column_name].values == column_value #для ускорения поиска воспользуемся mask\n temp_dataframe = mask.nonzero()[0]\n return temp_dataframe\n\n\n# In[3]:\n\n\ndef ip_check(IPv4):\n #Данная функция предназначена для получения подробной информации об IP методом парсинга БД RIPE Network и портала https://check-host.net/\n data = js.loads(req.get(\"https://rest.db.ripe.net/search.json?query-string=\"+IPv4).text) #загружаем информацию из БД RIPE при помощи запроса данных в формате JSON\n i,n=0,0\n for data[\"objects\"][\"object\"][i] in data[\"objects\"][\"object\"]:\n if data[\"objects\"][\"object\"][i][\"type\"]==\"inetnum\":\n for data[\"objects\"][\"object\"][i][\"attributes\"][\"attribute\"][n] in data[\"objects\"][\"object\"][i][\"attributes\"][\"attribute\"]:\n if data[\"objects\"][\"object\"][i][\"attributes\"][\"attribute\"][n][\"name\"]==\"netname\": #ищем нужную строку, содержащую название подсети\n NetName = data[\"objects\"][\"object\"][i][\"attributes\"][\"attribute\"][n][\"value\"] #получаем искомое значение\n break\n n+=1\n elif data[\"objects\"][\"object\"][i][\"type\"]==\"route\": #ищем нужную строку, содержащую адрес подсети\n Subnetwork = data[\"objects\"][\"object\"][i][\"attributes\"][\"attribute\"][0][\"value\"] #получаем искомое значение, пользуясь тем, что первой строкой всегда идёт адрес подсети\n break\n n=0\n i+=1\n header = {\"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36\",\n \"X-Requested-With\": \"XMLHttpRequest\"} #эмулируем клиент-браузер, в связи с тем, что сайт отказывается отвечать на прямые запросы программы\n data2 = pd.read_html(req.get(\"https://check-host.net/ip-info?host=\"+IPv4+\"#ip_info-ip2location\", headers=header).text) #загружаем информацию с портала в виде HTML-таблиц (DataFrame)\n Provider,Country,Region,City = data2[2].at[4,1],data2[4].at[5,1], data2[4].at[6,1], data2[4].at[7,1] #пользуемся жёсткой структурой таблиц и получаем искомые значения (Название провайдера, страны, региона и города)\n if len(str(Provider))<5:\n Provider = data2[2].at[3,1] #исправляем потенциальные ошибки\n return [Country, Region, City, Provider, NetName, Subnetwork] #выдаём полученную информацию в виде списка\n\n\n# In[4]:\n\n\ndef usag_check(UsAg):\n #Данная функция предназначена для получения подробной информации о User Agent клиента при помощи анализа по ключевым словам (httpagentparser + очистка)\n mob_check = {'iPhone', 'iOS', 'Android', 'Windows Phone'}\n while True:\n try:\n IsMobile=pars.detect(UsAg)[\"os\"][\"name\"] #оказалось, что скрипт считает Android не ос, а 
платформой\n break\n except KeyError:\n IsMobile=\"Undefined\"\n break\n while True:\n try: \n OS=pars.detect(UsAg)[\"platform\"][\"name\"] #поэтому их пришлось поменять местами\n break\n except KeyError:\n OS=\"Undefined\"\n break\n while True:\n try: \n OS_ver=pars.detect(UsAg)[\"os\"][\"version\"]\n break\n except KeyError:\n OS_ver=\"Undefined\"\n break\n while True:\n try: \n Browser=pars.detect(UsAg)[\"browser\"][\"name\"]\n break\n except KeyError:\n Browser=\"Undefined\"\n break\n while True:\n try: \n Br_ver=pars.detect(UsAg)[\"browser\"][\"version\"]\n break\n except KeyError:\n Br_ver=\"Undefined\"\n break\n if IsMobile==None: #методом проб и ошибок было найдено странное исключение, упорядочим его\n IsMobile=\"Undefined\"\n if IsMobile in mob_check or OS in mob_check: #блок проверки на мобильное устройство\n IsMobile=True\n else:\n if IsMobile != \"Undefined\":\n IsMobile=False\n if OS_ver==\"Undefined\": #здесь и далее ручная доработка парсинга User Agent библиотеки httpagentparser\n if OS==\"Android\":\n OS_ver=UsAg[UsAg.find(\"Android \")+8:UsAg.find(\",\", UsAg.find(\"Android \")+8)]\n if OS==\"iOS\":\n OS_ver=UsAg[UsAg.find(\"iPhone OS \")+10:UsAg.find(\" like Mac\", UsAg.find(\"iPhone OS \")+10)]\n if OS==\"Mac OS\":\n OS_ver=UsAg[UsAg.find(\" \", UsAg.find(\"Mac OS \")+7)+1:UsAg.find(\")\", UsAg.find(\"Mac OS \")+7)]\n elif len(OS_ver)>3 and OS==\"Windows\":\n OS_ver=UsAg[UsAg.find(\"NT \"):UsAg.find(\",\", UsAg.find(\"NT \"))]\n if OS_ver==\"NT 10.0\":\n OS_ver=10\n if OS_ver==\"NT 6.3\":\n OS_ver=8.1\n if OS_ver==\"NT 6.2\":\n OS_ver=8\n if OS_ver==\"NT 6.1\":\n OS_ver=7\n if OS_ver==\"NT 6.0\":\n OS_ver=\"Vista\"\n if Browser==\"Undefined\" and OS==\"iOS\":\n Browser=UsAg[UsAg.rfind(\" \")+1:]\n if Browser[:6]==\"Mobile\":\n Browser,Br_ver=\"Webkit based browser\",Browser[7:]\n if Br_ver==\"Undefined\" and Browser==\"Microsoft Internet Explorer\":\n Br_ver=UsAg[UsAg.find(\"rv:\")+3:UsAg.find(\")\", UsAg.find(\"rv:\"))] \n return [IsMobile, OS, OS_ver, Browser, Br_ver]\n\n\n# In[5]:\n\n\ndef calc_helper(df_x, aim_i, true_i, rd_id):\n #Данная функция предназначена для облегчения читаемости кода. 
Она проводит операции с таблицами устройств одного логина, присваивая реальный идентификатор устройства\n true_el = df_x.at[true_i, \"rdev_id\"]\n aim_el = df_x.at[aim_i, \"rdev_id\"]\n if true_el == aim_el == None: #рассмотрим четыре случая: первый - ни один из реальных индентификаторов не определён\n df_x.at[true_i, \"rdev_id\"]=rd_id\n df_x.at[aim_i, \"rdev_id\"]=rd_id\n rd_id+=1\n elif true_el == None: #второй - верхний реальный индентификатор не определён\n df_x.at[true_i, \"rdev_id\"]=df_x.at[aim_i, \"rdev_id\"]\n elif aim_el == None: #третий - нижний релаьный индентификатор не определён\n df_x.at[aim_i, \"rdev_id\"]=df_x.at[true_i, \"rdev_id\"]\n elif true_el != None != aim_el: #последний - оба определены\n result=min(true_el,aim_el)\n aim=max(true_el,aim_el)\n temp_df = df_x.iloc[find_mask(df_x,\"rdev_id\",aim)]\n for temp_index in temp_df.index: #в таком случае - сольём эти номера вместе (по меньшему)\n df_x.at[temp_index, \"rdev_id\"]=result\n return rd_id\n\n\n# In[6]:\n\n\ndef make_a_random_graph(df_x, true_column, aim_column):\n #Данная функция предназначена для визуализации случайного графа (размерности от 4 до 16 вершин) зависимости между двумя заданными параметрами\n inner_df=df_x\n unique_values_list=pd.unique(df_x[[true_column]].values.ravel('K'))\n while len(inner_df)>15 or len(inner_df)<3:\n random_number = np.random.randint(group_count)\n random_value = unique_values_list[random_number]\n inner_df = df_x.loc[find_mask(df_x,true_column,random_value)]\n g=nx.from_pandas_edgelist(inner_df, true_column, aim_column)\n node_size_list=[20000]+[5000 for x in range(len(inner_df))]\n if true_column == \"login\":\n node_color_list=[0]+[1 for x in range(len(inner_df))]\n else:\n node_color_list=node_size_list\n return nx.draw_kamada_kawai(g, cmap=plt.cm.Pastel1, with_labels=True,node_size=node_size_list,node_color=node_color_list,font_weight=\"bold\")\n\n\n# In[7]:\n\n\ndf['country'],df['region'],df['city'],df['provider'],df['netname'],df['subnetwork'] = None,None,None,None,None,None #создаём столбцы для\ndf['isMobile'],df['os'],df['os_ver'],df['browser'],df['br_ver'] = None,None,None,None,None #последующего заполнения данными об IP и User Agent\nip_set=set()\ndf_new=df #подготавливаем таблицу для будущего .csv файла\nip_inc=df['ip'].str.count(',').sum() #посчитаем, насколько увеличится таблица, если разложить её так, чтобы в строке был только один IP-адрес\nfin_row=len(df.index)+ip_inc #посчитаем итоговую длину таблицы\ncompl_bar={int(fin_row * (x+1) * 0.05) for x in range(19)}\nprint(\"Ведётся подготовка файла.csv [скорость неравномерная]\")\nfor row_count in range(fin_row):\n if df_new.ip[row_count][0]==\"[\": #удалим лишние скобки, если они есть в поле\n ip_num=str(df_new.ip[row_count][1:-1]).split(', ')\n else:\n ip_num=str(df_new.ip[row_count]).split(', ')\n if len(ip_num)>1: #добавим новые строки с другими IP-адресами, если их больше, чем один\n for inner_count in range(len(ip_num)-1):\n df_new = df_new.append(pd.Series([df.device_id[row_count], df.login[row_count], ip_num[inner_count+1], df.user_agent[row_count], None, None, None, None, None, None, None, None, None, None, None], index=df_new.columns ), ignore_index=True)\n df_new.ip[row_count]=ip_num[0]\n if ip_num[0] not in ip_set: #чтобы не перегружать БД лишними запросами - устроим проверку на наличие данных в таблице при помощи множества IP\n ip_info=ip_check(ip_num[0][1:-1])\n ip_set.add(ip_num[0])\n else:\n ip_info = df_new.loc[find_mask(df_new,\"ip\",ip_num[0]).min()].tolist()[4:10]\n df_new.country[row_count], 
df_new.region[row_count], df_new.city[row_count], df_new.provider[row_count], df_new.netname[row_count], df_new.subnetwork[row_count] = ip_info #добавляем информацию об IP\n df_new.isMobile[row_count], df_new.os[row_count], df_new.os_ver[row_count], df_new.browser[row_count], df_new.br_ver[row_count] = usag_check(df_new.user_agent[row_count]) #добавляем информацию из User Agent\n if row_count in compl_bar: #выведем процент завершения для пользователя, в связи с длительностью операции\n compl_bar_status = int(100*row_count/fin_row)\n if compl_bar_status != 50:\n compl_bar_status += 1\n print(\"Выполнено \" + str(compl_bar_status) + \"%\")\nprint(\"Выполнено 100%. Сохраните сформированный файл\")\nip_set.clear()\nTk().withdraw() \nTk().update()\ntask1_path = filedialog.askdirectory(initialdir = \"/\",title = \"Укажите путь сохранения файла\")\nTk().destroy()\nif len(task1_path)==0:\n print(\"Не указан путь сохранения, выход из программы\")\n sys.quit()\ndf_new.to_csv(task1_path+\"/task_1.csv\", sep=';', index=False)\n\n\n# In[8]:\n\n\ndf_2 = df_new #подготавливаем таблицу для выявления реальных устройств\ndf_2 = df_2.iloc[0:len(df.index), [0,1,10,11,12,13,14]].drop_duplicates().reset_index(drop=True) #убираем лишние колонки и строки\ndf_2['rdev_id'], df_2['reason'] = None, None #добавим столбцы для группировки идентификаторов устройств за реальным устройством\nun_logs = pd.unique(df_2[[\"login\"]].values.ravel('K')) #формируем массив уникальных логинов\nreasons = [\"установлена связь между людьми\",\"смена ОС\",\"смена версии ОС\",\"смена браузера\",\"смена версии браузера\",\"полностью совпадают (при разных device_id)\"]\nreas_set, temp_reas_set = set(), set()\nr_id = 1\nfor un_log in un_logs: #запустим процедуру внутренней проверки (внутри одного логина), найдём DataFrame каждого уникального логина\n inner_counter = 0\n inner_check = df_2.loc[find_mask(df_2,\"login\",un_log)]\n for true_index in inner_check.index: #сравним каждую строку уникального логина с каждой последующей строкой уникального логина\n reas_set.clear()\n inner_counter +=1\n for temp_counter in range(len(inner_check.index)-inner_counter):\n aim_index=inner_check.index[temp_counter+inner_counter] \n for inn_column_name in inner_check.columns.values[2:-2]: #сделаем это по столбцам\n true_elem=inner_check.at[true_index, inn_column_name]\n aim_elem=inner_check.at[aim_index, inn_column_name]\n #print(true_elem != aim_elem) #оставляю в коде для облегчения проверки логики программы(1)\n if inn_column_name==\"isMobile\": #устроим сначала проверку на совпадение реального идентификатора\n if inner_check.at[true_index, \"rdev_id\"]==inner_check.at[aim_index, \"rdev_id\"]!=None:\n break\n if inn_column_name==\"isMobile\": #затем - дополнительную проверку на совпадение device_id\n if inner_check.at[true_index, \"device_id\"]==inner_check.at[aim_index, \"device_id\"]:\n r_id=calc_helper(df_2,aim_index,true_index,r_id)\n break\n if inn_column_name==\"isMobile\" and true_elem != aim_elem: #реальное устройство не может быть и мобильным, и стационарным одновременно\n break\n if inn_column_name==\"os\" and true_elem != aim_elem: #проверка имени ОС\n if true_elem==\"Mac OS\" or aim_elem==\"Mac OS\": #предполагаем, что Mac OS невозможно поставить на ПК, а Linux/Windows на Mac\n break\n else:\n temp_reas_set.add(1) \n if inn_column_name==\"os_ver\" and true_elem != aim_elem: #проверка версии ОС\n if 1 not in temp_reas_set: #если ОС отличается, сверять версии нет смысла\n temp_reas_set.add(2)\n if inn_column_name==\"browser\" and true_elem != 
aim_elem: #проверка названия браузера\n if 1 in temp_reas_set: #согласно предположению невозможна одновременная смена ОС и браузера, если не будет обнаружено промежуточных этапов в других записях одного логина\n temp_reas_set.clear()\n break\n elif 2 in temp_reas_set: #та же ситуация с одновременной сменой версии ОС и браузера\n temp_reas_set.clear()\n break\n else:\n temp_reas_set.add(3)\n if inn_column_name==\"br_ver\" and true_elem != aim_elem: #проверка версии браузера\n if 3 not in temp_reas_set: #если браузер отличается, сверять версии нет смысла\n if 1 in temp_reas_set: #согласно предположению невозможна одновременная смена ОС и версии браузера, однако смена версии ОС и версии браузера допустима\n temp_reas_set.clear()\n break\n else:\n temp_reas_set.add(4) \n if inn_column_name==\"br_ver\":\n if len(temp_reas_set)==0: #на случай полного совпадения кроме device_id\n temp_reas_set.add(5)\n r_id=calc_helper(df_2,aim_index,true_index,r_id)\n reas_set=reas_set.union(temp_reas_set)\n #print(\"Y\", temp_reas_set) #оставляю в коде для облегчения проверки логики программы(2)\n temp_reas_set.clear()\n #print(true_index, aim_index) #оставляю в коде для облегчения проверки логики программы(3) \n if len(reas_set)==0 and df_2.at[true_index, \"rdev_id\"]==None: #проставим уникальные идентификаторы для устройств без дубликатов (проверка с None на случай наличия дубля по device_id)\n df_2.at[true_index, \"rdev_id\"]=r_id\n r_id+=1\n else: #проставим предполагаемые причины неуникальности\n df_2.at[true_index, \"reason\"]=set() #по неизвестной причине передача множества reas_set напрямую приводила к образованию пустого множества (предположительно из-за NoneType элемента)\n df_2.at[true_index, \"reason\"].update(reas_set)\nprint(\"Результат внутренней проверки: обнаружено \" + str(len(pd.unique(df_2[[\"rdev_id\"]].values.ravel('K')))) + \" потенциально реальных устройств\")\n\n\n# In[9]:\n\n\ndf_3=df_2 #упорядочиваем полученные данные в новой таблице\ndf_3=df_3.iloc[:, [0,1,7,8]] #удалим лишние столбцы. 
В связи с тем, что столбец reason состоит из множеств, удалим дубли после его очистки\nun_devs = pd.unique(df_3[[\"device_id\"]].values.ravel('K')) #формируем массив уникальных идентификаторов устройств\nfor un_dev in un_devs: #устроим внешнюю проверку (между логинами по идентификатору устройства)\n outer_check = df_3.loc[find_mask(df_3,\"device_id\",un_dev)]\n outer_counter = 0\n for aim_index in outer_check.index: #проверяем, чтобы у одного идентификатора устройства не было нескольких rdev_id\n if outer_counter==0:\n true_index=aim_index\n else:\n if df_3.at[aim_index, \"rdev_id\"] != df_3.at[true_index, \"rdev_id\"]:\n r_id=calc_helper(df_3, aim_index, true_index, r_id)\n outer_counter+=1\nun_rdevs = pd.unique(df_3[[\"rdev_id\"]].values.ravel('K')) #формируем массив уникальных реальных идентификаторов устройств\ngroup_count=0\nfor un_rdev in un_rdevs: #объединяем множества причин для каждого реалиного идентификатора устройства\n outer_check_2 = df_3.loc[find_mask(df_3,\"rdev_id\",un_rdev)]\n reas_set.clear()\n reas_statement = \"\"\n group_count+=1\n group_name = \"r_dev_\" + str(group_count)\n for aim_index in outer_check_2.index:\n curr_reas = df_3.at[aim_index, \"reason\"]\n if curr_reas == None:\n curr_reas=set()\n reas_set=reas_set.union(curr_reas)\n for n in range(len(reas_set)):\n reas = reas_set.pop()\n reas_statement += str(reasons[reas]) + \", \" #переведём причины на русский язык\n for aim_index in outer_check_2.index: #и вставим их в текст, а также обновим названия\n if len(reas_statement)==0:\n reas_statement=\"уникальный \"\n df_3.at[aim_index, \"reason\"] = reas_statement[:-2]\n df_3.at[aim_index, \"rdev_id\"] = group_name\nprint(\"Результат внешней проверки: обнаружено \" + str(group_count) + \" реальных устройств(а)\")\n\n\n# In[12]:\n\n\ndf_Task2,df_Task3=df_3,df_3\ndf_Task2=df_Task2.iloc[:, [0,2,3]].drop_duplicates().reset_index(drop=True) #убираем лишние строки и столбец логинов, чтобы получить ответ на второе задание\ndf_Task3=df_Task3.iloc[:, [1,2]].drop_duplicates().reset_index(drop=True) #проводим ту же операцию для получения ответа на третье задание\nprint(\"Таблица по результатам задания 2\")\nprint(df_Task2)\nprint(\"Таблица по результатам задания 3\")\nprint(df_Task3)\nprint(\"Подготовка общего графа (розовым отмечены люди, серым - реальные устройства):\")\ngraph_df=df_Task3 #сформируем таблицу для визуализации\nfor i in range(len(graph_df)): #уменьшим логин до первых 8 символов\n graph_df.at[i, \"login\"]=graph_df.at[i, \"login\"][:8]\ng=nx.from_pandas_edgelist(graph_df, \"rdev_id\", \"login\") #создадим общий график\nnode_size_list=[]\nfor node in g.nodes():\n if node[:2]==\"r_\":\n node_size_list.append(100)\n else:\n node_size_list.append(10)\nnx.draw_kamada_kawai(g, cmap=plt.cm.Pastel1, node_size=node_size_list, node_color=node_size_list, font_weight=\"bold\")\nplt.show()\nprint(\"Граф по случайному реальному устройству (логин сокращён до восьми символов):\")\nmake_a_random_graph(graph_df, \"rdev_id\", \"login\")\nplt.show()\nprint(\"Граф по случайному логину (логин сокращён до восьми символов):\")\nmake_a_random_graph(graph_df, \"login\", \"rdev_id\")\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.Series", "matplotlib.pyplot.show", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
goodatlas/fairseq
[ "7dafb05754fe268bb5f76a1c97cf3a14062f44e5" ]
[ "fairseq_cli/train.py" ]
[ "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTrain a new model on one or across multiple GPUs.\n\"\"\"\n\nimport argparse\nimport logging\nimport math\nimport os\nimport sys\nfrom typing import Dict, Optional, Any, List, Tuple, Callable\n\nimport numpy as np\nimport torch\nfrom fairseq import (\n checkpoint_utils,\n options,\n quantization_utils,\n tasks,\n utils,\n)\nfrom fairseq.data import iterators\nfrom fairseq.data.plasma_utils import PlasmaStore\nfrom fairseq.dataclass.configs import FairseqConfig\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils\nfrom fairseq.file_io import PathManager\nfrom fairseq.logging import meters, metrics, progress_bar\nfrom fairseq.model_parallel.megatron_trainer import MegatronTrainer\nfrom fairseq.trainer import Trainer\nfrom omegaconf import DictConfig, OmegaConf\n\n\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=os.environ.get(\"LOGLEVEL\", \"INFO\").upper(),\n stream=sys.stdout,\n)\nlogger = logging.getLogger(\"fairseq_cli.train\")\n\n\ndef main(cfg: FairseqConfig) -> None:\n if isinstance(cfg, argparse.Namespace):\n cfg = convert_namespace_to_omegaconf(cfg)\n\n utils.import_user_module(cfg.common)\n\n if distributed_utils.is_master(cfg.distributed_training) and \"job_logging_cfg\" in cfg:\n # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126)\n logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg))\n\n assert (\n cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None\n ), \"Must specify batch size either with --max-tokens or --batch-size\"\n metrics.reset()\n\n np.random.seed(cfg.common.seed)\n utils.set_torch_seed(cfg.common.seed)\n\n if distributed_utils.is_master(cfg.distributed_training):\n checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)\n\n # Print args\n logger.info(cfg)\n\n if cfg.checkpoint.write_checkpoints_asynchronously:\n try:\n import iopath # noqa: F401\n except ImportError:\n logging.exception(\n \"Asynchronous checkpoint writing is specified but iopath is \"\n \"not installed: `pip install iopath`\"\n )\n return\n\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(cfg.task)\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n for valid_sub_split in cfg.dataset.valid_subset.split(\",\"):\n task.load_dataset(valid_sub_split, combine=False, epoch=1)\n\n assert cfg.criterion, \"Please specify criterion to train a model\"\n\n # Build model and criterion\n if cfg.distributed_training.ddp_backend == \"fully_sharded\":\n with fsdp_enable_wrap(cfg.distributed_training):\n model = fsdp_wrap(task.build_model(cfg.model))\n else:\n model = task.build_model(cfg.model)\n criterion = task.build_criterion(cfg.criterion)\n logger.info(model)\n logger.info(\"task: {}\".format(task.__class__.__name__))\n logger.info(\"model: {}\".format(model.__class__.__name__))\n logger.info(\"criterion: {}\".format(criterion.__class__.__name__))\n logger.info(\n \"num. shared model params: {:,} (num. 
trained: {:,})\".format(\n sum(p.numel() for p in model.parameters() if not getattr(p, \"expert\", False)),\n sum(p.numel() for p in model.parameters() if not getattr(p, \"expert\", False) and p.requires_grad)\n )\n )\n\n logger.info(\n \"num. expert model params: {} (num. trained: {})\".format(\n sum(p.numel() for p in model.parameters() if getattr(p, \"expert\", False)),\n sum(p.numel() for p in model.parameters() if getattr(p, \"expert\", False) and p.requires_grad),\n )\n )\n\n # (optionally) Configure quantization\n if cfg.common.quantization_config_path is not None:\n quantizer = quantization_utils.Quantizer(\n config_path=cfg.common.quantization_config_path,\n max_epoch=cfg.optimization.max_epoch,\n max_update=cfg.optimization.max_update,\n )\n else:\n quantizer = None\n\n # Build trainer\n if cfg.common.model_parallel_size == 1:\n trainer = Trainer(cfg, task, model, criterion, quantizer)\n else:\n trainer = MegatronTrainer(cfg, task, model, criterion)\n logger.info(\n \"training on {} devices (GPUs/TPUs)\".format(\n cfg.distributed_training.distributed_world_size\n )\n )\n logger.info(\n \"max tokens per device = {} and max sentences per device = {}\".format(\n cfg.dataset.max_tokens,\n cfg.dataset.batch_size,\n )\n )\n\n # Load the latest checkpoint if one is available and restore the\n # corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(\n cfg.checkpoint,\n trainer,\n # don't cache epoch iterators for sharded datasets\n disable_iterator_cache=task.has_sharded_data(\"train\"),\n )\n if cfg.common.tpu:\n import torch_xla.core.xla_model as xm\n xm.rendezvous(\"load_checkpoint\") # wait for all workers\n\n max_epoch = cfg.optimization.max_epoch or math.inf\n lr = trainer.get_lr()\n train_meter = meters.StopwatchMeter()\n train_meter.start()\n while epoch_itr.next_epoch_idx <= max_epoch:\n if lr <= cfg.optimization.stop_min_lr:\n logger.info(\n f\"stopping training because current learning rate ({lr}) is smaller \"\n \"than or equal to minimum learning rate \"\n f\"(--stop-min-lr={cfg.optimization.stop_min_lr})\"\n )\n break\n\n # train for one epoch\n valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)\n if should_stop:\n break\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n epoch_itr = trainer.get_train_iterator(\n epoch_itr.next_epoch_idx,\n # sharded data: get train iterator for next epoch\n load_dataset=task.has_sharded_data(\"train\"),\n # don't cache epoch iterators for sharded datasets\n disable_iterator_cache=task.has_sharded_data(\"train\"),\n )\n train_meter.stop()\n logger.info(\"done training in {:.1f} seconds\".format(train_meter.sum))\n\n # ioPath implementation to wait for all asynchronous file writes to complete.\n if cfg.checkpoint.write_checkpoints_asynchronously:\n logger.info(\n \"ioPath PathManager waiting for all asynchronous checkpoint \"\n \"writes to finish.\"\n )\n PathManager.async_close()\n logger.info(\"ioPath PathManager finished waiting.\")\n\n\ndef should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:\n # skip check if no validation was done in the current epoch\n if valid_loss is None:\n return False\n if cfg.checkpoint.patience <= 0:\n return False\n\n def is_better(a, b):\n return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b\n\n prev_best = getattr(should_stop_early, \"best\", None)\n if prev_best is None or is_better(valid_loss, prev_best):\n should_stop_early.best = valid_loss\n 
should_stop_early.num_runs = 0\n return False\n else:\n should_stop_early.num_runs += 1\n if should_stop_early.num_runs >= cfg.checkpoint.patience:\n logger.info(\n \"early stop since valid performance hasn't improved for last {} runs\".format(\n cfg.checkpoint.patience\n )\n )\n return True\n else:\n return False\n\n\[email protected](\"train\")\ndef train(\n cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr\n) -> Tuple[List[Optional[float]], bool]:\n \"\"\"Train the model for one epoch and return validation losses.\"\"\"\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,\n shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),\n )\n update_freq = (\n cfg.optimization.update_freq[epoch_itr.epoch - 1]\n if epoch_itr.epoch <= len(cfg.optimization.update_freq)\n else cfg.optimization.update_freq[-1]\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n if cfg.common.tpu:\n itr = utils.tpu_data_loader(itr)\n progress = progress_bar.progress_bar(\n itr,\n log_format=cfg.common.log_format,\n log_interval=cfg.common.log_interval,\n epoch=epoch_itr.epoch,\n tensorboard_logdir=(\n cfg.common.tensorboard_logdir\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n default_log_format=(\"tqdm\" if not cfg.common.no_progress_bar else \"simple\"),\n wandb_project=(\n cfg.common.wandb_project\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n wandb_run_name=os.environ.get(\n \"WANDB_NAME\", os.path.basename(cfg.checkpoint.save_dir)\n ),\n azureml_logging=(\n cfg.common.azureml_logging\n if distributed_utils.is_master(cfg.distributed_training)\n else False\n ),\n )\n progress.update_config(_flatten_config(cfg))\n\n trainer.begin_epoch(epoch_itr.epoch)\n\n valid_subsets = cfg.dataset.valid_subset.split(\",\")\n should_stop = False\n num_updates = trainer.get_num_updates()\n logger.info(\"Start iterating over samples\")\n for i, samples in enumerate(progress):\n with metrics.aggregate(\"train_inner\"), torch.autograd.profiler.record_function(\n \"train_step-%d\" % i\n ):\n log_output = trainer.train_step(samples)\n\n if log_output is not None: # not OOM, overflow, ...\n # log mid-epoch stats\n num_updates = trainer.get_num_updates()\n if num_updates % cfg.common.log_interval == 0:\n stats = get_training_stats(metrics.get_smoothed_values(\"train_inner\"))\n progress.log(stats, tag=\"train_inner\", step=num_updates)\n\n # reset mid-epoch stats after each log interval\n # the end-of-epoch stats will still be preserved\n metrics.reset_meters(\"train_inner\")\n\n end_of_epoch = not itr.has_next()\n valid_losses, should_stop = validate_and_save(\n cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch\n )\n\n if should_stop:\n break\n\n # log end-of-epoch stats\n logger.info(\"end of epoch {} (average epoch stats below)\".format(epoch_itr.epoch))\n stats = get_training_stats(metrics.get_smoothed_values(\"train\"))\n progress.print(stats, tag=\"train\", step=num_updates)\n\n # reset epoch-level meters\n metrics.reset_meters(\"train\")\n return valid_losses, should_stop\n\n\ndef _flatten_config(cfg: DictConfig):\n config = OmegaConf.to_container(cfg)\n # remove any legacy Namespaces and replace with a single \"args\"\n namespace = None\n for k, v in list(config.items()):\n if isinstance(v, argparse.Namespace):\n namespace = v\n del config[k]\n if namespace is not None:\n config[\"args\"] = vars(namespace)\n return config\n\n\ndef validate_and_save(\n 
cfg: DictConfig,\n trainer: Trainer,\n task: tasks.FairseqTask,\n epoch_itr,\n valid_subsets: List[str],\n end_of_epoch: bool,\n) -> Tuple[List[Optional[float]], bool]:\n num_updates = trainer.get_num_updates()\n max_update = cfg.optimization.max_update or math.inf\n\n # Stopping conditions (and an additional one based on validation loss later\n # on)\n should_stop = False\n if num_updates >= max_update:\n should_stop = True\n logger.info(\n f\"Stopping training due to \"\n f\"num_updates: {num_updates} >= max_update: {max_update}\"\n )\n\n training_time_hours = trainer.cumulative_training_time() / (60 * 60)\n if (\n cfg.optimization.stop_time_hours > 0\n and training_time_hours > cfg.optimization.stop_time_hours\n ):\n should_stop = True\n logger.info(\n f\"Stopping training due to \"\n f\"cumulative_training_time: {training_time_hours} > \"\n f\"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)\"\n )\n\n do_save = (\n (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)\n or should_stop\n or (\n cfg.checkpoint.save_interval_updates > 0\n and num_updates > 0\n and num_updates % cfg.checkpoint.save_interval_updates == 0\n and num_updates >= cfg.dataset.validate_after_updates\n )\n )\n do_validate = (\n (not end_of_epoch and do_save) # validate during mid-epoch saves\n or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)\n or should_stop\n or (\n cfg.dataset.validate_interval_updates > 0\n and num_updates > 0\n and num_updates % cfg.dataset.validate_interval_updates == 0\n )\n ) and not cfg.dataset.disable_validation\n\n # Validate\n valid_losses = [None]\n if do_validate:\n valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)\n\n should_stop |= should_stop_early(cfg, valid_losses[0])\n\n # Save checkpoint\n if do_save or should_stop:\n checkpoint_utils.save_checkpoint(\n cfg.checkpoint, trainer, epoch_itr, valid_losses[0]\n )\n\n return valid_losses, should_stop\n\n\ndef get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]:\n stats[\"wall\"] = round(metrics.get_meter(\"default\", \"wall\").elapsed_time, 0)\n return stats\n\n\ndef validate(\n cfg: DictConfig,\n trainer: Trainer,\n task: tasks.FairseqTask,\n epoch_itr,\n subsets: List[str],\n) -> List[Optional[float]]:\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n\n if cfg.dataset.fixed_validation_seed is not None:\n # set fixed seed for every validation\n utils.set_torch_seed(cfg.dataset.fixed_validation_seed)\n\n trainer.begin_valid_epoch(epoch_itr.epoch)\n valid_losses = []\n for subset in subsets:\n logger.info('begin validation on \"{}\" subset'.format(subset))\n\n # Initialize data iterator\n itr = trainer.get_valid_iterator(subset).next_epoch_itr(\n shuffle=False, set_dataset_epoch=False # use a fixed valid set\n )\n if cfg.common.tpu:\n itr = utils.tpu_data_loader(itr)\n progress = progress_bar.progress_bar(\n itr,\n log_format=cfg.common.log_format,\n log_interval=cfg.common.log_interval,\n epoch=epoch_itr.epoch,\n prefix=f\"valid on '{subset}' subset\",\n tensorboard_logdir=(\n cfg.common.tensorboard_logdir\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n default_log_format=(\"tqdm\" if not cfg.common.no_progress_bar else \"simple\"),\n wandb_project=(\n cfg.common.wandb_project\n if distributed_utils.is_master(cfg.distributed_training)\n else None\n ),\n wandb_run_name=os.environ.get(\n \"WANDB_NAME\", os.path.basename(cfg.checkpoint.save_dir)\n ),\n )\n\n # create a new root metrics 
aggregator so validation metrics\n # don't pollute other aggregators (e.g., train meters)\n with metrics.aggregate(new_root=True) as agg:\n for i, sample in enumerate(progress):\n if cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps:\n break\n trainer.valid_step(sample)\n\n # log validation stats\n stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])\n return valid_losses\n\n\ndef get_valid_stats(\n cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]\n) -> Dict[str, Any]:\n stats[\"num_updates\"] = trainer.get_num_updates()\n if hasattr(checkpoint_utils.save_checkpoint, \"best\"):\n key = \"best_{0}\".format(cfg.checkpoint.best_checkpoint_metric)\n best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min\n stats[key] = best_function(\n checkpoint_utils.save_checkpoint.best,\n stats[cfg.checkpoint.best_checkpoint_metric],\n )\n return stats\n\n\ndef cli_main(\n modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None\n) -> None:\n parser = options.get_training_parser()\n args = options.parse_args_and_arch(parser, modify_parser=modify_parser)\n\n cfg = convert_namespace_to_omegaconf(args)\n\n if cfg.common.use_plasma_view:\n server = PlasmaStore(path=cfg.common.plasma_path)\n logger.info(f\"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}\")\n\n if args.profile:\n with torch.cuda.profiler.profile():\n with torch.autograd.profiler.emit_nvtx():\n distributed_utils.call_main(cfg, main)\n else:\n distributed_utils.call_main(cfg, main)\n\n # if cfg.common.use_plasma_view:\n # server.server.kill()\n\n\nif __name__ == \"__main__\":\n cli_main()\n" ]
[ [ "torch.autograd.profiler.record_function", "torch.autograd.profiler.emit_nvtx", "torch.cuda.profiler.profile", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jwb37/LSeSim_adv
[ "97d8b10aab758a9cde713854f62314b144c5b913" ]
[ "models/cyclegan_networks.py" ]
[ "\"\"\"\nThe network architectures is based on the implementation of CycleGAN and CUT\nOriginal PyTorch repo of CycleGAN: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix\nOriginal PyTorch repo of CUT: https://github.com/taesungp/contrastive-unpaired-translation\nOriginal CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf\nOriginal CUT paper: https://arxiv.org/pdf/2007.15651.pdf\nWe use the network architecture for our default modal image translation\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport functools\nimport numpy as np\n\n\n##################################################################################\n# Discriminator\n##################################################################################\nclass D_NLayersMulti(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3,\n norm_layer=nn.BatchNorm2d, num_D=1):\n super(D_NLayersMulti, self).__init__()\n # st()\n self.num_D = num_D\n if num_D == 1:\n layers = self.get_layers(input_nc, ndf, n_layers, norm_layer)\n self.model = nn.Sequential(*layers)\n else:\n layers = self.get_layers(input_nc, ndf, n_layers, norm_layer)\n self.add_module(\"model_0\", nn.Sequential(*layers))\n self.down = nn.AvgPool2d(3, stride=2, padding=[\n 1, 1], count_include_pad=False)\n for i in range(1, num_D):\n ndf_i = int(round(ndf / (2**i)))\n layers = self.get_layers(input_nc, ndf_i, n_layers, norm_layer)\n self.add_module(\"model_%d\" % i, nn.Sequential(*layers))\n\n def get_layers(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw,\n stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2**n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2**n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=1, padding=padw),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1,\n kernel_size=kw, stride=1, padding=padw)]\n\n return sequence\n\n def forward(self, input):\n if self.num_D == 1:\n return self.model(input)\n result = []\n down = input\n for i in range(self.num_D):\n model = getattr(self, \"model_%d\" % i)\n result.append(model(down))\n if i != self.num_D - 1:\n down = self.down(down)\n return result\n\n\nclass NLayerDiscriminator(nn.Module):\n \"\"\"Defines a PatchGAN discriminator\"\"\"\n\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, no_antialias=False):\n \"\"\"Construct a PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(NLayerDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n kw = 4\n padw = 1\n if no_antialias:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]\n else:\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=1, padding=padw), 
nn.LeakyReLU(0.2, True),\n Downsample(ndf)\n # nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)\n ]\n\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1, n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n if no_antialias:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)]\n else:\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True),\n Downsample(ndf * nf_mult)\n # nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map\n self.model = nn.Sequential(*sequence)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.model(input)\n\n\nclass PixelDiscriminator(nn.Module):\n \"\"\"Defines a 1x1 PatchGAN discriminator (pixelGAN)\"\"\"\n\n def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):\n \"\"\"Construct a 1x1 PatchGAN discriminator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n \"\"\"\n super(PixelDiscriminator, self).__init__()\n if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n self.net = [\n nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),\n norm_layer(ndf * 2),\n nn.LeakyReLU(0.2, True),\n nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]\n\n self.net = nn.Sequential(*self.net)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.net(input)\n\n\n##################################################################################\n# Generator\n##################################################################################\nclass ResnetGenerator(nn.Module):\n \"\"\"Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.\n\n We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)\n \"\"\"\n\n def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6,\n padding_type='reflect', no_antialias=False, no_antialias_up=False, opt=None):\n \"\"\"Construct a Resnet-based generator\n\n Parameters:\n input_nc (int) -- the number of channels in input images\n output_nc (int) -- the number of channels in output images\n ngf (int) -- the number of filters in the last conv layer\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers\n n_blocks (int) -- the number of ResNet blocks\n padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero\n \"\"\"\n assert(n_blocks >= 0)\n super(ResnetGenerator, self).__init__()\n self.opt = 
opt\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n\n model = [nn.ReflectionPad2d(3),\n nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),\n norm_layer(ngf),\n nn.ReLU(True)]\n\n n_downsampling = 2\n for i in range(n_downsampling): # add downsampling layers\n mult = 2 ** i\n if no_antialias:\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True)]\n else:\n model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=1, padding=1, bias=use_bias),\n norm_layer(ngf * mult * 2),\n nn.ReLU(True),\n Downsample(ngf * mult * 2)\n # nn.AvgPool2d(kernel_size=2, stride=2)\n ]\n\n mult = 2 ** n_downsampling\n for i in range(n_blocks): # add ResNet blocks\n\n model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]\n\n for i in range(n_downsampling): # add upsampling layers\n mult = 2 ** (n_downsampling - i)\n if no_antialias_up:\n model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1, bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n else:\n model += [\n Upsample(ngf * mult),\n # nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),\n nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=1, bias=use_bias),\n norm_layer(int(ngf * mult / 2)),\n nn.ReLU(True)]\n model += [nn.ReflectionPad2d(3)]\n model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]\n model += [nn.Tanh()]\n\n self.model = nn.Sequential(*model)\n\n def forward(self, x, layers=[], encode_only=False):\n if len(layers) > 0:\n feat = x\n feats = []\n for layer_id, layer in enumerate(self.model):\n feat = layer(feat)\n if layer_id in layers:\n feats.append(feat)\n if layer_id == layers[-1] and encode_only:\n return None, feats\n return feat, feats\n else:\n out = self.model(x)\n return out, None\n\n\n##################################################################################\n# Basic Blocks\n##################################################################################\nclass ResnetBlock(nn.Module):\n \"\"\"Define a Resnet block\"\"\"\n\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n \"\"\"Initialize the Resnet block\n\n A resnet block is a conv block with skip connections\n We construct a conv block with build_conv_block function,\n and implement skip connections in <forward> function.\n Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf\n \"\"\"\n super(ResnetBlock, self).__init__()\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\n\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\n \"\"\"Construct a convolutional block.\n\n Parameters:\n dim (int) -- the number of channels in the conv layer.\n padding_type (str) -- the name of padding layer: reflect | replicate | zero\n norm_layer -- normalization layer\n use_dropout (bool) -- if use dropout layers.\n use_bias (bool) -- if the conv layer uses bias or not\n\n Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))\n \"\"\"\n conv_block = []\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type 
== 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\n if use_dropout:\n conv_block += [nn.Dropout(0.5)]\n\n p = 0\n if padding_type == 'reflect':\n conv_block += [nn.ReflectionPad2d(1)]\n elif padding_type == 'replicate':\n conv_block += [nn.ReplicationPad2d(1)]\n elif padding_type == 'zero':\n p = 1\n else:\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\n\n return nn.Sequential(*conv_block)\n\n def forward(self, x):\n \"\"\"Forward function (with skip connections)\"\"\"\n out = x + self.conv_block(x) # add skip connections\n return out\n\n\n###############################################################################\n# Helper Functions\n###############################################################################\ndef get_filter(filt_size=3):\n if(filt_size == 1):\n a = np.array([1., ])\n elif(filt_size == 2):\n a = np.array([1., 1.])\n elif(filt_size == 3):\n a = np.array([1., 2., 1.])\n elif(filt_size == 4):\n a = np.array([1., 3., 3., 1.])\n elif(filt_size == 5):\n a = np.array([1., 4., 6., 4., 1.])\n elif(filt_size == 6):\n a = np.array([1., 5., 10., 10., 5., 1.])\n elif(filt_size == 7):\n a = np.array([1., 6., 15., 20., 15., 6., 1.])\n\n filt = torch.Tensor(a[:, None] * a[None, :])\n filt = filt / torch.sum(filt)\n\n return filt\n\n\nclass Downsample(nn.Module):\n def __init__(self, channels, pad_type='reflect', filt_size=3, stride=2, pad_off=0):\n super(Downsample, self).__init__()\n self.filt_size = filt_size\n self.pad_off = pad_off\n self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2)), int(1. * (filt_size - 1) / 2), int(np.ceil(1. 
* (filt_size - 1) / 2))]\n self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]\n self.stride = stride\n self.off = int((self.stride - 1) / 2.)\n self.channels = channels\n\n filt = get_filter(filt_size=self.filt_size)\n self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))\n\n self.pad = get_pad_layer(pad_type)(self.pad_sizes)\n\n def forward(self, inp):\n if(self.filt_size == 1):\n if(self.pad_off == 0):\n return inp[:, :, ::self.stride, ::self.stride]\n else:\n return self.pad(inp)[:, :, ::self.stride, ::self.stride]\n else:\n return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])\n\n\nclass Upsample2(nn.Module):\n def __init__(self, scale_factor, mode='nearest'):\n super().__init__()\n self.factor = scale_factor\n self.mode = mode\n\n def forward(self, x):\n return torch.nn.functional.interpolate(x, scale_factor=self.factor, mode=self.mode)\n\n\nclass Upsample(nn.Module):\n def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):\n super(Upsample, self).__init__()\n self.filt_size = filt_size\n self.filt_odd = np.mod(filt_size, 2) == 1\n self.pad_size = int((filt_size - 1) / 2)\n self.stride = stride\n self.off = int((self.stride - 1) / 2.)\n self.channels = channels\n\n filt = get_filter(filt_size=self.filt_size) * (stride**2)\n self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))\n\n self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])\n\n def forward(self, inp):\n ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]\n if(self.filt_odd):\n return ret_val\n else:\n return ret_val[:, :, :-1, :-1]\n\n\ndef get_pad_layer(pad_type):\n if(pad_type in ['refl', 'reflect']):\n PadLayer = nn.ReflectionPad2d\n elif(pad_type in ['repl', 'replicate']):\n PadLayer = nn.ReplicationPad2d\n elif(pad_type == 'zero'):\n PadLayer = nn.ZeroPad2d\n else:\n print('Pad type [%s] not recognized' % pad_type)\n return PadLayer\n\n\nclass Identity(nn.Module):\n def forward(self, x):\n return x\n\n\ndef get_norm_layer(norm_type='instance'):\n \"\"\"Return a normalization layer\n\n Parameters:\n norm_type (str) -- the name of the normalization layer: batch | instance | none\n\n For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).\n For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.\n \"\"\"\n if norm_type == 'batch':\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)\n elif norm_type == 'instance':\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n elif norm_type == 'none':\n def norm_layer(x): return Identity()\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n" ]
[ [ "torch.nn.Sequential", "torch.nn.Dropout", "torch.nn.ReflectionPad2d", "torch.Tensor", "torch.nn.Conv2d", "torch.sum", "torch.nn.Tanh", "numpy.ceil", "torch.nn.AvgPool2d", "torch.nn.LeakyReLU", "torch.nn.functional.interpolate", "numpy.mod", "torch.nn.ReLU", "numpy.array", "torch.nn.ReplicationPad2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shishirpy/highdicom
[ "bdec0b4123b1eedc3ff275f07edadca6cfe21725" ]
[ "src/highdicom/spatial.py" ]
[ "from typing import Sequence, Tuple\n\nimport numpy as np\n\n\ndef create_rotation_matrix(\n image_orientation: Sequence[float],\n) -> np.ndarray:\n \"\"\"Builds a rotation matrix.\n\n Parameters\n ----------\n image_orientation: Sequence[float]\n Cosines of the row direction (first triplet: horizontal, left to right,\n increasing Column index) and the column direction (second triplet:\n vertical, top to bottom, increasing Row index) direction expressed in\n the three-dimensional patient or slide coordinate system defined by the\n Frame of Reference\n\n Returns\n -------\n numpy.ndarray\n 3 x 3 rotation matrix\n\n \"\"\"\n if len(image_orientation) != 6:\n raise ValueError('Argument \"image_orientation\" must have length 6.')\n row_cosines = np.array(image_orientation[:3], dtype=float)\n column_cosines = np.array(image_orientation[3:], dtype=float)\n n = np.cross(row_cosines.T, column_cosines.T)\n return np.column_stack([\n row_cosines,\n column_cosines,\n n\n ])\n\n\nclass ImageToReferenceTransformer(object):\n\n \"\"\"Class for transforming coordinates from image to reference space.\n\n Builds an affine transformation matrix for mapping two dimensional\n pixel matrix coordinates into the three dimensional frame of reference.\n\n Examples\n --------\n\n >>> import numpy as np\n >>> # Create a transformer by specifying the reference space of\n >>> # an image\n >>> transformer = ImageToReferenceTransformer(\n ... image_position=[56.0, 34.2, 1.0],\n ... image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],\n ... pixel_spacing=[0.5, 0.5])\n >>> # Use the transformer to convert coordinates\n >>> image_coords = np.array([[0.0, 10.0], [5.0, 5.0]])\n >>> ref_coords = transformer(image_coords)\n >>> print(ref_coords)\n [[56. 39.2 1. ]\n [58.5 36.7 1. ]]\n\n \"\"\"\n\n def __init__(\n self,\n image_position: Sequence[float],\n image_orientation: Sequence[float],\n pixel_spacing: Sequence[float],\n ):\n \"\"\"Constructs transformation object.\n\n Parameters\n ----------\n image_position: Sequence[float]\n Position of the slice (image or frame) in the Frame of Reference,\n i.e., the offset of the top left pixel in the pixel matrix from the\n origin of the reference coordinate system along the X, Y, and Z\n axis\n image_orientation: Sequence[float]\n Cosines of the row direction (first triplet: horizontal, left to\n right, increasing Column index) and the column direction (second\n triplet: vertical, top to bottom, increasing Row index) direction\n expressed in the three-dimensional patient or slide coordinate\n system defined by the Frame of Reference\n pixel_spacing: Sequence[float]\n Spacing between pixels in millimeter unit along the column\n direction (first value: spacing between rows, vertical, top to\n bottom, increasing Row index) and the rows direction (second value:\n spacing between columns: horizontal, left to right, increasing\n Column index)\n\n Raises\n ------\n TypeError\n When any of the arguments is not a sequence.\n ValueError\n When any of the arguments has an incorrect length.\n\n \"\"\"\n if not isinstance(image_position, Sequence):\n raise TypeError('Argument \"image_position\" must be a sequence.')\n if len(image_position) != 3:\n raise ValueError('Argument \"image_position\" must have length 3.')\n if not isinstance(image_orientation, Sequence):\n raise TypeError('Argument \"image_orientation\" must be a sequence.')\n if len(image_orientation) != 6:\n raise ValueError('Argument \"image_orientation\" must have length 6.')\n if not isinstance(pixel_spacing, Sequence):\n raise 
TypeError('Argument \"pixel_spacing\" must be a sequence.')\n if len(pixel_spacing) != 2:\n raise ValueError('Argument \"pixel_spacing\" must have length 2.')\n\n x_offset = float(image_position[0])\n y_offset = float(image_position[1])\n z_offset = float(image_position[2])\n translation = np.array([x_offset, y_offset, z_offset], dtype=float)\n\n rotation = create_rotation_matrix(image_orientation)\n # Column direction (spacing between rows)\n column_spacing = float(pixel_spacing[0])\n # Row direction (spacing between columns)\n row_spacing = float(pixel_spacing[1])\n rotation[:, 0] *= row_spacing\n rotation[:, 1] *= column_spacing\n\n # 4x4 transformation matrix\n self._affine = np.row_stack(\n [\n np.column_stack([\n rotation,\n translation,\n ]),\n [0.0, 0.0, 0.0, 1.0]\n ]\n )\n\n @property\n def affine(self) -> np.ndarray:\n \"\"\"numpy.ndarray: 4x4 affine transformation matrix\"\"\"\n return self._affine\n\n def __call__(self, coordinates: np.ndarray) -> np.ndarray:\n \"\"\"Transform coordinates from image space to frame of reference.\n\n Applies the affine transformation matrix to a batch of pixel matrix\n coordinates to obtain the corresponding coordinates in the frame of\n reference.\n\n Parameters\n ----------\n coordinates: numpy.ndarray\n Array of (Column, Row) coordinates in the Total Pixel Matrix in\n pixel unit at sub-pixel resolution. Array should have shape\n ``(n, 2)``, where *n* is the number of coordinates, the first\n column represents the *Column* values and the second column\n represents the *Row* values.\n\n Returns\n -------\n numpy.ndarray\n Array of (X, Y, Z) coordinates in the coordinate system defined by\n the Frame of Reference. Array has shape ``(n, 3)``, where *n* is\n the number of coordinates, the first column represents the *X*\n offsets, the second column represents the *Y* offsets and the third\n column represents the *Z* offsets\n\n Raises\n ------\n ValueError\n When `coordinates` has incorrect shape.\n\n \"\"\"\n if coordinates.shape[1] != 2:\n raise ValueError(\n 'Argument \"coordinates\" must be a two-dimensional array '\n 'with shape [n, 2].'\n )\n pixel_matrix_coordinates = np.row_stack([\n coordinates.T,\n np.zeros((coordinates.shape[0], ), dtype=float),\n np.ones((coordinates.shape[0], ), dtype=float),\n ])\n physical_coordinates = np.dot(self._affine, pixel_matrix_coordinates)\n return physical_coordinates[:3, :].T\n\n\nclass ReferenceToImageTransformer(object):\n\n \"\"\"Class for transforming coordinates from reference to image space.\n\n Builds an affine transformation matrix for mapping coordinates in the\n three dimensional frame of reference into two-dimensional pixel matrix\n coordinates.\n\n Examples\n --------\n\n >>> # Create a transformer by specifying the reference space of\n >>> # an image\n >>> transformer = ReferenceToImageTransformer(\n ... image_position=[56.0, 34.2, 1.0],\n ... image_orientation=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0],\n ... pixel_spacing=[0.5, 0.5])\n >>> # Use the transformer to convert coordinates\n >>> ref_coords = np.array([[56., 39.2, 1. ], [58.5, 36.7, 1.]])\n >>> image_coords = transformer(ref_coords)\n >>>\n >>> print(image_coords)\n [[ 0. 10. 0.]\n [ 5. 5. 
0.]]\n\n \"\"\"\n\n def __init__(\n self,\n image_position: Sequence[float],\n image_orientation: Sequence[float],\n pixel_spacing: Sequence[float],\n spacing_between_slices: float = 1.0\n ):\n \"\"\"Construct transformation object.\n\n Builds an inverse of an affine transformation matrix for mapping\n coordinates from the frame of reference into the two\n dimensional pixel matrix.\n\n Parameters\n ----------\n image_position: Sequence[float]\n Position of the slice (image or frame) in the Frame of Reference,\n i.e., the offset of the top left pixel in the pixel matrix from the\n origin of the reference coordinate system along the X, Y, and Z\n axis\n image_orientation: Sequence[float]\n Cosines of the row direction (first triplet: horizontal, left to\n right, increasing Column index) and the column direction (second\n triplet: vertical, top to bottom, increasing Row index) direction\n expressed in the three-dimensional patient or slide coordinate\n system defined by the Frame of Reference\n pixel_spacing: Sequence[float]\n Spacing between pixels in millimeter unit along the column\n direction (first value: spacing between rows, vertical, top to\n bottom, increasing Row index) and the rows direction (second value:\n spacing between columns: horizontal, left to right, increasing\n Column index)\n spacing_between_slices: float, optional\n Distance (in the coordinate defined by the Frame of Reference)\n between neighboring slices. Default: 1\n\n Raises\n ------\n TypeError\n When `image_position`, `image_orientation` or `pixel_spacing` is\n not a sequence.\n ValueError\n When `image_position`, `image_orientation` or `pixel_spacing` has\n an incorrect length.\n\n \"\"\"\n if not isinstance(image_position, Sequence):\n raise TypeError('Argument \"image_position\" must be a sequence.')\n if len(image_position) != 3:\n raise ValueError('Argument \"image_position\" must have length 3.')\n if not isinstance(image_orientation, Sequence):\n raise TypeError('Argument \"image_orientation\" must be a sequence.')\n if len(image_orientation) != 6:\n raise ValueError('Argument \"image_orientation\" must have length 6.')\n if not isinstance(pixel_spacing, Sequence):\n raise TypeError('Argument \"pixel_spacing\" must be a sequence.')\n if len(pixel_spacing) != 2:\n raise ValueError('Argument \"pixel_spacing\" must have length 2.')\n\n x_offset = float(image_position[0])\n y_offset = float(image_position[1])\n z_offset = float(image_position[2])\n translation = np.array([x_offset, y_offset, z_offset])\n\n rotation = create_rotation_matrix(image_orientation)\n # Column direction (spacing between rows)\n column_spacing = float(pixel_spacing[0])\n # Row direction (spacing between columns)\n row_spacing = float(pixel_spacing[1])\n rotation[:, 0] *= row_spacing\n rotation[:, 1] *= column_spacing\n rotation[:, 2] *= spacing_between_slices\n inv_rotation = np.linalg.inv(rotation)\n # 4x4 transformation matrix\n self._affine = np.row_stack(\n [\n np.column_stack([\n inv_rotation,\n -np.dot(inv_rotation, translation)\n ]),\n [0.0, 0.0, 0.0, 1.0]\n ]\n )\n\n @property\n def affine(self) -> np.ndarray:\n \"\"\"numpy.ndarray: 4 x 4 affine transformation matrix\"\"\"\n return self._affine\n\n def __call__(self, coordinates: np.ndarray) -> np.ndarray:\n \"\"\"Applies the inverse of an affine transformation matrix to a batch of\n coordinates in the frame of reference to obtain the corresponding pixel\n matrix coordinates.\n\n Parameters\n ----------\n coordinates: numpy.ndarray\n Array of (X, Y, Z) coordinates in the 
coordinate system defined by\n the Frame of Reference. Array should have shape ``(n, 3)``, where\n *n* is the number of coordinates, the first column represents the\n *X* offsets, the second column represents the *Y* offsets and the\n third column represents the *Z* offsets\n\n Returns\n -------\n numpy.ndarray\n Array of (Column, Row, Slice) coordinates, where the\n `Column` and `Row` offsets relate to the Total Pixel Matrix in pixel\n units at sub-pixel resolution and the `Slice` offset represents the\n signed distance of the input coordinate in the direction normal to\n the plane of the Total Pixel Matrix represented in units of the\n given spacing between slices.\n The `Row` and `Column` offsets are constrained by the dimension of\n the Total Pixel Matrix. Note, however, that in general, the\n resulting coordinate may not lie within the imaging plane, and\n consequently the `Slice` offset may be non-zero.\n\n Raises\n ------\n ValueError\n When `coordinates` has incorrect shape.\n\n \"\"\"\n if coordinates.shape[1] != 3:\n raise ValueError(\n 'Argument \"coordinates\" must be a two-dimensional array '\n 'with shape [n, 3].'\n )\n physical_coordinates = np.row_stack([\n coordinates.T,\n np.ones((coordinates.shape[0], ), dtype=float)\n ])\n pixel_matrix_coordinates = np.dot(self._affine, physical_coordinates)\n return pixel_matrix_coordinates[:3, :].T\n\n\ndef map_pixel_into_coordinate_system(\n coordinate: Sequence[float],\n image_position: Sequence[float],\n image_orientation: Sequence[float],\n pixel_spacing: Sequence[float],\n) -> Tuple[float, float, float]:\n \"\"\"Maps a coordinate in the pixel matrix into the physical coordinate\n system (e.g., Slide or Patient) defined by the frame of reference.\n\n Parameters\n ----------\n coordinate: Sequence[float]\n (Column, Row) coordinate in the Total Pixel Matrix in pixel unit at\n sub-pixel resolution.\n image_position: Sequence[float]\n Position of the slice (image or frame) in the Frame of Reference, i.e.,\n the offset of the top left pixel in the Total Pixel Matrix from the\n origin of the reference coordinate system along the X, Y, and Z axis\n image_orientation: Sequence[float]\n Cosines of the row direction (first triplet: horizontal, left to right,\n increasing Column index) and the column direction (second triplet:\n vertical, top to bottom, increasing Row index) direction expressed in\n the three-dimensional patient or slide coordinate system defined by the\n Frame of Reference\n pixel_spacing: Sequence[float]\n Spacing between pixels in millimeter unit along the column direction\n (first value: spacing between rows, vertical, top to bottom,\n increasing Row index) and the row direction (second value: spacing\n between columns: horizontal, left to right, increasing Column index)\n\n Returns\n -------\n Tuple[float, float, float]\n (X, Y, Z) coordinate in the coordinate system defined by the\n Frame of Reference\n\n Note\n ----\n This function is a convenient wrapper around\n ``highdicom.spatial.ImageToReferenceTransformation`` for mapping an\n individual coordinate. 
When mapping a large number of coordinates, consider\n using this class directly for speedup.\n\n Raises\n ------\n TypeError\n When `image_position`, `image_orientation` or `pixel_spacing` is not a\n sequence.\n ValueError\n When `image_position`, `image_orientation` or `pixel_spacing` has an\n incorrect length.\n\n \"\"\"\n transformer = ImageToReferenceTransformer(\n image_position=image_position,\n image_orientation=image_orientation,\n pixel_spacing=pixel_spacing\n )\n transformed_coordinates = transformer(np.array([coordinate], dtype=float))\n physical_coordinates = transformed_coordinates[0, :].tolist()\n return (\n physical_coordinates[0],\n physical_coordinates[1],\n physical_coordinates[2],\n )\n\n\ndef map_coordinate_into_pixel_matrix(\n coordinate: Sequence[float],\n image_position: Sequence[float],\n image_orientation: Sequence[float],\n pixel_spacing: Sequence[float],\n spacing_between_slices: float = 1.0,\n) -> Tuple[float, float, float]:\n \"\"\"Maps a coordinate in the physical coordinate system (e.g., Slide or\n Patient) defined by the frame of reference into the pixel matrix.\n\n Parameters\n ----------\n coordinate: Sequence[float]\n (X, Y, Z) coordinate in the coordinate system in millimeter unit.\n image_position: Sequence[float]\n Position of the slice (image or frame) in the Frame of Reference, i.e.,\n the offset of the top left pixel in the Total Pixel matrix from the\n origin of the reference coordinate system along the X, Y, and Z axis\n image_orientation: Sequence[float]\n Cosines of the row direction (first triplet: horizontal, left to right,\n increasing Column index) and the column direction (second triplet:\n vertical, top to bottom, increasing Row index) direction expressed in\n the three-dimensional patient or slide coordinate system defined by the\n Frame of Reference\n pixel_spacing: Sequence[float]\n Spacing between pixels in millimeter unit along the column direction\n (first value: spacing between rows, vertical, top to bottom,\n increasing Row index) and the rows direction (second value: spacing\n between columns: horizontal, left to right, increasing Column index)\n spacing_between_slices: float, optional\n Distance (in the coordinate defined by the Frame of Reference) between\n neighboring slices. Default: ``1.0``\n\n Returns\n -------\n Tuple[float, float, float]\n (Column, Row, Slice) coordinate, where `Column` and `Row` are pixel\n coordinates in the Total Pixel Matrix, `Slice` represents the signed\n distance of the input coordinate in the direction normal to the plane\n of the Total Pixel Matrix represented in units of the given spacing\n between slices. 
If the `Slice` offset is ``0.0``, then the input\n coordinate lies in the imaging plane, otherwise it lies off the plane\n of the Total Pixel Matrix and `Column` and `Row` offsets may be\n interpreted as the projections of the input coordinate onto the\n imaging plane.\n\n Note\n ----\n This function is a convenient wrapper around\n ``build_ref_to_image_transform()`` and ``apply_ref_to_image_transform()``.\n When mapping a large number of coordinates, consider using these underlying\n functions directly for speedup.\n\n Raises\n ------\n TypeError\n When `image_position`, `image_orientation` or `pixel_spacing` is not a\n sequence.\n ValueError\n When `image_position`, `image_orientation` or `pixel_spacing` has an\n incorrect length.\n\n \"\"\"\n transformer = ReferenceToImageTransformer(\n image_position=image_position,\n image_orientation=image_orientation,\n pixel_spacing=pixel_spacing,\n spacing_between_slices=spacing_between_slices\n )\n transformed_coordinates = transformer(np.array([coordinate], dtype=float))\n pixel_matrix_coordinates = transformed_coordinates[0, :].tolist()\n return (\n pixel_matrix_coordinates[0],\n pixel_matrix_coordinates[1],\n pixel_matrix_coordinates[2],\n )\n" ]
[ [ "numpy.cross", "numpy.dot", "numpy.linalg.inv", "numpy.ones", "numpy.column_stack", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neerbek/taboo-selective
[ "56b126e4aa6d08c53b33ebffe0c7d5063bc1719e" ]
[ "functionality/kmeans_cluster_ijcai18_1_201_9_exp199_search_best_k_200K.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n\nCreated on March 9, 2018\n\n@author: neerbek\n\nSearch for k values\n\"\"\"\nimport os\nos.chdir(\"../../taboo-core\")\nfrom numpy.random import RandomState # type: ignore\nfrom sklearn.cluster import KMeans # type: ignore\n\nimport ai_util\nimport confusion_matrix\nimport kmeans_cluster_util as kutil\nimport similarity.load_trees as load_trees\n\nimport importlib\nimportlib.reload(confusion_matrix)\n\n# using 200K\n# ../taboo-jan/functionality/201/trees_ijcai18_exp199_200K.zip\n\ntotaltimer = ai_util.Timer(\"Total time: \")\ntraintimer = ai_util.Timer(\"Train time: \")\ntotaltimer.begin()\nbasefile = \"output_embeddings_201_exp199_m200K.zip\"\ninputfileTrain = \"output/\" + basefile + \"$train.txt\"\nprint(\"reading: \" + inputfileTrain)\nlinesTrainFull = confusion_matrix.read_embeddings(inputfileTrain, max_line_count=-1)\nlinesTrain = [linesTrainFull[i] for i in range(60000)]\ninputfileDev = \"output/\" + basefile + \"$dev.txt\"\nlinesDev = confusion_matrix.read_embeddings(inputfileDev, max_line_count=-1)\n\nnumberOfClusters = 15\nrandomSeed = 7485\ndoShow = True\n# from exp194: # (low, acc, fraction) = (5, 99.1%, 23%)\nlow = 15\nhigh = 27\n\nrng = RandomState(randomSeed)\naTrain = confusion_matrix.get_embedding_matrix(linesTrain, normalize=True)\naTrainFull = confusion_matrix.get_embedding_matrix(linesTrainFull, normalize=True)\naDev = confusion_matrix.get_embedding_matrix(linesDev, normalize=True)\n\nkmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrain)\nsort_order = kutil.get_cluster_sen_ratios_sort_order(aTrain, linesTrain, kmeans)\n\n# ################\nnumberOfClusters = 15\nrng = RandomState(randomSeed)\nkmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrain)\nsort_order = kutil.get_cluster_sen_ratios_sort_order(aTrain, linesTrain, kmeans)\n\ny1 = kutil.get_cluster_sen_ratios(aTrain, linesTrain, kmeans, sort_order)\ny2 = kutil.getScaledSizes(aTrain, kmeans, sort_order)\nfy1 = [\"{:.4f}\".format(1 - y) for y in y1]\nfy2 = [\"{:.2f}\".format(100 * y) for y in y2]\nprint(\"acc\", fy1)\nprint(\"sizes\", fy2)\n\n# acc ['0.9948', '0.9881', '0.9825', '0.9824', '0.9815', '0.9809', '0.9771', '0.9702', '0.9659', '0.9515', '0.7826', '0.7101', '0.1319', '0.1194', '0.1064']\n# sizes ['11.92', '27.84', '33.66', '40.58', '46.71', '51.34', '58.62', '67.91', '76.07', '81.94', '85.96', '89.12', '91.91', '94.55', '100.00']\n\n# ################\nnumberOfClusters = 35\nrng = RandomState(randomSeed)\nkmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrain)\nsort_order = kutil.get_cluster_sen_ratios_sort_order(aTrain, linesTrain, kmeans)\n\ny1 = kutil.get_cluster_sen_ratios(aTrain, linesTrain, kmeans, sort_order)\ny2 = kutil.getScaledSizes(aTrain, kmeans, sort_order)\nfy1 = [\"{:.4f}\".format(1 - y) for y in y1]\nfy2 = [\"{:.2f}\".format(100 * y) for y in y2]\nprint(\"acc\", fy1)\nprint(\"sizes\", fy2)\n\n# acc ['0.9972', '0.9958', '0.9945', '0.9942', '0.9903', '0.9863', '0.9859', '0.9858', '0.9838', '0.9810', '0.9805', '0.9802', '0.9790', '0.9789', '0.9724', '0.9718', '0.9705', '0.9704', '0.9691', '0.9685', '0.9627', '0.9532', '0.9258', '0.9234', '0.9196', '0.8709', '0.8393', '0.7721', '0.6288', '0.2350', '0.2106', '0.1557', '0.0982', '0.0881', '0.0743']\n# sizes ['3.62', '8.00', '11.91', '19.34', '23.27', '26.92', '31.04', '34.32', '39.89', '45.24', '48.92', '50.61', '53.62', '56.31', '59.93', '63.77', '65.98', '68.62', '70.67', '72.90', '74.19', '77.33', '78.56', '79.67', '82.08', '82.99', '84.81', 
'87.62', '88.60', '90.19', '91.29', '92.68', '96.38', '98.03', '100.00']\n\n# ################\nnumberOfClusters = 70\nrng = RandomState(randomSeed)\nkmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrain)\nsort_order = kutil.get_cluster_sen_ratios_sort_order(aTrain, linesTrain, kmeans)\n\ny1 = kutil.get_cluster_sen_ratios(aTrain, linesTrain, kmeans, sort_order)\ny2 = kutil.getScaledSizes(aTrain, kmeans, sort_order)\nfy1 = [\"{:.4f}\".format(1 - y) for y in y1]\nfy2 = [\"{:.2f}\".format(100 * y) for y in y2]\nprint(\"acc\", fy1)\nprint(\"sizes\", fy2)\n\n# acc ['0.9967', '0.9957', '0.9956', '0.9955', '0.9948', '0.9943', '0.9937', '0.9929', '0.9927', '0.9924', '0.9924', '0.9922', '0.9918', '0.9912', '0.9910', '0.9899', '0.9899', '0.9854', '0.9854', '0.9848', '0.9844', '0.9838', '0.9831', '0.9819', '0.9804', '0.9791', '0.9788', '0.9785', '0.9760', '0.9756', '0.9750', '0.9737', '0.9729', '0.9724', '0.9694', '0.9689', '0.9677', '0.9658', '0.9609', '0.9551', '0.9532', '0.9519', '0.9410', '0.9289', '0.9213', '0.9183', '0.9150', '0.9150', '0.9135', '0.9054', '0.9048', '0.9018', '0.9004', '0.8537', '0.6980', '0.6891', '0.6571', '0.6310', '0.5397', '0.4462', '0.3070', '0.2577', '0.2026', '0.1242', '0.1102', '0.0930', '0.0896', '0.0876', '0.0386', '0.0352']\n# sizes ['3.08', '7.37', '9.62', '11.87', '16.36', '18.42', '20.27', '23.34', '25.16', '26.27', '26.92', '28.41', '30.23', '33.07', '35.85', '36.18', '37.66', '38.58', '41.54', '42.31', '43.69', '45.96', '47.74', '49.21', '49.63', '51.70', '52.65', '53.81', '56.03', '58.42', '59.49', '62.15', '64.06', '65.27', '67.23', '67.55', '68.69', '69.22', '71.23', '72.23', '73.97', '75.67', '76.60', '76.95', '77.40', '78.32', '78.91', '80.06', '81.58', '81.95', '82.44', '83.99', '85.13', '85.26', '85.76', '86.48', '87.29', '87.85', '88.90', '89.44', '89.80', '90.67', '91.43', '92.48', '93.28', '95.25', '96.16', '97.21', '98.72', '100.00']\n\nlow = 1 # (11.92)\nlow = 3 # (11.91)\nlow = 4 # (11.87)\n\nlow = 2 # (27.84)\nlow = 6 # (26.92)\nlow = 11 # (26.92)\n\nlow = 3 # (33.66)\nlow = 8 # (34.32)\nlow = 14 # (33.07)\n\n\n# ####################\nnumberOfClusters = 15\nnumberOfClusters = 35\nnumberOfClusters = 70\nrng = RandomState(randomSeed)\nkmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrain)\nsort_order = kutil.get_cluster_sen_ratios_sort_order(aTrain, linesTrain, kmeans)\ny1 = kutil.get_cluster_sen_ratios(aTrain, linesTrain, kmeans, sort_order)\ny1dev = kutil.get_cluster_sen_ratios(aDev, linesDev, kmeans, sort_order)\ny2 = kutil.getScaledSizes(aTrain, kmeans, sort_order)\ny2dev = kutil.getScaledSizes(aDev, kmeans, sort_order)\n\n# numberOfClusters = 15\nlow = 1 # (11.92)\nlow = 2 # (27.84)\nlow = 3 # (33.66)\n\n# numberOfClusters = 35\nlow = 3 # (11.91) (trees_ijcai18_exp199_200K_10p.zip)\nlow = 6 # (26.92) (trees_ijcai18_exp199_200K_25p.zip)\nlow = 8 # (34.32)\n\n# numberOfClusters = 70\nlow = 4 # (11.87)\nlow = 11 # (26.92)\nlow = 14 # (33.07)\n\n# ###############\nclusterIds = sort_order # clusterIds == sort_order, it's just syntaxtic sugar\n(linesC1, aC1) = kutil.get_sentences_from_clusters(clusterIds[:low], linesTrainFull, aTrainFull, kmeans)\n(linesC2, aC2) = kutil.get_sentences_from_clusters(clusterIds[low:], linesTrainFull, aTrainFull, kmeans)\n(lines2C1, a2C1) = kutil.get_sentences_from_clusters(clusterIds[:low], linesDev, aDev, kmeans)\n(lines2C2, a2C2) = kutil.get_sentences_from_clusters(clusterIds[low:], linesDev, aDev, kmeans)\nprint(\"(numberOfClusters, low, acc, acc[c+1], fraction) = ({}, {}, 
{:.4f}%, {:.4f}%, {:.4f}%)\".format(numberOfClusters, low, (1 - y1[low - 1]) * 100, (1 - y1[low]) * 100, y2[low - 1] * 100))\nprint(\"(devAcc, devFraction) = ({:.4f}%, {:.4f}%)\".format((1 - y1dev[low - 1]) * 100, y2[low - 1] * 100))\nprint(len(linesC1), len(linesC2))\nprint(len(lines2C1), len(lines2C2))\n\nkutil.get_base_accuracy(linesTrainFull, \"train\").report()\nkutil.get_base_accuracy(linesDev, \"dev\").report()\nkutil.get_base_accuracy(linesC1, \"train C1\").report()\nkutil.get_base_accuracy(linesC2, \"train C2\").report()\nkutil.get_base_accuracy(lines2C1, \"dev C1\").report()\nkutil.get_base_accuracy(lines2C2, \"dev C2\").report()\n\ncmC1 = kutil.get_base_accuracy(lines2C1)\ncmC2 = kutil.get_base_accuracy(lines2C2)\n\nformula = \"(* (/ {}.0 {}) {})\"\nformulaC1 = formula.format(len(lines2C1), len(linesDev), \"{}\") # little hack \"{}\" means not setted yet\nformulaC2 = formula.format(len(lines2C2), len(linesDev), \"{}\")\nprint(\"formula: (+ {} {})\".format(formulaC1.format(cmC1.get_accuracy()), formulaC2.format(cmC2.get_accuracy())))\n\nload_trees.put_trees(\"C1.txt\", [l.tree for l in linesC1])\nload_trees.put_trees(\"C2.txt\", [l.tree for l in linesC2])\nload_trees.put_trees(\"2C1.txt\", [l.tree for l in lines2C1])\nload_trees.put_trees(\"2C2.txt\", [l.tree for l in lines2C2])\n\n# ###################################################\n# ###################################################\n# k=15 #############################################\n# #############################################\n#\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (15, 1, 99.4827%, 98.8064%, 11.9217%)\n# (devAcc, devFraction) = (99.1468%, 11.9217%)\n# 12583 91673\n# 1758 13242\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), f1=0.7505 (1370 293 12719 618)\n# Accuracy (train C1): 0.9942 (0.9942), f1=0.0000 (0 0 12510 73)\n# Accuracy (train C2): 0.9508 (0.8517), f1=0.8199 (10269 1181 76894 3329)\n# Accuracy (dev C1): 0.9915 (0.9915), f1=0.0000 (0 0 1743 15)\n# Accuracy (dev C2): 0.9323 (0.8510), f1=0.7536 (1370 293 10976 603)\n# formula: (+ (* (/ 1758.0 15000) 0.9914675767918089) (* (/ 13242.0 15000) 0.9323365050596587))\n# k=15, c=1 zip -m ../taboo-jan/functionality/201/trees_ijcai18_exp199_200K_k15_c1.zip C1.txt C2.txt 2C1.txt 2C2.txt\n\n# #############################################\n#\n\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (15, 2, 98.8064%, 98.2541%, 27.8400%)\n# (devAcc, devFraction) = (98.1140%, 27.8400%)\n# 29217 75039\n# 4091 10909\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), f1=0.7505 (1370 293 12719 618)\n# Accuracy (train C1): 0.9900 (0.9901), f1=0.0000 (0 3 28924 290)\n# Accuracy (train C2): 0.9428 (0.8217), f1=0.8272 (10269 1178 60480 3112)\n# Accuracy (dev C1): 0.9856 (0.9856), f1=0.0000 (0 0 4032 59)\n# Accuracy (dev C2): 0.9219 (0.8232), f1=0.7628 (1370 293 8687 559)\n# formula: (+ (* (/ 4091.0 15000) 0.9855780982644831) (* (/ 10909.0 15000) 0.921899349161243))\n# k=15, c=2 zip -m ../taboo-jan/functionality/201/trees_ijcai18_exp199_200K_k15_c2.zip C1.txt C2.txt 2C1.txt 2C2.txt\n\n# #############################################\n#\n\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (15, 3, 98.2541%, 98.2418%, 33.6633%)\n# (devAcc, devFraction) = (97.1366%, 33.6633%)\n# 35306 68950\n# 4999 10001\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), f1=0.7505 (1370 293 12719 618)\n# Accuracy 
(train C1): 0.9886 (0.9886), f1=0.0000 (0 3 34902 401)\n# Accuracy (train C2): 0.9394 (0.8075), f1=0.8309 (10269 1178 54502 3001)\n# Accuracy (dev C1): 0.9830 (0.9830), f1=0.0000 (0 0 4914 85)\n# Accuracy (dev C2): 0.9174 (0.8097), f1=0.7684 (1370 293 7805 533)\n# formula: (+ (* (/ 4999.0 15000) 0.9829965993198639) (* (/ 10001.0 15000) 0.9174082591740826))\n# k=15, c=3 zip -m ../taboo-jan/functionality/201/trees_ijcai18_exp199_200K_k15_c3.zip C1.txt C2.txt 2C1.txt 2C2.txt\n\n# ###################################################\n# ###################################################\n# k=35 #############################################\n# #############################################\n#\n\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (35, 3, 99.4461%, 99.4165%, 11.9133%)\n# (devAcc, devFraction) = (98.3165%, 11.9133%)\n# 12531 91725\n# 1696 13304\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), f1=0.7505 (1370 293 12719 618)\n# Accuracy (train C1): 0.9951 (0.9951), f1=0.0000 (0 0 12470 61)\n# Accuracy (train C2): 0.9507 (0.8516), f1=0.8196 (10269 1181 76934 3341)\n# Accuracy (dev C1): 0.9906 (0.9906), f1=0.0000 (0 0 1680 16)\n# Accuracy (dev C2): 0.9327 (0.8518), f1=0.7538 (1370 293 11039 602)\n# formula: (+ (* (/ 1696.0 15000) 0.9905660377358491) (* (/ 13304.0 15000) 0.9327269993986771))\n# k=35 c=3 trees_ijcai18_exp199_200K_10p.zip\n\n# #############################################\n#\n\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (35, 6, 98.6283%, 98.5864%, 26.9167%)\n# (devAcc, devFraction) = (98.1670%, 26.9167%)\n# 28189 76067\n# 3890 11110\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), f1=0.7505 (1370 293 12719 618)\n# Accuracy (train C1): 0.9933 (0.9933), f1=0.0000 (0 0 28001 188)\n# Accuracy (train C2): 0.9422 (0.8227), f1=0.8237 (10269 1181 61403 3214)\n# Accuracy (dev C1): 0.9884 (0.9884), f1=0.0000 (0 0 3845 45)\n# Accuracy (dev C2): 0.9221 (0.8251), f1=0.7598 (1370 293 8874 573)\n# formula: (+ (* (/ 3890.0 15000) 0.9884318766066839) (* (/ 11110.0 15000) 0.922052205220522))\n# k=35 c=6 trees_ijcai18_exp199_200K_25p.zip\n\n# #############################################\n#\n\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (35, 8, 98.5765%, 98.3837%, 34.3217%)\n# (devAcc, devFraction) = (98.5386%, 34.3217%)\n# 35959 68297\n# 5008 9992\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), f1=0.7505 (1370 293 12719 618)\n# Accuracy (train C1): 0.9914 (0.9914), f1=0.0000 (0 2 35649 308)\n# Accuracy (train C2): 0.9374 (0.8043), f1=0.8278 (10269 1179 53755 3094)\n# Accuracy (dev C1): 0.9868 (0.9868), f1=0.0000 (0 0 4942 66)\n# Accuracy (dev C2): 0.9154 (0.8076), f1=0.7643 (1370 293 7777 552)\n# formula: (+ (* (/ 5008.0 15000) 0.9868210862619808) (* (/ 9992.0 15000) 0.9154323458767014))\n# k=35 c=8 zip -m ../taboo-jan/functionality/201/trees_ijcai18_exp199_200K_k35_c8.zip C1.txt C2.txt 2C1.txt 2C2.txt\n\n# ###################################################\n# ###################################################\n# k=70 #############################################\n# #############################################\n#\n\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (70, 4, 99.5546%, 99.4801%, 11.8667%)\n# (devAcc, devFraction) = (99.3789%, 11.8667%)\n# 12538 91718\n# 1712 13288\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), 
f1=0.7505 (1370 293 12719 618)\n# Accuracy (train C1): 0.9959 (0.9959), f1=0.0000 (0 0 12487 51)\n# Accuracy (train C2): 0.9506 (0.8515), f1=0.8192 (10269 1181 76917 3351)\n# Accuracy (dev C1): 0.9953 (0.9953), f1=0.0000 (0 0 1704 8)\n# Accuracy (dev C2): 0.9320 (0.8510), f1=0.7521 (1370 293 11015 610)\n# formula: (+ (* (/ 1712.0 15000) 0.9953271028037384) (* (/ 13288.0 15000) 0.9320439494280554))\n\n# k=70 c=4 zip -m ../taboo-jan/functionality/201/trees_ijcai18_exp199_200K_k70_c4.zip C1.txt C2.txt 2C1.txt 2C2.txt\n\n# #############################################\n#\n\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (70, 11, 99.2386%, 99.2161%, 26.9233%)\n# (devAcc, devFraction) = (96.2617%, 26.9233%)\n# 28299 75957\n# 3973 11027\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), f1=0.7505 (1370 293 12719 618)\n# Accuracy (train C1): 0.9946 (0.9946), f1=0.0130 (1 0 28146 152)\n# Accuracy (train C2): 0.9417 (0.8220), f1=0.8225 (10268 1181 61258 3250)\n# Accuracy (dev C1): 0.9930 (0.9930), f1=0.0000 (0 0 3945 28)\n# Accuracy (dev C2): 0.9199 (0.8223), f1=0.7563 (1370 293 8774 590)\n# formula: (+ (* (/ 3973.0 15000) 0.9929524288950415) (* (/ 11027.0 15000) 0.9199238233427043))\n\n# k=70 c=11 zip -m ../taboo-jan/functionality/201/trees_ijcai18_exp199_200K_k70_c11.zip C1.txt C2.txt 2C1.txt 2C2.txt\n\n# #############################################\n#\n\n# (numberOfClusters, low, acc, acc[c+1], fraction) = (70, 14, 99.1192%, 99.1013%, 33.0700%)\n# (devAcc, devFraction) = (97.2010%, 33.0700%)\n# 34701 69555\n# 4847 10153\n# Accuracy (train): 0.9560 (0.8689), f1=0.8176 (10269 1181 89404 3402)\n# Accuracy (dev): 0.9393 (0.8675), f1=0.7505 (1370 293 12719 618)\n# Accuracy (train C1): 0.9939 (0.9939), f1=0.0093 (1 0 34488 212)\n# Accuracy (train C2): 0.9372 (0.8065), f1=0.8245 (10268 1181 54916 3190)\n# Accuracy (dev C1): 0.9901 (0.9901), f1=0.0000 (0 0 4799 48)\n# Accuracy (dev C2): 0.9150 (0.8089), f1=0.7605 (1370 293 7920 570)\n# formula: (+ (* (/ 4847.0 15000) 0.9900969671962039) (* (/ 10153.0 15000) 0.9150004924652811))\n\n# k=70 c=14 zip -m ../taboo-jan/functionality/201/trees_ijcai18_exp199_200K_k70_c14.zip C1.txt C2.txt 2C1.txt 2C2.txt\n\n" ]
[ [ "numpy.random.RandomState", "sklearn.cluster.KMeans" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yingyichen-cyy/Nested-Co-teaching
[ "8b7e3ed02d8994d93dcb2011340fe28ba6012283" ]
[ "data/preprocess_sym_noise_cifar.py" ]
[ "import os \nimport PIL.Image as Image\nimport numpy as np \nfrom shutil import copyfile, copytree\nimport argparse\n\n\n## decide the size of the data subset\nparser = argparse.ArgumentParser(description='Create Symmetric Noisy Labels Dataset')\n\nparser.add_argument('--dataset', type = str, choices=['CIFAR10', 'CIFAR100'], default='CIFAR10', help='which dataset?')\nparser.add_argument('--noise-rate', type=float, default = 0.5, help='label flip probabilities')\nparser.add_argument('--outDir', type = str, default='CIFAR10/train_sn_0.5', help='output directory')\nparser.add_argument('--inDir', type = str, default='CIFAR10/train', help='input directory')\nparser.add_argument('--seed', type = int, default=0, help='random seed')\n\n\nargs = parser.parse_args()\nprint (args)\n\n## create train dir\nif not os.path.exists(args.outDir): \n os.mkdir(args.outDir)\n\n## ---randomly generates symmetric noise under certain noise rate--- ##\nnp.random.seed(args.seed) ## using default seed, reproduce our results\n\nif args.dataset == 'CIFAR10':\n nb_cls = 10\nelif args.dataset == 'CIFAR100':\n nb_cls = 100\n\nfor cls in np.arange(len(os.listdir(args.inDir))):\n\n train_src_cls = os.path.join(args.inDir, str(cls))\n train_dst_cls = os.path.join(args.outDir, str(cls)) \n\n if not os.path.exists(train_dst_cls):\n os.mkdir(train_dst_cls)\n\n img_list = sorted(os.listdir(train_src_cls))\n\n indices = np.random.permutation(len(img_list))\n for i, idx in enumerate(indices):\n image = img_list[idx]\n label = cls\n\n src = os.path.join(train_src_cls, image)\n dst = os.path.join(train_dst_cls, image)\n\n other_class_list = np.arange(nb_cls)\n other_class_list = np.delete(other_class_list, cls)\n\n if i < args.noise_rate * len(img_list):\n label = np.random.choice(other_class_list)\n train_dst_tmp = os.path.join(args.outDir, str(label))\n dst = os.path.join(train_dst_tmp, str(cls) + '_' + image)\n\n if not os.path.exists(train_dst_tmp):\n os.mkdir(train_dst_tmp)\n\n copyfile(src, dst)\n\n\nprint ('\\nSymmetric Noisy Labels {} Training Set with Noise Rate {}'.format(args.dataset, args.noise_rate))\n" ]
[ [ "numpy.arange", "numpy.delete", "numpy.random.seed", "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
okpoti2/cvxpy
[ "0087df795fc9472181fef1d09cd7656387bff481" ]
[ "cvxpy/tests/test_dqcp.py" ]
[ "\"\"\"\nCopyright, the CVXPY authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\n\nimport cvxpy as cp\nimport cvxpy.settings as s\nfrom cvxpy.reductions.solvers import bisection\nfrom cvxpy.tests import base_test\n\nSOLVER = cp.ECOS\n\n\nclass TestDqcp(base_test.BaseTest):\n def test_basic_with_interval(self) -> None:\n x = cp.Variable()\n expr = cp.ceil(x)\n\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconvex())\n self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_convex())\n self.assertFalse(expr.is_concave())\n self.assertFalse(expr.is_dcp())\n self.assertFalse(expr.is_dgp())\n\n problem = cp.Problem(cp.Minimize(expr), [x >= 12, x <= 17])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n\n red = cp.Dqcp2Dcp(problem)\n reduced = red.reduce()\n self.assertTrue(reduced.is_dcp())\n self.assertEqual(len(reduced.parameters()), 1)\n soln = bisection.bisect(reduced, low=12, high=17, solver=cp.SCS)\n self.assertAlmostEqual(soln.opt_val, 12.0, places=3)\n\n problem.unpack(soln)\n self.assertEqual(soln.opt_val, problem.value)\n self.assertAlmostEqual(x.value, 12.0, places=3)\n\n def test_basic_without_interval(self) -> None:\n x = cp.Variable()\n expr = cp.ceil(x)\n\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconvex())\n self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_convex())\n self.assertFalse(expr.is_concave())\n self.assertFalse(expr.is_dcp())\n self.assertFalse(expr.is_dgp())\n\n problem = cp.Problem(cp.Minimize(expr), [x >= 12, x <= 17])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n\n red = cp.Dqcp2Dcp(problem)\n reduced = red.reduce()\n self.assertTrue(reduced.is_dcp())\n self.assertEqual(len(reduced.parameters()), 1)\n soln = bisection.bisect(reduced, solver=cp.SCS)\n self.assertAlmostEqual(soln.opt_val, 12.0, places=3)\n\n problem.unpack(soln)\n self.assertEqual(soln.opt_val, problem.value)\n self.assertAlmostEqual(x.value, 12.0, places=3)\n\n def test_basic_solve(self) -> None:\n x = cp.Variable()\n expr = cp.ceil(x)\n\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconvex())\n self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_convex())\n self.assertFalse(expr.is_concave())\n self.assertFalse(expr.is_dcp())\n self.assertFalse(expr.is_dgp())\n\n problem = cp.Problem(cp.Minimize(expr), [x >= 12, x <= 17])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n problem.solve(SOLVER, qcp=True, low=12, high=17)\n self.assertAlmostEqual(problem.value, 12.0, places=3)\n self.assertAlmostEqual(x.value, 12.0, places=3)\n\n problem._clear_solution()\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.value, 12.0, places=3)\n self.assertAlmostEqual(x.value, 12.0, places=3)\n\n problem._clear_solution()\n problem.solve(SOLVER, qcp=True, high=17)\n 
self.assertAlmostEqual(problem.value, 12.0, places=3)\n self.assertAlmostEqual(x.value, 12.0, places=3)\n\n problem._clear_solution()\n problem.solve(SOLVER, qcp=True, low=12)\n self.assertAlmostEqual(problem.value, 12.0, places=3)\n self.assertAlmostEqual(x.value, 12.0, places=3)\n\n problem._clear_solution()\n problem.solve(SOLVER, qcp=True, low=0, high=100)\n self.assertAlmostEqual(problem.value, 12.0, places=3)\n self.assertAlmostEqual(x.value, 12.0, places=3)\n\n def test_basic_maximization_with_interval(self) -> None:\n x = cp.Variable()\n expr = cp.ceil(x)\n\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconvex())\n self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_convex())\n self.assertFalse(expr.is_concave())\n self.assertFalse(expr.is_dcp())\n self.assertFalse(expr.is_dgp())\n\n problem = cp.Problem(cp.Maximize(expr), [x >= 12, x <= 17])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(x.value, 17.0, places=3)\n\n def test_basic_maximum(self) -> None:\n x, y = cp.Variable(2)\n expr = cp.maximum(cp.ceil(x), cp.ceil(y))\n\n problem = cp.Problem(cp.Minimize(expr), [x >= 12, x <= 17, y >= 17.4])\n self.assertTrue(problem.is_dqcp())\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.objective.value, 18.0)\n self.assertLess(x.value, 17.1)\n self.assertGreater(x.value, 11.9)\n self.assertGreater(y.value, 17.3)\n\n def test_basic_minimum(self) -> None:\n x, y = cp.Variable(2)\n expr = cp.minimum(cp.ceil(x), cp.ceil(y))\n\n problem = cp.Problem(cp.Maximize(expr), [x >= 11.9, x <= 15.8, y >= 17.4])\n self.assertTrue(problem.is_dqcp())\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.objective.value, 16.0)\n self.assertLess(x.value, 16.0)\n self.assertGreater(x.value, 14.9)\n self.assertGreater(y.value, 17.3)\n\n def test_basic_composition(self) -> None:\n x, y = cp.Variable(2)\n expr = cp.maximum(cp.ceil(cp.ceil(x)), cp.ceil(cp.ceil(y)))\n\n problem = cp.Problem(cp.Minimize(expr), [x >= 12, x <= 17, y >= 17.4])\n self.assertTrue(problem.is_dqcp())\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.objective.value, 18.0)\n self.assertLess(x.value, 17.1)\n self.assertGreater(x.value, 11.9)\n self.assertGreater(y.value, 17.3)\n\n # This problem should have the same solution.\n expr = cp.maximum(cp.floor(cp.ceil(x)), cp.floor(cp.ceil(y)))\n problem = cp.Problem(cp.Minimize(expr), [x >= 12, x <= 17, y >= 17.4])\n self.assertTrue(problem.is_dqcp())\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.objective.value, 18.0)\n self.assertLess(x.value, 17.1)\n self.assertGreater(x.value, 11.9)\n self.assertGreater(y.value, 17.3)\n\n def test_basic_floor(self) -> None:\n x = cp.Variable()\n expr = cp.floor(x)\n\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconvex())\n self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_convex())\n self.assertFalse(expr.is_concave())\n self.assertFalse(expr.is_dcp())\n self.assertFalse(expr.is_dgp())\n\n problem = cp.Problem(cp.Minimize(expr), [x >= 11.8, x <= 17])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.objective.value, 11.0)\n self.assertGreater(x.value, 11.7)\n\n def test_basic_multiply_nonneg(self) -> None:\n x, y = cp.Variable(2, nonneg=True)\n expr = x * y\n self.assertTrue(expr.is_dqcp())\n 
self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_quasiconvex())\n\n self.assertFalse(expr.is_dcp())\n\n problem = cp.Problem(cp.Maximize(expr), [x <= 12, y <= 6])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, 72, places=1)\n self.assertAlmostEqual(x.value, 12, places=1)\n self.assertAlmostEqual(y.value, 6, places=1)\n\n def test_basic_multiply_nonpos(self) -> None:\n x, y = cp.Variable(2, nonpos=True)\n expr = x * y\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_quasiconvex())\n\n self.assertFalse(expr.is_dcp())\n\n problem = cp.Problem(cp.Maximize(expr), [x >= -12, y >= -6])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, 72, places=1)\n self.assertAlmostEqual(x.value, -12, places=1)\n self.assertAlmostEqual(y.value, -6, places=1)\n\n def test_basic_multiply_qcvx(self) -> None:\n x = cp.Variable(nonneg=True)\n y = cp.Variable(nonpos=True)\n expr = x * y\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconvex())\n self.assertFalse(expr.is_quasiconcave())\n\n self.assertFalse(expr.is_dcp())\n\n problem = cp.Problem(cp.Minimize(expr), [x <= 7, y >= -6])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, -42, places=1)\n self.assertAlmostEqual(x.value, 7, places=1)\n self.assertAlmostEqual(y.value, -6, places=1)\n\n x = cp.Variable(nonneg=True)\n y = cp.Variable(nonpos=True)\n expr = y * x\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconvex())\n self.assertFalse(expr.is_quasiconcave())\n\n self.assertFalse(expr.is_dcp())\n\n problem = cp.Problem(cp.Minimize(expr), [x <= 7, y >= -6])\n self.assertTrue(problem.is_dqcp())\n self.assertFalse(problem.is_dcp())\n self.assertFalse(problem.is_dgp())\n\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, -42, places=1)\n self.assertAlmostEqual(x.value, 7, places=1)\n self.assertAlmostEqual(y.value, -6, places=1)\n\n def test_concave_multiply(self) -> None:\n x, y = cp.Variable(2, nonneg=True)\n expr = cp.sqrt(x) * cp.sqrt(y)\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_quasiconvex())\n\n problem = cp.Problem(cp.Maximize(expr), [x <= 4, y <= 9])\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, 6, places=1)\n self.assertAlmostEqual(x.value, 4, places=1)\n self.assertAlmostEqual(y.value, 9, places=1)\n\n x, y = cp.Variable(2, nonneg=True)\n expr = (cp.sqrt(x) + 2.0) * (cp.sqrt(y) + 4.0)\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconcave())\n self.assertFalse(expr.is_quasiconvex())\n\n problem = cp.Problem(cp.Maximize(expr), [x <= 4, y <= 9])\n problem.solve(SOLVER, qcp=True)\n # (2 + 2) * (3 + 4) = 28\n self.assertAlmostEqual(problem.objective.value, 28, places=1)\n self.assertAlmostEqual(x.value, 4, places=1)\n self.assertAlmostEqual(y.value, 9, places=1)\n\n def test_basic_ratio(self) -> None:\n x = cp.Variable()\n y = cp.Variable(nonneg=True)\n expr = x / y\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconcave())\n 
self.assertTrue(expr.is_quasiconvex())\n\n problem = cp.Problem(cp.Minimize(expr), [x == 12, y <= 6])\n self.assertTrue(problem.is_dqcp())\n\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, 2.0, places=1)\n self.assertAlmostEqual(x.value, 12, places=1)\n self.assertAlmostEqual(y.value, 6, places=1)\n\n x = cp.Variable()\n y = cp.Variable(nonpos=True)\n expr = x / y\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconcave())\n self.assertTrue(expr.is_quasiconvex())\n\n problem = cp.Problem(cp.Maximize(expr), [x == 12, y >= -6])\n self.assertTrue(problem.is_dqcp())\n\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, -2.0, places=1)\n self.assertAlmostEqual(x.value, 12, places=1)\n self.assertAlmostEqual(y.value, -6, places=1)\n\n def test_lin_frac(self) -> None:\n x = cp.Variable((2,), nonneg=True)\n A = np.array([[1.0, 2.0], [3.0, 4.0]])\n b = np.arange(2)\n C = 2 * A\n d = np.arange(2)\n lin_frac = (cp.matmul(A, x) + b) / (cp.matmul(C, x) + d)\n self.assertTrue(lin_frac.is_dqcp())\n self.assertTrue(lin_frac.is_quasiconvex())\n self.assertTrue(lin_frac.is_quasiconcave())\n\n problem = cp.Problem(cp.Minimize(cp.sum(x)), [x >= 0, lin_frac <= 1])\n self.assertTrue(problem.is_dqcp())\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, 0, places=1)\n np.testing.assert_almost_equal(x.value, 0, decimal=5)\n\n def test_concave_frac(self) -> None:\n x = cp.Variable(nonneg=True)\n concave_frac = cp.sqrt(x) / cp.exp(x)\n self.assertTrue(concave_frac.is_dqcp())\n self.assertTrue(concave_frac.is_quasiconcave())\n self.assertFalse(concave_frac.is_quasiconvex())\n\n problem = cp.Problem(cp.Maximize(concave_frac))\n self.assertTrue(problem.is_dqcp())\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, 0.428, places=1)\n self.assertAlmostEqual(x.value, 0.5, places=1)\n\n def test_length(self) -> None:\n x = cp.Variable(5)\n expr = cp.length(x)\n self.assertTrue(expr.is_dqcp())\n self.assertTrue(expr.is_quasiconvex())\n self.assertFalse(expr.is_quasiconcave())\n\n problem = cp.Problem(cp.Minimize(expr), [x[0] == 2.0, x[1] == 1.0])\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.objective.value, 2)\n np.testing.assert_almost_equal(x.value, np.array([2, 1, 0, 0, 0]))\n\n def test_length_example(self) -> None:\n \"\"\"Fix #1760.\"\"\"\n n = 10\n np.random.seed(1)\n A = np.random.randn(n, n)\n x_star = np.random.randn(n)\n b = A @ x_star\n epsilon = 1e-2\n x = cp.Variable(n)\n mse = cp.sum_squares(A @ x - b)/n\n problem = cp.Problem(cp.Minimize(cp.length(x)), [mse <= epsilon])\n assert problem.is_dqcp()\n\n problem.solve(qcp=True)\n assert np.isclose(problem.value, 8)\n\n def test_infeasible(self) -> None:\n x = cp.Variable(2)\n problem = cp.Problem(\n cp.Minimize(cp.length(x)), [x == -1, cp.ceil(x) >= 1])\n problem.solve(SOLVER, qcp=True)\n self.assertIn(problem.status, (s.INFEASIBLE, s.INFEASIBLE_INACCURATE))\n\n def test_sign(self) -> None:\n x = cp.Variable()\n problem = cp.Problem(cp.Minimize(cp.sign(x)), [-2 <= x, x <= -0.5])\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.objective.value, -1)\n self.assertLessEqual(x.value, 0)\n\n problem = cp.Problem(cp.Maximize(cp.sign(x)), [1 <= x, x <= 2])\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.objective.value, 1.0)\n self.assertGreater(x.value, 0)\n\n # Check that sign doesn't change value.\n vector = np.array([.1, -.3, .5])\n variable = cp.Variable(len(vector))\n problem = 
cp.Problem(cp.Maximize(vector @ variable),\n [cp.norm2(variable) <= 1.])\n problem.solve(solver=cp.SCS)\n\n value = variable.value.copy()\n cp.sign(variable).value\n self.assertItemsAlmostEqual(value, variable.value)\n\n def test_dist_ratio(self) -> None:\n x = cp.Variable(2)\n a = np.ones(2)\n b = np.zeros(2)\n problem = cp.Problem(cp.Minimize(cp.dist_ratio(x, a, b)), [x <= 0.8])\n problem.solve(SOLVER, qcp=True)\n np.testing.assert_almost_equal(problem.objective.value, 0.25)\n np.testing.assert_almost_equal(x.value, np.array([0.8, 0.8]))\n\n def test_infeasible_exp_constr(self) -> None:\n x = cp.Variable()\n constr = [cp.exp(cp.ceil(x)) <= -5]\n problem = cp.Problem(cp.Minimize(0), constr)\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.status, s.INFEASIBLE)\n\n def test_infeasible_inv_pos_constr(self) -> None:\n x = cp.Variable(nonneg=True)\n constr = [cp.inv_pos(cp.ceil(x)) <= -5]\n problem = cp.Problem(cp.Minimize(0), constr)\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.status, s.INFEASIBLE)\n\n def test_infeasible_logistic_constr(self) -> None:\n x = cp.Variable(nonneg=True)\n constr = [cp.logistic(cp.ceil(x)) <= -5]\n problem = cp.Problem(cp.Minimize(0), constr)\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.status, s.INFEASIBLE)\n\n def test_noop_exp_constr(self) -> None:\n x = cp.Variable()\n constr = [cp.exp(cp.ceil(x)) >= -5]\n problem = cp.Problem(cp.Minimize(0), constr)\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.status, s.OPTIMAL)\n\n def test_noop_inv_pos_constr(self) -> None:\n x = cp.Variable()\n constr = [cp.inv_pos(cp.ceil(x)) >= -5]\n problem = cp.Problem(cp.Minimize(0), constr)\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.status, s.OPTIMAL)\n\n def test_noop_logistic_constr(self) -> None:\n x = cp.Variable(nonneg=True)\n constr = [cp.logistic(cp.ceil(x)) >= -5]\n problem = cp.Problem(cp.Minimize(0), constr)\n problem.solve(SOLVER, qcp=True)\n self.assertEqual(problem.status, s.OPTIMAL)\n\n def test_gen_lambda_max_matrix_completion(self) -> None:\n A = cp.Variable((3, 3))\n B = cp.Variable((3, 3), PSD=True)\n gen_lambda_max = cp.gen_lambda_max(A, B)\n known_indices = tuple(zip(*[[0, 0], [0, 2], [1, 1]]))\n constr = [\n A[known_indices] == [1.0, 1.9, 0.8],\n B[known_indices] == [3.0, 1.4, 0.2],\n ]\n problem = cp.Problem(cp.Minimize(gen_lambda_max), constr)\n self.assertTrue(problem.is_dqcp())\n # smoke test\n problem.solve(cp.SCS, qcp=True)\n\n def test_card_ls(self) -> None:\n n = 10\n np.random.seed(0)\n A = np.random.randn(n, n)\n x_star = np.random.randn(n)\n b = cp.matmul(A, x_star)\n epsilon = 1e-3\n\n x = cp.Variable(n)\n objective_fn = cp.length(x)\n mse = cp.sum_squares(cp.matmul(A, x) - b)/n\n problem = cp.Problem(cp.Minimize(objective_fn), [mse <= epsilon])\n # smoke test\n problem.solve(SOLVER, qcp=True)\n\n def test_multiply_const(self) -> None:\n x = cp.Variable()\n obj = cp.Minimize(0.5 * cp.ceil(x))\n problem = cp.Problem(obj, [x >= 10])\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(x.value, 10, places=1)\n self.assertAlmostEqual(problem.value, 5, places=1)\n\n x = cp.Variable()\n obj = cp.Minimize(cp.ceil(x) * 0.5)\n problem = cp.Problem(obj, [x >= 10])\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(x.value, 10, places=1)\n self.assertAlmostEqual(problem.value, 5, places=1)\n\n x = cp.Variable()\n obj = cp.Maximize(-0.5 * cp.ceil(x))\n problem = cp.Problem(obj, [x >= 10])\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(x.value, 10, places=1)\n 
self.assertAlmostEqual(problem.value, -5, places=1)\n\n x = cp.Variable()\n obj = cp.Maximize(cp.ceil(x) * -0.5)\n problem = cp.Problem(obj, [x >= 10])\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(x.value, 10, places=1)\n self.assertAlmostEqual(problem.value, -5, places=1)\n\n def test_div_const(self) -> None:\n x = cp.Variable()\n obj = cp.Minimize(cp.ceil(x) / 0.5)\n problem = cp.Problem(obj, [x >= 10])\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(x.value, 10, places=1)\n self.assertAlmostEqual(problem.value, 20, places=1)\n\n x = cp.Variable()\n obj = cp.Maximize(cp.ceil(x) / -0.5)\n problem = cp.Problem(obj, [x >= 10])\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(x.value, 10, places=1)\n self.assertAlmostEqual(problem.value, -20, places=1)\n\n def test_reciprocal(self) -> None:\n x = cp.Variable(pos=True)\n problem = cp.Problem(cp.Minimize(1/x))\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.value, 0, places=3)\n\n def test_abs(self) -> None:\n x = cp.Variable(pos=True)\n problem = cp.Problem(cp.Minimize(cp.abs(1/x)))\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.value, 0, places=3)\n\n x = cp.Variable(neg=True)\n problem = cp.Problem(cp.Minimize(cp.abs(1/x)))\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.value, 0, places=3)\n\n def test_tutorial_example(self) -> None:\n x = cp.Variable()\n y = cp.Variable(pos=True)\n objective_fn = -cp.sqrt(x) / y\n problem = cp.Problem(cp.Minimize(objective_fn), [cp.exp(x) <= y])\n # smoke test\n problem.solve(SOLVER, qcp=True)\n\n def test_curvature(self) -> None:\n x = cp.Variable(3)\n expr = cp.length(x)\n self.assertEqual(expr.curvature, s.QUASICONVEX)\n expr = -cp.length(x)\n self.assertEqual(expr.curvature, s.QUASICONCAVE)\n expr = cp.ceil(x)\n self.assertEqual(expr.curvature, s.QUASILINEAR)\n self.assertTrue(expr.is_quasilinear())\n\n def test_tutorial_dqcp(self) -> None:\n # The sign of variables affects curvature analysis.\n x = cp.Variable(nonneg=True)\n concave_frac = x * cp.sqrt(x)\n constraint = [cp.ceil(x) <= 10]\n problem = cp.Problem(cp.Maximize(concave_frac), constraint)\n self.assertTrue(concave_frac.is_quasiconcave())\n self.assertTrue(constraint[0].is_dqcp())\n self.assertTrue(problem.is_dqcp())\n\n w = cp.Variable()\n fn = w * cp.sqrt(w)\n problem = cp.Problem(cp.Maximize(fn))\n self.assertFalse(fn.is_dqcp())\n self.assertFalse(problem.is_dqcp())\n\n def test_add_constant(self) -> None:\n # The sign of variables affects curvature analysis.\n x = cp.Variable()\n problem = cp.Problem(cp.Minimize(cp.ceil(x) + 5), [x >= 2])\n problem.solve(SOLVER, qcp=True)\n np.testing.assert_almost_equal(x.value, 2)\n np.testing.assert_almost_equal(problem.objective.value, 7)\n\n def test_max(self) -> None:\n x = cp.Variable(2, pos=True)\n obj = cp.max((1 - 2*cp.sqrt(x) + x) / x)\n problem = cp.Problem(cp.Minimize(obj), [x[0] <= 0.5, x[1] <= 0.9])\n self.assertTrue(problem.is_dqcp())\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, 0.1715, places=3)\n\n def test_min(self) -> None:\n x = cp.Variable(2)\n expr = cp.min(cp.ceil(x))\n problem = cp.Problem(cp.Maximize(expr),\n [x[0] >= 11.9, x[0] <= 15.8, x[1] >= 17.4])\n self.assertTrue(problem.is_dqcp())\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.objective.value, 16.0)\n self.assertLess(x[0].value, 16.0)\n self.assertGreater(x[0].value, 14.9)\n self.assertGreater(x[1].value, 17.3)\n\n def test_sum_of_qccv_not_dqcp(self) -> None:\n t = 
cp.Variable(5, pos=True)\n expr = cp.sum(cp.square(t) / t)\n self.assertFalse(expr.is_dqcp())\n\n def test_flip_bounds(self) -> None:\n x = cp.Variable(pos=True)\n problem = cp.Problem(cp.Maximize(cp.ceil(x)), [x <= 1])\n problem.solve(SOLVER, qcp=True, low=0, high=0.5)\n self.assertGreater(x.value, 0)\n self.assertLessEqual(x.value, 1)\n\n problem.solve(SOLVER, qcp=True, low=0, high=None)\n self.assertGreater(x.value, 0)\n self.assertLessEqual(x.value, 1)\n\n problem.solve(SOLVER, qcp=True, low=None, high=0.5)\n self.assertGreater(x.value, 0)\n self.assertLessEqual(x.value, 1)\n\n def test_scalar_sum(self) -> None:\n x = cp.Variable(pos=True)\n problem = cp.Problem(cp.Minimize(cp.sum(1/x)))\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.value, 0, places=3)\n\n problem = cp.Problem(cp.Minimize(cp.cumsum(1/x)))\n problem.solve(SOLVER, qcp=True)\n self.assertAlmostEqual(problem.value, 0, places=3)\n" ]
[ [ "numpy.random.seed", "numpy.arange", "numpy.ones", "numpy.testing.assert_almost_equal", "numpy.random.randn", "numpy.array", "numpy.zeros", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
voitsik/pypima
[ "594a6f8b800950377e182b9886d1c0288d3514d0" ]
[ "pypima/raexperiment.py" ]
[ "\"\"\"\nCreated on Sun Dec 29 04:02:35 2013\n\n@author: Petr Voytsik\n\"\"\"\n\nimport logging\nimport os.path\nimport shutil\nimport subprocess\nimport threading\nfrom datetime import datetime\nfrom io import BytesIO\n\nimport numpy as np\nimport pandas as pd\nimport pycurl\n\nimport pypima.pima\n\nfrom .fri import Fri\nfrom .pima import ActaFile, Pima, bpas_log_snr_new\nfrom .uvfits import UVFits\n\n\nclass Error(Exception):\n \"\"\"Raised when RaExperiment error occurs\"\"\"\n\n def __init__(self, exper, band, msg):\n self.exper = exper\n self.band = band\n self.msg = msg\n # self.time = str(datetime.now())\n\n def __str__(self):\n return f\"{self.exper}({self.band}): {self.msg}\"\n\n\nclass RaExperiment:\n \"\"\"This class describe experiment in RadioAstron AGN survey\"\"\"\n\n def __init__(\n self,\n experiment_code,\n band,\n data_base,\n data_dir=None,\n uv_fits=None,\n orbit=None,\n gvlbi=False,\n ):\n \"\"\"\n Parameters\n ----------\n\n experiment_code : str\n Experiment code.\n band : srt\n One letter frequency band code.\n data_base : pypima.db.DB\n pypima.db.DB instance.\n data_dir : str, optional\n Directory for FITS-IDI. If ``None`` working directory of the\n current experiment is used.\n uv_fits : str or list, optional\n Path(s) to the data file (FITS-IDI). If ``None`` (default) get a\n file name from data base and download file from the FTP archive.\n orbit : str, optional\n Path to a file with reconstructed orbit. If ``None`` (default),\n download it from the FTP archive.\n gvlbi : bool, optional\n If ``True``, process ground only of part of the experiment (GVLBI\n FITS file).\n\n \"\"\"\n # First, set common variables\n self.exper = experiment_code.lower()\n self.band = band.lower()\n self.db = data_base\n self.gvlbi = gvlbi\n self.sta_ref = \"RADIO-AS\"\n self.run_id = 0 # Record id in pima_runs database table\n self.lock = threading.Lock() # Lock for FITS file downloading control\n self.logger = logging.getLogger(f\"{self.exper}({self.band})\")\n self.antab = None\n self.antab_downloaded = False\n self.calibration_loaded = False\n self.split_time_aver = 0\n self.pima = None\n self.uv_fits = uv_fits\n self.orbit = orbit\n self.fri = None # Result of last fringe fitting\n self.scan_part = 0\n self.bad_obs_set = set() # Set of bad obs (autospec)\n self.acta_files = {} # Store autospectra data\n\n # dict (POLAR, frq_grp) -> BANDPASS_FILE\n self.bpass_files = {\n (\"RR\", 1): \"\",\n (\"LL\", 1): \"\",\n (\"RL\", 1): \"\",\n (\"LR\", 1): \"\",\n }\n\n if self.band not in (\"p\", \"l\", \"c\", \"k\"):\n self._error(f\"unknown band {band}\")\n\n self.pima_dir = os.getenv(\"PIMA_DIR\")\n self.exp_dir = os.getenv(\"pima_exp_dir\")\n if not self.exp_dir:\n self._error(\"Environment variable $pima_exp_dir is not set\")\n\n self.pima_scr = os.getenv(\"pima_scr_dir\")\n\n # Work directory path\n self.work_dir = os.path.join(self.exp_dir, self.exper + \"_auto\")\n if self.gvlbi:\n self.work_dir += \"_gvlbi\"\n\n # Select directory for raw data from a correlator\n if data_dir:\n self.data_dir = os.path.join(data_dir, self.exper)\n else:\n self.data_dir = self.work_dir\n\n if len(self.data_dir) >= pypima.pima.UVFILE_NAME_LEN - 1:\n self._error(\n \"Length of the data_dir path must be less than {} \\\nbytes\".format(\n pypima.pima.UVFILE_NAME_LEN - 1\n )\n )\n\n # PIMA control file path\n self.cnt_file_name = os.path.join(\n self.work_dir, f\"{self.exper}_{self.band}_pima.cnt\"\n )\n\n def init_workdir(self):\n \"\"\"Create working directory and PIMA control file.\"\"\"\n # Create 
work directory\n os.makedirs(self.work_dir, exist_ok=True)\n\n os.chdir(self.work_dir)\n\n # Create PIMA control file\n self._mk_cnt()\n\n self.pima = Pima(self.exper, self.band, self.work_dir)\n\n if self.uv_fits:\n self.pima.update_cnt({\"UV_FITS:\": self.uv_fits})\n\n # Only one sideband at P-band\n if self.band == \"p\":\n self.pima.update_cnt({\"END_FRQ:\": \"1\"})\n\n # Do not restrict delay rate window\n self.pima.update_cnt({\"FRIB.RATE_WINDOW_WIDTH:\": \"1.0D-8\"})\n\n # Setup staging dir\n staging_dir = os.getenv(\"PYPIMA_STAGING_DIR\")\n\n if staging_dir:\n staging_dir = os.path.join(staging_dir, self.exper)\n os.makedirs(staging_dir, exist_ok=True)\n self.pima.update_cnt({\"STAGING_DIR:\": staging_dir})\n else:\n self.pima.update_cnt({\"STAGING_DIR:\": \"NO\"})\n\n # Select the best fftw wisdom file\n for thread_num in (8, 4, 2, 1):\n for size in (\"huge\", \"big\", \"small\"):\n wis_file = f\"pima_{size}_measure_{thread_num:d}thr.wis\"\n wis_file = os.path.join(self.pima_dir, \"share\", \"pima\", wis_file)\n\n if os.path.isfile(wis_file):\n self.pima.update_cnt(\n {\"FFT_CONFIG_FILE:\": wis_file, \"NUM_THREADS:\": thread_num}\n )\n os.environ[\"OMP_NUM_THREADS\"] = str(thread_num)\n\n return\n\n def _error(self, msg):\n \"\"\"Raise pima.Error exception.\"\"\"\n self.logger.error(msg)\n raise Error(self.exper, self.band, msg)\n\n def _mk_cnt(self):\n \"\"\"Make new cnt-file from template.\"\"\"\n cnt_templ_name = os.path.join(\n self.pima_dir, \"share\", \"pima\", \"TEMPLATE_pima.cnt\"\n )\n\n sess_code = f\"{self.exper}_{self.band}\"\n fringe_file = os.path.join(self.work_dir, sess_code + \".fri\")\n frires_file = os.path.join(self.work_dir, sess_code + \".frr\")\n\n if self.band in (\"l\", \"p\"):\n polar = \"RR\"\n else:\n polar = \"LL\"\n\n with open(cnt_templ_name) as cnt_templ, open(\n self.cnt_file_name, \"w\"\n ) as cnt_file:\n for line in cnt_templ:\n if \"@CDATE@\" in line:\n line = line.replace(\"@CDATE@\", str(datetime.now()))\n elif \"@pima_dir@\" in line:\n line = line.replace(\"@pima_dir@\", self.pima_dir)\n elif line.startswith(\"SESS_CODE:\"):\n line = line.replace(\"@sess_code@\", sess_code)\n elif line.startswith(\"BAND:\"):\n line = line.replace(\"@band@\", self.band.upper())\n elif line.startswith(\"EXPER_DIR:\"):\n line = line.replace(\"@exper_dir@\", self.pima_scr)\n elif line.startswith(\"FRINGE_FILE:\"):\n line = line.replace(\"@fringe_file@\", fringe_file)\n elif line.startswith(\"FRIRES_FILE:\"):\n line = line.replace(\"@frires_file@\", frires_file)\n elif line.startswith(\"STA_REF:\") and self.sta_ref:\n line = \"{:<20}{}\\n\".format(\"STA_REF:\", self.sta_ref)\n elif line.startswith(\"EPHEMERIDES_FILE:\") and self.orbit:\n line = line.replace(\"@ephemerides_file@\", self.orbit)\n elif line.startswith(\"POLAR:\") or line.startswith(\"SPLT.POLAR:\"):\n line = line.replace(\"@polar@\", polar)\n cnt_file.write(line)\n\n def _download_fits(self, force_small=False):\n \"\"\"\n Download FITS-file from the FTP archive.\n\n \"\"\"\n fits_url, size = self.db.get_uvfits_url(\n self.exper, self.band, self.gvlbi, force_small\n )\n\n if not fits_url:\n self._error(\"Could not find FITS file name in DB\")\n\n # Delete spaces in filename\n uv_fits = os.path.join(\n self.data_dir, os.path.basename(fits_url).replace(\" \", \"\")\n )\n\n if len(uv_fits) > pypima.pima.UVFILE_NAME_LEN:\n uv_fits = uv_fits[: pypima.pima.UVFILE_NAME_LEN]\n assert uv_fits[-1] == \"/\"\n\n if os.path.isfile(uv_fits) and os.path.getsize(uv_fits) == size:\n self.logger.info(\"File %s already 
exists\", uv_fits)\n else:\n os.makedirs(self.data_dir, exist_ok=True)\n self.logger.info(\"Start downloading file %s...\", fits_url)\n try:\n with open(uv_fits, \"wb\") as fil:\n _download_it(fits_url, fil, max_retries=2)\n except pycurl.error as err:\n self._error(f\"Could not download file {fits_url}: {err}\")\n\n self.logger.info(\"FITS-file downloading is complete\")\n\n # We use self.uv_fits as a flag of FITS file existence, so set it at\n # the end of this function\n self.uv_fits = uv_fits\n\n def _get_orbit(self):\n \"\"\"Download reconstructed orbit file from FTP server.\"\"\"\n orbit_url = self.db.get_orbit_url(self.exper)\n\n if not orbit_url:\n self._error(\"Could not find reconstructed orbit\")\n\n self.orbit = os.path.join(self.work_dir, os.path.basename(orbit_url))\n\n self.logger.info(\"Start downloading orbit file %s ...\", orbit_url)\n\n buffer = BytesIO()\n try:\n _download_it(orbit_url, buffer)\n except pycurl.error as err:\n self._error(f\"Could not download file {orbit_url}: {err}\")\n\n orb_data = buffer.getvalue().decode().splitlines()\n\n with open(self.orbit, \"w\") as orb_file:\n if not orb_data[0].startswith(\"CCSDS_OEM_VERS\"):\n orb_file.write(\"CCSDS_OEM_VERS = 2.0\\n\")\n\n # Fix meta information\n for line in orb_data:\n if line.startswith(\"CENTER_NAME\"):\n line = \"CENTER_NAME = Earth Barycenter\"\n elif line.startswith(\"OBJECT_NAME\"):\n line = \"OBJECT_NAME = RADIO-ASTRON\"\n elif line.startswith(\"CREATION\"):\n line = line.replace(\"CREATION DATE\", \"CREATION_DATE\")\n elif line.startswith(\"STOP_TIME\") and len(line) < 20:\n for back_line in reversed(orb_data):\n cols = back_line.split()\n if cols:\n break\n line = line.strip() + \" \" + cols[0]\n\n orb_file.write(line + \"\\n\")\n\n self.logger.info(\"Orbit downloading is complete\")\n self.pima.update_cnt({\"EPHEMERIDES_FILE:\": self.orbit})\n\n def _get_antab(self):\n \"\"\"\n Download ANTAB-file from the FTP server.\n\n \"\"\"\n antab_url = self.db.get_antab_url(self.exper, self.band)\n\n if not antab_url:\n self.logger.warning(\"Could not get ANTAB-file url from DB.\")\n else:\n antab_dir = os.path.join(self.work_dir, \"antab\")\n os.makedirs(antab_dir, exist_ok=True)\n\n antab_file = os.path.join(antab_dir, os.path.basename(antab_url) + \".orig\")\n self.logger.info(\"Start downloading file %s\", antab_url)\n try:\n with open(antab_file, \"wb\") as fil:\n _download_it(antab_url, fil)\n\n self.antab_downloaded = True\n self.logger.info(\"ANTAB-file downloading is complete.\")\n self.antab = self._fix_antab(antab_file)\n except pycurl.error as err:\n self.antab_downloaded = False\n self.logger.warning(\"Could not download file %s: %s\", antab_url, err)\n\n def _fix_antab(self, antab):\n \"\"\"Fix antab.\"\"\"\n if not antab or not os.path.isfile(antab):\n return\n\n new_antab = antab.replace(\".orig\", \"\")\n\n # ANTAB file already exists and prepared\n if antab == new_antab:\n return new_antab\n\n freq_setup = self.pima.frequencies\n freq_list = [1e-6 * freq.freq for freq in freq_setup]\n\n # Should we fix frequency setup?\n fix_freq = False\n if self.band != \"p\" and freq_setup[0].side_band != -1:\n self.logger.warning(\"enable sideband fix for ANTAB\")\n fix_freq = True\n\n sta_list = self.pima.station_list(ivs_name=False)\n\n with open(antab) as inp, open(new_antab, \"w\") as out:\n magic = inp.readline()\n if not magic.startswith(\"! 
Produced by: TSM\"):\n self.logger.warning(\n \"antab file %s does NOT have magic in the\" \"first line\", antab\n )\n return\n\n # Do not forget to write a 'magic' line to the output file\n out.write(magic)\n\n for line in inp:\n line = line.strip()\n\n # Skip empty lines\n if not line:\n continue\n\n if line.startswith(\"POLY\") and line.endswith(\"/\"):\n line = line.replace(\"/\", \" /\")\n elif line.startswith(\"/\") and len(line) > 1:\n line = line.replace(\"/\", \"/ \", 1)\n elif not line.startswith(\"!\") and \"!\" in line:\n line = line.split(\"!\")[0].strip()\n\n # VLA (raes11a and friends)\n if \"YY\" in sta_list and \"Y27\" in line:\n line = line.replace(\"Y27\", \"YY\")\n elif \"KZ\" in sta_list and \"KL\" in line:\n line = line.replace(\"KL\", \"KZ\")\n elif \"EF\" in sta_list and \"EB\" in line:\n line = line.replace(\"EB\", \"EF\")\n elif \"WB\" in sta_list and \"WB1\" in line:\n line = line.replace(\"WB1\", \"WB\")\n\n toks = line.split()\n\n # Fix EF C-band channels table\n if len(toks) == 10 and toks[0] == \"!\" and toks[1].isdigit():\n self.logger.warning(\"fix EF C-band channels table\")\n toks.insert(2, \"6cm\")\n\n if fix_freq and len(toks) > 9 and toks[1].isdigit():\n if toks[6] == \"L\":\n toks[6] = \"U\"\n toks[9] = \"{:.2f}MHz\".format(freq_list[0])\n\n # Deselect stations\n if toks[0] == \"TSYS\" and len(toks) > 4:\n toks[4] = toks[4].upper()\n if toks[4] not in sta_list:\n self.logger.warning(\"deselect %s from ANTAB\", toks[4])\n toks.insert(0, \"!\")\n elif toks[0] == \"GAIN\":\n # EF, L-band GAINs\n if toks[-1] == \"/\":\n for ind in range(len(toks)):\n if toks[ind].startswith(\"POLY\"):\n toks[ind] = \"\\n\" + toks[ind]\n break\n\n # Comment out GAIN line for different frequency\n for tok in toks:\n if tok.startswith(\"FREQ=\") and \",\" in tok:\n fr1, fr2 = tok.replace(\"FREQ=\", \"\").split(\",\")\n fr1 = float(fr1) # Lower limit\n fr2 = float(fr2) # Upper limit\n if min(freq_list) < fr1 or max(freq_list) > fr2:\n self.logger.warning(\n \"deselect GAIN due to \" \"%s is out of freq range\",\n tok,\n )\n toks.insert(0, \"!\")\n break\n\n elif toks[0] == \"/\" and len(toks) > 1:\n toks[1] = \"\\n\" + toks[1]\n\n out.write(\" \".join(toks) + \"\\n\")\n\n return new_antab\n\n def load(\n self,\n download_only=False,\n update_db=False,\n scan_length=1200,\n scan_part=1,\n force_small=False,\n beg_frq=None,\n end_frq=None,\n ):\n \"\"\"\n Download data, run pima load, and do some checks.\n\n Parameters\n ----------\n download_only : bool, optional\n If ``True``, download FITS-file and return.\n update_db : bool, optional\n If ``True``, update database with experiment information.\n scan_length : float, optional\n Set maximum length of scan. Default is 20 min.\n scan_part : int, optional\n 1 is full scan, 2 is half of scan. 
In general `scan_part` can be\n used as run index.\n force_small : bool, optional\n Force laod 64-channels FITS_IDI files.\n beg_frq : int, optional\n Start IF index.\n end_frq : int, optional\n End IF index.\n\n \"\"\"\n self.scan_part = scan_part\n self.bad_obs_set.clear()\n self.acta_files.clear()\n\n # If self.uv_fits is not None assume FITS file already exists\n with self.lock:\n if not self.uv_fits:\n self._download_fits(force_small)\n\n if download_only:\n return\n\n self.pima.update_cnt({\"UV_FITS:\": self.uv_fits})\n\n # Check free space in STAGING_DIR\n staging_dir = self.pima.cnt_params[\"STAGING_DIR:\"]\n\n if os.path.isdir(staging_dir):\n df = shutil.disk_usage(staging_dir)\n\n if isinstance(self.uv_fits, list):\n fits_file = self.uv_fits[0]\n\n uv_fits_size = sum([os.path.getsize(file) for file in self.uv_fits])\n else:\n fits_file = self.uv_fits\n uv_fits_size = os.path.getsize(fits_file)\n\n fits_file_staging = os.path.join(staging_dir, os.path.basename(fits_file))\n\n # Free space < size of file + 10%\n if not os.path.isfile(fits_file_staging) and df.free < 1.1 * uv_fits_size:\n self.logger.warning(\"Not enough space in STAGING_DIR\")\n self.pima.update_cnt({\"STAGING_DIR:\": \"NO\"})\n shutil.rmtree(staging_dir)\n\n if self.orbit is None:\n self._get_orbit()\n\n # Set maximum scan length\n self.logger.info(\"Set maximum scan length to %s s\", scan_length)\n self.pima.update_cnt(\n {\"MAX_SCAN_LEN:\": str(scan_length), \"SCAN_LEN_USED:\": str(scan_length)}\n )\n\n if update_db:\n if isinstance(self.uv_fits, list):\n uv_fits = self.uv_fits[0] # Only first FITS to DB\n else:\n uv_fits = self.uv_fits\n\n self.run_id = self.db.add_exper_info(\n self.exper, self.band, os.path.basename(uv_fits), scan_part\n )\n\n self.pima.load()\n\n if update_db:\n self.db.update_exper_info(self.pima.exper_info, self.run_id)\n if scan_part == 1:\n self.db.model2db(self.run_id, self.pima.clock_model())\n\n #\n # Various checks and setups\n #\n if self.pima.obs_number == 0:\n self._error(\"ZERO observations have been loaded\")\n\n sou_dist = self.pima.source_dist()\n for source, distance in sou_dist.items():\n if distance > 1.0:\n self._error(f\"Dist = {distance} arcsec for source {source}\")\n\n # Set number of IFs\n if beg_frq:\n if beg_frq < 1 or beg_frq > self.pima.exper_info[\"if_num\"]:\n self._error(\n \"beg_frq must be in range [1, {}]\".format(\n self.pima.exper_info[\"if_num\"]\n )\n )\n else:\n self.pima.update_cnt({\"BEG_FRQ:\": beg_frq})\n else:\n self.pima.update_cnt({\"BEG_FRQ:\": 1})\n\n if end_frq:\n if (\n end_frq < int(self.pima.cnt_params[\"BEG_FRQ:\"])\n or end_frq > self.pima.exper_info[\"if_num\"]\n ):\n self._error(\n \"end_frq must be in range [{}, {}]\".format(\n self.pima.cnt_params[\"BEG_FRQ:\"],\n self.pima.exper_info[\"if_num\"]\n )\n )\n else:\n self.pima.update_cnt({\"END_FRQ:\": end_frq})\n else:\n self.pima.update_cnt({\"END_FRQ:\": self.pima.exper_info[\"if_num\"]})\n\n if \"RADIO-AS\" not in self.pima.station_list():\n self.logger.warning(\"RADIO-AS is not in station list\")\n self.sta_ref = self.pima.station_list()[0]\n self.pima.update_cnt({\"STA_REF:\": self.sta_ref})\n\n desel_nam = self.pima.number_of_deselected_points\n if desel_nam > 10:\n self.logger.warning(\"Total number of deselected points is %s\", desel_nam)\n\n # Save memory by reducing oversampling\n if self.pima.ap_minmax[0] < 0.1:\n self.pima.update_cnt(\n {\"FRIB.OVERSAMPLE_MD:\": \"2\", \"FRIB.OVERSAMPLE_RT:\": \"2\"}\n )\n\n # Average all spectral channels in each IF when splitting.\n 
self.pima.update_cnt({\"SPLT.FRQ_MSEG:\": str(self.pima.chan_number)})\n\n if scan_part == 1:\n self.pima.update_cnt(\n {\"FRIB.1D_RESFRQ_PLOT:\": \"TXT\", \"FRIB.1D_RESTIM_PLOT:\": \"TXT\"}\n )\n else:\n self.pima.update_cnt(\n {\"FRIB.1D_RESFRQ_PLOT:\": \"NO\", \"FRIB.1D_RESTIM_PLOT:\": \"NO\"}\n )\n\n def load_antab(self):\n \"\"\"\n Download ANTAB file and load calibration information to PIMA.\n\n \"\"\"\n # Always download antab-file.\n if not self.antab_downloaded:\n self._get_antab()\n\n # Try to load calibration information from ANTAB\n if self.antab and os.path.isfile(self.antab):\n try:\n self.pima.load_gains(self.antab)\n self.pima.load_tsys(self.antab)\n self.calibration_loaded = True\n except pypima.pima.Error:\n self.logger.warning(\"Could not load calibration information\")\n self.calibration_loaded = False\n\n def _select_ref_sta(self, fri, ref_sta=None):\n \"\"\"\n Select reference station for bandpass calibration.\n\n Parameters\n ----------\n fri : ``Fri`` object\n **PIMA** fringe fitting results as ``Fri`` object.\n ref_sta : str, optional\n\n \"\"\"\n if ref_sta and ref_sta not in self.pima.station_list():\n self._error(f\"Station {ref_sta} is not in station list\")\n else:\n self.sta_ref = ref_sta\n\n if not self.sta_ref:\n snr_detecton = float(self.pima.cnt_params[\"FRIB.SNR_DETECTION:\"])\n\n obs = fri.max_snr(\"RADIO-AS\")\n\n if obs:\n if obs[\"SNR\"] < snr_detecton:\n self.logger.debug(\n \"SNR is too low on space baseline for \\\n bandpass: %s\",\n obs[\"SNR\"],\n )\n else:\n if obs[\"sta1\"] == \"RADIO-AS\":\n self.sta_ref = obs[\"sta2\"]\n elif obs[\"sta2\"] == \"RADIO-AS\":\n self.sta_ref = obs[\"sta1\"]\n else:\n self.logger.info(\"No scans with RADIO-AS\")\n\n if not self.sta_ref:\n obs = fri.max_snr()\n\n if not obs:\n return False\n\n if obs[\"SNR\"] < snr_detecton:\n self.logger.debug(\"SNR is too low for bandpass: %s\", obs[\"SNR\"])\n else:\n good_stations = [\"ARECIBO\", \"GBT-VLBA\", \"EFLSBERG\", \"ATCA-104\"]\n for sta in good_stations:\n if sta in (obs[\"sta1\"], obs[\"sta2\"]):\n self.sta_ref = sta\n break\n if self.sta_ref is None:\n self.sta_ref = obs[\"sta1\"]\n\n if self.sta_ref:\n self.pima.update_cnt(\n {\n \"STA_REF:\": self.sta_ref,\n \"BPS.SNR_MIN_ACCUM:\": \"5.5\",\n \"BPS.SNR_MIN_FINE:\": \"5.5\",\n }\n )\n self.logger.info(\"New reference station is %s\", self.sta_ref)\n\n return True\n else:\n return False\n\n def _check_bad_autospec_obs(self):\n \"\"\"Return set of observation numbers with bad autospectrum.\"\"\"\n self.generate_autospectra()\n\n if not self.acta_files:\n return\n\n bad_obs_set = set()\n\n for obs in self.pima.observations:\n for sta in (obs.sta1, obs.sta2):\n if self.pima.cnt_params[\"POLAR:\"] in (\"RR\", \"LL\"):\n sta_pol = self.pima.cnt_params[\"POLAR:\"]\n elif self.pima.cnt_params[\"POLAR:\"] == \"RL\":\n if sta == obs.sta1:\n sta_pol = \"RR\"\n else:\n sta_pol = \"LL\"\n elif self.pima.cnt_params[\"POLAR:\"] == \"LR\":\n if sta == obs.sta1:\n sta_pol = \"LL\"\n else:\n sta_pol = \"RR\"\n else:\n raise RuntimeError(\n \"unsupported polar {}\".format(self.pima.cnt_params[\"POLAR:\"])\n )\n\n try:\n acta_file = self.acta_files[sta_pol, obs.time_code, sta]\n except KeyError as err:\n self.logger.warning(\"no ACTA file for %s\", err)\n continue\n\n if np.median(acta_file.ampl) < 0.5:\n self.logger.warning(\n \"Bad autospec for sta: %s obs: %s\", sta, obs.obs\n )\n bad_obs_set.add(obs.obs)\n\n return bad_obs_set\n\n def _auto_bpas(self, fringe_fit: bool = False) -> None:\n \"\"\"Iterate over bandpass parameters 
and select the best case.\"\"\"\n # log_bps_dict = {}\n snr_dict = {}\n\n for deg in range(1, 7):\n log_file = self.pima.bpas(\n params={\"BPS.DEG_AMP:\": str(deg), \"BPS.DEG_PHS:\": str(deg)}\n )\n log_file_deg = f\"{log_file}_{deg}\"\n os.rename(log_file, log_file_deg)\n\n bps_file = self.pima.cnt_params[\"BANDPASS_FILE:\"]\n bps_file_deg = f\"{bps_file}_{deg}\"\n os.rename(bps_file, bps_file_deg)\n\n # log_bps_dict[log_file_deg] = bps_file_deg\n\n if fringe_fit:\n fri = Fri(\n self.pima.fine(\n params={\n \"PHASE_ACCEL_MIN:\": \"0\",\n \"PHASE_ACCEL_MAX:\": \"0\",\n \"FRIB.FINE_SEARCH:\": \"LSQ\",\n \"BANDPASS_FILE:\": bps_file_deg,\n }\n )\n )\n if not fri:\n self._error(\"fringe fitting fails in _auto_bpas\")\n fri.update_status(64)\n # snr_data = {}\n # for rec in fri:\n # if rec['status'] == 'y':\n # snr_data[rec['obs']] = rec['SNR']\n if fri.any_detections(\"RADIO-AS\"):\n snr_data = {\n rec[\"obs\"]: rec[\"SNR\"]\n for rec in fri\n if rec[\"status\"] == \"y\"\n if rec[\"sta1\"] == \"RADIO-AS\"\n }\n else:\n snr_data = {\n rec[\"obs\"]: rec[\"SNR\"] for rec in fri if rec[\"status\"] == \"y\"\n }\n else:\n snr_data = bpas_log_snr_new(log_file_deg, mode=\"INIT\")\n\n if snr_data:\n snr_dict[bps_file_deg] = snr_data\n\n if not snr_dict:\n self.logger.warning(\"could not get bpas_accum SNR from logs\")\n self.pima.bpas()\n else:\n table = pd.DataFrame.from_records(\n list(snr_dict.values()), index=list(snr_dict.keys())\n )\n\n self.logger.debug(\"\\n%s\", str(table))\n\n norm_table = table / table.iloc[0]\n scores = norm_table.sum(axis=1)\n\n # best_bps_file = log_bps_dict[scores.idxmax()]\n best_bps_file = scores.idxmax()\n self.logger.info(\"Best bps is: %s\", best_bps_file)\n\n self.pima.update_cnt({\"BANDPASS_FILE:\": best_bps_file})\n\n def _bandpass(\n self, bandpass_mode=None, ampl_bandpass=True, bandpass_var=0, bandpass_norm=\"IF\"\n ):\n \"\"\"\n Setup **PIMA** bandpass parameters and run ``bpas`` task.\n\n \"\"\"\n bpas_params = {}\n\n if bandpass_var == 0:\n self.logger.warning(\"using bandpass parameters from TEMPLATE\")\n elif bandpass_var == 1:\n bpas_params = {\n \"BPS.MODE:\": \"FINE\",\n \"BPS.NOBS_ACCUM:\": \"8\",\n \"BPS.MSEG_ACCUM:\": \"1\",\n \"BPS.NOBS_FINE:\": \"12\",\n \"BPS.MINOBS_FINE:\": \"8\",\n \"BPS.MSEG_FINE:\": \"1\",\n \"BPS.SNR_MIN_ACCUM:\": \"200.0\",\n \"BPS.SNR_MIN_FINE:\": \"200.0\",\n \"BPS.AMPL_REJECT:\": \"0.4\",\n \"BPS.PHAS_REJECT:\": \"0.2\",\n \"BPS.INTRP_METHOD:\": \"SPLINE\",\n \"BPS.DEG_AMP:\": \"17\",\n \"BPS.DEG_PHS:\": \"11\",\n \"BPS.AMP_MIN:\": \"0.01\",\n \"BPS.NORML:\": \"IF\",\n \"BPS.SEFD_USE:\": \"NO\",\n }\n elif bandpass_var == 2:\n bpas_params = {\n \"BPS.MODE:\": \"FINE\",\n \"BPS.NOBS_ACCUM:\": \"8\",\n \"BPS.MSEG_ACCUM:\": \"1\",\n \"BPS.NOBS_FINE:\": \"12\",\n \"BPS.MINOBS_FINE:\": \"8\",\n \"BPS.MSEG_FINE:\": \"1\",\n \"BPS.SNR_MIN_ACCUM:\": \"50.0\",\n \"BPS.SNR_MIN_FINE:\": \"50.0\",\n \"BPS.AMPL_REJECT:\": \"0.4\",\n \"BPS.PHAS_REJECT:\": \"0.2\",\n \"BPS.INTRP_METHOD:\": \"SPLINE\",\n \"BPS.DEG_AMP:\": \"5\",\n \"BPS.DEG_PHS:\": \"5\",\n \"BPS.AMP_MIN:\": \"0.01\",\n \"BPS.NORML:\": \"IF\",\n \"BPS.SEFD_USE:\": \"NO\",\n }\n elif bandpass_var == 3:\n mseg = self.pima.chan_number // 2\n min_snr = 5.1 # Could be tuned\n\n bpas_params = {\n \"BPS.MODE:\": \"ACCUM\",\n \"BPS.NOBS_ACCUM:\": \"6\",\n \"BPS.MSEG_ACCUM:\": mseg,\n \"BPS.NOBS_FINE:\": \"12\",\n \"BPS.MINOBS_FINE:\": \"8\",\n \"BPS.MSEG_FINE:\": mseg,\n \"BPS.SNR_MIN_ACCUM:\": min_snr,\n \"BPS.SNR_MIN_FINE:\": min_snr,\n \"BPS.AMPL_REJECT:\": \"0.4\",\n 
\"BPS.PHAS_REJECT:\": \"0.2\",\n \"BPS.INTRP_METHOD:\": \"LINEAR\",\n \"BPS.DEG_AMP:\": \"0\",\n \"BPS.DEG_PHS:\": \"1\",\n \"BPS.AMP_MIN:\": \"0.01\",\n \"BPS.NORML:\": \"IF\",\n \"BPS.SEFD_USE:\": \"NO\",\n \"FRIB.SNR_DETECTION:\": min_snr,\n }\n elif bandpass_var in (4, 5):\n mseg = 4\n min_snr = 5.1 # Could be tuned\n\n bpas_params = {\n \"BPS.MODE:\": \"ACCUM\",\n \"BPS.NOBS_ACCUM:\": \"6\",\n \"BPS.MSEG_ACCUM:\": mseg,\n \"BPS.NOBS_FINE:\": \"12\",\n \"BPS.MINOBS_FINE:\": \"8\",\n \"BPS.MSEG_FINE:\": mseg,\n \"BPS.SNR_MIN_ACCUM:\": min_snr,\n \"BPS.SNR_MIN_FINE:\": min_snr,\n \"BPS.AMPL_REJECT:\": \"0.4\",\n \"BPS.PHAS_REJECT:\": \"0.2\",\n \"BPS.INTRP_METHOD:\": \"LEGENDRE\",\n \"BPS.DEG_AMP:\": \"5\",\n \"BPS.DEG_PHS:\": \"5\",\n \"BPS.AMP_MIN:\": \"0.01\",\n \"BPS.NORML:\": \"IF\",\n \"BPS.SEFD_USE:\": \"NO\",\n \"FRIB.SNR_DETECTION:\": min_snr,\n }\n else:\n self._error(f\"Unsupported bandpass_var {bandpass_var}\")\n\n if bandpass_mode:\n bpas_params[\"BPS.MODE:\"] = bandpass_mode\n\n if not ampl_bandpass:\n bpas_params[\"BPS.DEG_AMP:\"] = \"0\"\n\n if bandpass_norm:\n bpas_params[\"BPS.NORML:\"] = bandpass_norm\n if bandpass_norm == \"NO\":\n bpas_params[\"BPS.MODE:\"] = \"INIT\"\n\n self.pima.update_cnt(bpas_params)\n\n try:\n if bandpass_var in (4, 5) and self.pima.cnt_params[\"BPS.MODE:\"] == \"ACCUM\":\n self.logger.info(\"starting _auto_bpas\")\n if bandpass_var == 4:\n self._auto_bpas(fringe_fit=False)\n else:\n self._auto_bpas(fringe_fit=True)\n else:\n if self.gvlbi:\n bpas_params = {\n \"FRIB.FINE_SEARCH:\": \"PAR\",\n \"MKDB.FRINGE_ALGORITHM:\": \"DRF\",\n \"PHASE_ACCEL_MIN:\": \"0\",\n \"PHASE_ACCEL_MAX:\": \"0\",\n }\n else:\n bpas_params = {}\n\n self.pima.bpas(bpas_params)\n except pypima.pima.Error:\n self.logger.warning(\"continue without bandpass\")\n self.pima.update_cnt({\"BANDPASS_FILE:\": \"NO\"})\n return False\n\n return True\n\n def fringe_fitting(\n self,\n bandpass=False,\n accel=False,\n bandpass_mode=None,\n ampl_bandpass=True,\n bandpass_var=0,\n bandpass_use=None,\n bandpass_norm=\"IF\",\n bandpass_renorm=True,\n reference_station=None,\n ):\n \"\"\"\n Perform a fringe fitting.\n\n Parameters\n ----------\n bandpass : bool, optional\n If ``True`` try to do a bandpass calibration. Default is ``False``.\n accel : bool, optional\n If ``True`` turn on a phase acceleration fitting.\n bandpass_mode : str, optional\n Set the ``BPS.MODE`` **PIMA** parameter.\n ampl_bandpass : bool, optional\n If ``True``, do the amplitude bandpass calibration. Set polynomial\n degree to zero otherwise.\n bandpass_var : int, optional\n Select predefined bandpass parameters.\n bandpass_use : str, optional\n Set ``BANDPASS_USE`` **PIMA** parameter.\n bandpass_norm : str, optional\n Set ``BPS.NORML`` **PIMA** parameter. This keywords specifies the\n way how the bandpass normalization is made.\n bandpass_renorm: bool, optional\n If ``True``, apply the bandpass renormalization factors for given\n intermediate frequencies using only a part of the bandwidth.\n reference_station : str, optional\n Reference station for bandpass calibration. 
If ``None`` select\n an optimal station for ground-space baselines.\n\n Returns\n -------\n fri : ``Fri`` object\n Fringe fitting results as ``Fri`` object.\n\n \"\"\"\n if accel:\n self.pima.update_cnt({\"FRIB.FINE_SEARCH:\": \"ACC\"})\n if self.band == \"l\":\n self.pima.update_cnt(\n {\"PHASE_ACCEL_MIN:\": \"-1.D-13\", \"PHASE_ACCEL_MAX:\": \"1.D-13\"}\n )\n elif self.band == \"k\":\n self.pima.update_cnt(\n {\"PHASE_ACCEL_MIN:\": \"-5.D-15\", \"PHASE_ACCEL_MAX:\": \"5.D-15\"}\n )\n else:\n self.pima.update_cnt(\n {\"PHASE_ACCEL_MIN:\": \"-1.D-14\", \"PHASE_ACCEL_MAX:\": \"1.D-14\"}\n )\n else:\n self.pima.update_cnt(\n {\n \"FRIB.FINE_SEARCH:\": \"LSQ\",\n \"PHASE_ACCEL_MIN:\": \"0\",\n \"PHASE_ACCEL_MAX:\": \"0\",\n }\n )\n\n if bandpass_use:\n self.pima.update_cnt({\"BANDPASS_USE:\": bandpass_use})\n\n if bandpass_use == \"NO\":\n bandpass = False\n\n if bandpass and self.pima.chan_number > 512:\n self.logger.warning(\n \"Too many spectral channels for bandpass: %s\", self.pima.chan_number\n )\n bandpass = False\n\n polar = self.pima.cnt_params[\"POLAR:\"]\n frq_grp = int(self.pima.cnt_params[\"FRQ_GRP:\"])\n\n if bandpass:\n if bandpass_renorm:\n self.pima.update_cnt({\"SPLT.BPASS_NRML_METHOD:\": \"WEIGHTED\"})\n else:\n self.pima.update_cnt({\"SPLT.BPASS_NRML_METHOD:\": \"NO\"})\n\n # Update list of obs with bad autospectrum\n bad_obs = self._check_bad_autospec_obs()\n if bad_obs:\n self.bad_obs_set = bad_obs\n else:\n self.bad_obs_set.clear()\n\n # If bps-file already exists -- use it\n if (polar, frq_grp) in self.bpass_files and os.path.isfile(\n self.bpass_files[polar, frq_grp]\n ):\n self.pima.update_cnt(\n {\"BANDPASS_FILE:\": self.bpass_files[polar, frq_grp]}\n )\n else:\n self.pima.mk_exclude_obs_file(self.bad_obs_set, \"coarse\")\n\n if self.gvlbi:\n coarse_params = {\n \"FRIB.FINE_SEARCH:\": \"PAR\",\n \"MKDB.FRINGE_ALGORITHM:\": \"DRF\",\n \"PHASE_ACCEL_MIN:\": \"0\",\n \"PHASE_ACCEL_MAX:\": \"0\",\n }\n else:\n coarse_params = {}\n\n fri_file = self.pima.coarse(coarse_params)\n fri = Fri(fri_file)\n\n # Exclude suspicious observations\n obs_list = []\n for rec in fri:\n if abs(rec[\"rate\"]) > 1e-11 or abs(rec[\"delay\"]) > 1e-6:\n obs_list.append(rec[\"obs\"])\n\n self.pima.mk_exclude_obs_file(obs_list, \"bpas\")\n fri.remove_obs(obs_list)\n\n self.pima.update_cnt({\"FRIB.SNR_DETECTION:\": \"5.2\"})\n\n # Now auto select reference station\n if fri and self._select_ref_sta(fri, reference_station):\n bandpass = self._bandpass(\n bandpass_mode, ampl_bandpass, bandpass_var, bandpass_norm\n )\n self.bpass_files[polar, frq_grp] = self.pima.cnt_params[\n \"BANDPASS_FILE:\"\n ]\n else:\n self.logger.info(\n \"skip bandpass due to absence of the \" \"useful scans\"\n )\n bandpass = False\n self.bpass_files[polar] = \"\"\n self.pima.update_cnt({\"BANDPASS_FILE:\": \"NO\"})\n\n self.pima.mk_exclude_obs_file(self.bad_obs_set, \"fine\")\n\n if frq_grp > 1:\n fri_file = f\"{self.exper}_{self.band}_{polar}_{frq_grp}.fri\"\n frr_file = f\"{self.exper}_{self.band}_{polar}_{frq_grp}.frr\"\n else:\n fri_file = f\"{self.exper}_{self.band}_{polar}.fri\"\n frr_file = f\"{self.exper}_{self.band}_{polar}.frr\"\n\n fri_file = os.path.join(self.work_dir, fri_file)\n frr_file = os.path.join(self.work_dir, frr_file)\n self.pima.update_cnt({\"FRINGE_FILE:\": fri_file, \"FRIRES_FILE:\": frr_file})\n\n fri_file = self.pima.fine()\n self.fri = Fri(fri_file)\n self.fri.aux[\"bandpass\"] = bandpass\n\n if not self.fri:\n self.logger.warning(\"PIMA fri-file is empty after fine\")\n else:\n if 
self.pima.exper_info[\"sp_chann_num\"] <= 128:\n ch_num = 64\n elif self.pima.exper_info[\"sp_chann_num\"] == 256:\n ch_num = 256\n else:\n ch_num = 2048\n\n self.fri.update_status(ch_num)\n\n return self.fri\n\n def flag_edge_chann(self, number):\n \"\"\"\n Flag `number` spectral channels at the edges of the bandpass. Must be\n called after ``load``.\n\n Parameters\n ----------\n number : int\n Number of spectral channels to flag.\n \"\"\"\n chann_num = self.pima.chan_number\n mask = []\n\n if number < 0 or number >= chann_num / 2:\n self._error(f\"invald number of channels to flag: {number}\")\n elif number > 0:\n ind_frq = \"1-{}\".format(self.pima.exper_info[\"if_num\"])\n ind_chn1 = f\"1-{number}\"\n ind_chn2 = \"{}-{}\".format(chann_num - number + 1, chann_num)\n\n mask = [\n (\"ALL\", \"ALL\", ind_frq, ind_chn1, \"OFF\"),\n (\"ALL\", \"ALL\", ind_frq, ind_chn2, \"OFF\"),\n ]\n\n mask_gen_file = self.pima.mk_bpass_mask_gen(mask)\n mask_file = self.pima.set_mask_file(mask_gen_file)\n\n if mask_file:\n self.logger.info(\"Set %s as new mask file\", mask_file)\n\n def split(self, source=None, average=False):\n \"\"\"\n Split a multi-source uv data set into single-source data files.\n\n Parameters\n ----------\n source : string, optional\n Do split only for given source. By default split all sources in\n the experiment.\n average : bool, optional\n If ``True`` average data over full scan length.\n\n \"\"\"\n # Delete old uv-fits remained from previous run\n exper_dir = self.pima.cnt_params[\"EXPER_DIR:\"]\n sess_code = self.pima.cnt_params[\"SESS_CODE:\"]\n pima_fits_dir = os.path.join(exper_dir, sess_code + \"_uvs\")\n\n if os.path.isdir(pima_fits_dir):\n shutil.rmtree(pima_fits_dir)\n\n if not self.calibration_loaded:\n self.logger.warning(\n \"Could not do splitting due to absence of \" \"calibration information\"\n )\n return\n\n if not self.fri.any_detections():\n self.logger.warning(\"No useful scans for splitting\")\n return\n\n snr_detection = round(min(7.0, self.fri.min_detected_snr() - 0.05), 2)\n self.logger.info(\"Set FRIB.SNR_DETECTION to %s\", snr_detection)\n split_params = {\n \"FRIB.SNR_DETECTION:\": f\"{snr_detection:.2f}\",\n \"DEBUG_LEVEL:\": \"6\",\n }\n\n # Exclude suspicious observations\n obs_list = self.fri.non_detections()\n for rec in self.fri:\n if (\n abs(rec[\"rate\"]) > 1e-10\n or abs(rec[\"delay\"]) > 1e-6\n or rec[\"duration\"] < 30\n ):\n obs_list.append(rec[\"obs\"])\n\n # Exclude very short observations\n ap_len = self.pima.ap_minmax[0]\n min_scan_len = float(self.pima.cnt_params[\"MIN_SCAN_LEN:\"])\n for obs in self.pima.observations:\n if obs.ap_num * ap_len < min_scan_len:\n obs_list.append(obs.obs)\n\n self.pima.mk_exclude_obs_file(obs_list, \"splt\")\n\n if source:\n split_params[\"SPLT.SOU_NAME:\"] = source\n else:\n split_params[\"SPLT.SOU_NAME:\"] = \"ALL\"\n\n if average:\n time_segments = max([obs.ap_num for obs in self.pima.observations])\n else:\n time_segments = 1\n\n self.split_time_aver = time_segments * ap_len\n self.pima.split(tim_mseg=time_segments, params=split_params)\n\n def copy_uvfits(self, out_dir):\n \"\"\"Copy calibrated uv-fits files from pima scratch dir to `out_dir`.\"\"\"\n exper_dir = self.pima.cnt_params[\"EXPER_DIR:\"]\n sess_code = self.pima.cnt_params[\"SESS_CODE:\"]\n band = self.pima.cnt_params[\"BAND:\"]\n polar = self.pima.cnt_params[\"POLAR:\"]\n\n pima_fits_dir = os.path.join(exper_dir, sess_code + \"_uvs\")\n\n if not os.path.isdir(pima_fits_dir):\n return\n\n splt_sou_name = 
self.pima.cnt_params[\"SPLT.SOU_NAME:\"]\n\n for source_names in self.pima.source_list:\n if splt_sou_name != \"ALL\" and splt_sou_name not in source_names:\n continue\n\n pima_fits_name = \"{}_{}_uva.fits\".format(source_names[1], band)\n pima_fits_path = os.path.join(pima_fits_dir, pima_fits_name)\n\n if not os.path.isfile(pima_fits_path):\n self.logger.warning('UV-FITS \"%s\" does not exists.', pima_fits_path)\n continue\n\n # Use B1950 name for output directory\n b1950_name = source_names[2]\n\n # Fix source names\n if b1950_name == \"OJ287\":\n b1950_name = \"0851+202\"\n\n out_fits_dir = os.path.join(out_dir, b1950_name)\n os.makedirs(out_fits_dir, exist_ok=True)\n\n # Correlator name\n corr_name = self.pima.exper_info[\"correlator_name\"]\n\n out_fits_name = \"{}_{}_{}_{}_{:04d}s_{}_uva.fits\".format(\n b1950_name,\n self.exper,\n self.band.upper(),\n polar,\n round(self.split_time_aver),\n corr_name,\n )\n\n if self.scan_part >= 1000:\n scan_part_base = (self.scan_part // 1000) * 1000\n out_fits_name = out_fits_name.replace(\n \"_uva\", f\"_ALT{scan_part_base}_uva\"\n )\n\n out_fits_path = os.path.join(out_fits_dir, out_fits_name)\n\n self.logger.info(\"Copy %s to %s\", pima_fits_path, out_fits_path)\n shutil.copy(pima_fits_path, out_fits_path)\n\n # Run `fits_to_radplot` only for averaged uv-fits\n if self.split_time_aver > 2:\n try:\n pypima.pima.fits_to_txt(out_fits_path)\n except subprocess.SubprocessError:\n self._error(\"fits_to_radplot failed\")\n\n if self.run_id > 0:\n with UVFits(out_fits_path) as uvfits_file:\n self.db.uvfits2db(uvfits_file, b1950_name, self.run_id)\n\n def fringes2db(self):\n \"\"\"\n Put fringe fitting information to the database.\n\n \"\"\"\n if self.run_id > 0 and self.fri:\n self.db.fri2db(self.fri, self.pima.exper_info, self.run_id)\n\n def delete_uvfits(self):\n \"\"\"\n Delete UV-FITS file.\n\n \"\"\"\n # Delete FITS file in `data_dir` only\n if (\n isinstance(self.uv_fits, str)\n and os.path.isfile(self.uv_fits)\n and self.uv_fits.startswith(self.data_dir)\n ):\n os.remove(self.uv_fits)\n\n # Delete data directory if empty\n try:\n os.rmdir(self.data_dir)\n except OSError:\n pass\n\n # Delete staging directory\n staging_dir = self.pima.cnt_params[\"STAGING_DIR:\"]\n if os.path.isdir(staging_dir):\n shutil.rmtree(staging_dir)\n\n def generate_autospectra(self, plot=False, out_dir=None, db=False) -> None:\n \"\"\"\n Generate autocorrelation spectrum.\n\n Thist function generates autocorrelation spectrum for each station for each\n scan using ``acta`` **PIMA** task and fill `self.acta_files` dict.\n\n Parameters\n ----------\n plot : bool\n If ``True`` plot autospectra.\n out_dir : str\n Plot output directory.\n db : bool\n If ``True`` store autospectra to the database.\n\n \"\"\"\n if self.acta_files:\n self.logger.debug(\"acta has already been called\")\n return\n\n for polar in (\"RR\", \"LL\"):\n # Sometimes PIMA crashes on `acta` task\n try:\n file_list = self.pima.acta(params={\"POLAR:\": polar})\n except pypima.pima.Error:\n # Remove core dump file.\n if os.path.isfile(\"core\"):\n os.remove(\"core\")\n\n return\n\n utc_tai = self.pima.exper_info[\"utc_minus_tai\"]\n\n for file_name in file_list:\n acta_file = ActaFile(file_name, polar, utc_tai)\n\n sta = acta_file.header[\"station\"]\n scan_name = acta_file.header[\"scan_name\"]\n\n assert (polar, scan_name, sta) not in self.acta_files\n\n self.acta_files[polar, scan_name, sta] = acta_file\n\n if plot:\n acta_file.plot(out_dir)\n\n if db:\n self.db.autospec2db(acta_file)\n\n\ndef 
_download_it(url, buffer, max_retries=0, ftp_user=None):\n \"\"\"\n Download data from `url` and write it to `buffer` using pycurl.\n\n Parameters\n ----------\n url : str\n URL\n buffer : object\n Object with `write` function. For inctance, BytesIO or file descriptor.\n max_retries : int\n Number of attempts to download.\n\n \"\"\"\n done = False\n retries = 0\n\n curl = pycurl.Curl()\n curl.setopt(pycurl.URL, url)\n curl.setopt(pycurl.CONNECTTIMEOUT, 30)\n curl.setopt(pycurl.LOW_SPEED_LIMIT, 10000)\n curl.setopt(pycurl.LOW_SPEED_TIME, 60)\n curl.setopt(pycurl.NETRC, 1)\n if ftp_user:\n curl.setopt(pycurl.USERNAME, ftp_user)\n curl.setopt(pycurl.WRITEDATA, buffer)\n\n while not done:\n try:\n curl.perform()\n except pycurl.error as err:\n errno, errstr = err.args\n\n if errno == pycurl.E_OPERATION_TIMEDOUT and retries < max_retries:\n retries += 1\n\n # Try to continue data transfer\n curl.setopt(pycurl.RESUME_FROM_LARGE, buffer.tell())\n else:\n raise\n else:\n done = True\n\n curl.close()\n" ]
[ [ "numpy.median" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shunzgim/PyQC
[ "8bcbb5b6c5990cac578b2645c558a1fdac29bc1f" ]
[ "examples/HHL.py" ]
[ "from pyqc import *\nimport numpy as np\n\n\nif __name__ == '__main__':\n A = np.array([[1.5, 0.5],\n [0.5, 1.5]])\n t = 2*np.pi\n r = 2**4\n ########## step1 ########### 申请后端模拟器\n qubit_nums = 4\n env = Environment(simType._FULL_AMPLITUDE) \n q = env.allocateQubits(qubit_nums)\n ########## step2 ########### 设置量子线路\n qcirc = env.quantum_circuit\n \"\"\"\n #相位估计\n qcirc.insert(H,target=q[3])\n qcirc.insert(CNOT, target=q[2], control=q[3])\n qcirc.insert(CNOT, target=q[1], control=q[2])\n qcirc.insert(X,target=q[2])\n qcirc.insert(Swap, target=[q[1],q[2]])\n #提取占比\n qcirc.insert(CUOne(Ry(Var(np.pi))), target=q[0], control=q[2])\n qcirc.insert(CUOne(Ry(Var(np.pi/3))), target=q[0], control=q[1])\n #逆相位估计\n qcirc.insert(Swap, target=[q[1],q[2]])\n qcirc.insert(X,target=q[2])\n qcirc.insert(CNOT, target=q[1], control=q[2])\n qcirc.insert(CNOT, target=q[2], control=q[3])\n qcirc.insert(H,target=q[3])\n \"\"\"\n #相位估计\n qcirc.insert(H,target=q[1])\n qcirc.insert(H,target=q[2])\n qcirc.insert(CUOne(HSimOneGate(A,t,1/4)), target=q[3], control=q[2])\n qcirc.insert(CUOne(HSimOneGate(A,t,1/2)), target=q[3], control=q[1])\n qcirc.insert(Swap, target=[q[1],q[2]])\n qcirc.insert(H, target=q[2])\n qcirc.insert(CRDag(Var(0.5*np.pi)), target=q[1], control=q[2])\n qcirc.insert(H, target=q[1])\n #提取占比\n #qcirc.insert(Swap, target=[q[1],q[2]])\n #qcirc.insert(CUOne(Ry(Var(2*np.pi/r))), target=q[0], control=q[1])\n #qcirc.insert(CUOne(Ry(Var(np.pi/r))), target=q[0], control=q[2])\n qcirc.insert(CUOne(Ry(Var(np.pi))), target=q[0], control=q[2])\n qcirc.insert(CUOne(Ry(Var(np.pi/3))), target=q[0], control=q[1])\n #逆相位估计\n qcirc.insert(H, target=q[1])\n qcirc.insert(CR(Var(0.5*np.pi)), target=q[1], control=q[2])\n qcirc.insert(H, target=q[2])\n qcirc.insert(Swap, target=[q[1],q[2]])\n qcirc.insert(CUOne(HSimDagOneGate(A,t,1/2)), target=q[3], control=q[1])\n qcirc.insert(CUOne(HSimDagOneGate(A,t,1/4)), target=q[3], control=q[2])\n qcirc.insert(H,target=q[2])\n qcirc.insert(H,target=q[1])\n #\"\"\"\n for i in range(qubit_nums):\n qcirc.insert(Measure, target=q[i])\n ########## step3 ########### 可视化量子线路(可选)\n qcirc.show()\n ########## step4 ########### 执行线路\n env.exec()\n ########## step5 ########### 返回结果\n res_str = env.getMeasureResult(show=True,name='HHL_result')\n print('sample result:',res_str)" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
we0091234/myYoloxTrain
[ "aa0aba21056b67be1392a60d69ea3245d4c06838" ]
[ "export_onnx.py" ]
[ "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport argparse\nimport os\nfrom loguru import logger\n\nimport torch\nfrom torch import nn\n\nfrom yolox.exp import get_exp\nfrom yolox.models.network_blocks import SiLU\nfrom yolox.utils import replace_module\n\n\ndef make_parser():\n parser = argparse.ArgumentParser(\"YOLOX onnx deploy\")\n parser.add_argument(\n \"--output-name\", type=str, default=\"yolox.onnx\", help=\"output name of models\"\n )\n parser.add_argument(\n \"--input\", default=\"images\", type=str, help=\"input node name of onnx model\"\n )\n parser.add_argument(\n \"--output\", default=\"output\", type=str, help=\"output node name of onnx model\"\n )\n parser.add_argument(\n \"-o\", \"--opset\", default=11, type=int, help=\"onnx opset version\"\n )\n parser.add_argument(\"--batch-size\", type=int, default=1, help=\"batch size\")\n parser.add_argument(\"--image_h\", type=int, default=1, help=\"image height\")\n parser.add_argument(\"--image_w\", type=int, default=1, help=\"image width\")\n parser.add_argument(\n \"--dynamic\", action=\"store_true\", help=\"whether the input shape should be dynamic or not\"\n )\n parser.add_argument(\"--no-onnxsim\", action=\"store_true\", help=\"use onnxsim or not\")\n parser.add_argument(\n \"-f\",\n \"--exp_file\",\n default=None,\n type=str,\n help=\"expriment description file\",\n )\n parser.add_argument(\"-expn\", \"--experiment-name\", type=str, default=None)\n parser.add_argument(\"-n\", \"--name\", type=str, default=None, help=\"model name\")\n parser.add_argument(\"-c\", \"--ckpt\", default=None, type=str, help=\"ckpt path\")\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n\n return parser\n\n\[email protected]\ndef main():\n args = make_parser().parse_args()\n logger.info(\"args value: {}\".format(args))\n exp = get_exp(args.exp_file, args.name)\n exp.merge(args.opts)\n\n if not args.experiment_name:\n args.experiment_name = exp.exp_name\n\n model = exp.get_model()\n if args.ckpt is None:\n file_name = os.path.join(exp.output_dir, args.experiment_name)\n ckpt_file = os.path.join(file_name, \"best_ckpt.pth\")\n else:\n ckpt_file = args.ckpt\n\n # load the model state dict\n ckpt = torch.load(ckpt_file, map_location=\"cpu\")\n\n model.eval()\n if \"model\" in ckpt:\n ckpt = ckpt[\"model\"]\n model.load_state_dict(ckpt)\n model = replace_module(model, nn.SiLU, SiLU)\n model.head.decode_in_inference = True #true 新的onnx,输出是8400*86 False 输出是8400*85\n model.head.onnx_export=True #设为true才行\n\n logger.info(\"loading checkpoint done.\")\n # dummy_input = torch.randn(args.batch_size, 3, exp.test_size[0], exp.test_size[1])\n dummy_input = torch.randn(args.batch_size, 3, args.image_h, args.image_w)\n\n torch.onnx._export(\n model,\n dummy_input,\n args.output_name,\n input_names=[args.input],\n output_names=[args.output],\n dynamic_axes={args.input: {0: 'batch'},\n args.output: {0: 'batch'}} if args.dynamic else None,\n opset_version=args.opset,\n )\n logger.info(\"generated onnx model named {}\".format(args.output_name))\n\n if not args.no_onnxsim:\n import onnx\n\n from onnxsim import simplify\n\n input_shapes = {args.input: list(dummy_input.shape)} if args.dynamic else None\n \n # use onnxsimplify to reduce reduent model.\n onnx_model = onnx.load(args.output_name)\n model_simp, check = simplify(onnx_model,\n dynamic_input_shape=args.dynamic,\n input_shapes=input_shapes)\n assert check, 
\"Simplified ONNX model could not be validated\"\n onnx.save(model_simp, args.output_name)\n logger.info(\"generated simplified onnx model named {}\".format(args.output_name))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.randn", "torch.onnx._export", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jayholman/vmaf
[ "0bba4faf68ab89e38314cc596e6908b4fb83984d" ]
[ "python/src/vmaf/tools/reader.py" ]
[ "__copyright__ = \"Copyright 2016-2018, Netflix, Inc.\"\n__license__ = \"Apache, Version 2.0\"\n\nimport os\n\nimport numpy as np\n\nclass YuvReader(object):\n\n SUPPORTED_YUV_8BIT_TYPES = ['yuv420p',\n 'yuv422p',\n 'yuv444p',\n ]\n\n SUPPORTED_YUV_10BIT_LE_TYPES = ['yuv420p10le',\n 'yuv422p10le',\n 'yuv444p10le',\n ]\n\n # ex: for yuv420p, the width and height of U/V is 0.5x, 0.5x of Y\n UV_WIDTH_HEIGHT_MULTIPLIERS_DICT = {'yuv420p': (0.5, 0.5),\n 'yuv422p': (0.5, 1.0),\n 'yuv444p': (1.0, 1.0),\n 'yuv420p10le': (0.5, 0.5),\n 'yuv422p10le': (0.5, 1.0),\n 'yuv444p10le': (1.0, 1.0),\n }\n\n def __init__(self, filepath, width, height, yuv_type):\n\n self.filepath = filepath\n self.width = width\n self.height = height\n self.yuv_type = yuv_type\n\n self._asserts()\n\n # TODO python3: this doesn't work well with python3, need to refactor this class\n self.file = open(self.filepath, 'rb')\n\n def close(self):\n self.file.close()\n\n # make YuvReader withable, e.g.:\n # with YuvReader(...) as yuv_reader:\n # ...\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n # make YuvReader iterable, e.g.:\n # for y, u, v in yuv_reader:\n # ...\n def __iter__(self):\n return self\n\n def next(self):\n \"\"\"next() is for python2 only, in python3 all you need to define is __next__(self)\"\"\"\n return self.__next__()\n\n @property\n def num_bytes(self):\n self._assert_file_exist()\n return os.path.getsize(self.filepath)\n\n @property\n def num_frms(self):\n w_multiplier, h_multiplier = self._get_uv_width_height_multiplier()\n\n if self._is_10bitle():\n num_frms = float(self.num_bytes) / self.width / self.height / (1.0 + w_multiplier * h_multiplier * 2) / 2\n\n elif self._is_8bit():\n num_frms = float(self.num_bytes) / self.width / self.height / (1.0 + w_multiplier * h_multiplier * 2)\n\n else:\n assert False\n\n assert num_frms.is_integer(), 'Number of frames is not integer: {}'.format(num_frms)\n\n return int(num_frms)\n\n def _get_uv_width_height_multiplier(self):\n self._assert_yuv_type()\n return self.UV_WIDTH_HEIGHT_MULTIPLIERS_DICT[self.yuv_type]\n\n def _assert_yuv_type(self):\n assert (self.yuv_type in self.SUPPORTED_YUV_8BIT_TYPES\n or self.yuv_type in self.SUPPORTED_YUV_10BIT_LE_TYPES), \\\n 'Unsupported YUV type: {}'.format(self.yuv_type)\n\n def _assert_file_exist(self):\n assert os.path.exists(self.filepath), \\\n \"File does not exist: {}\".format(self.filepath)\n\n def _asserts(self):\n\n # assert YUV type\n self._assert_yuv_type()\n\n # assert file exists\n self._assert_file_exist()\n\n # assert file size: if consists of integer number of frames\n assert isinstance(self.num_frms, int)\n\n def _is_8bit(self):\n return self.yuv_type in self.SUPPORTED_YUV_8BIT_TYPES\n\n def _is_10bitle(self):\n return self.yuv_type in self.SUPPORTED_YUV_10BIT_LE_TYPES\n\n def __next__(self):\n y_width = self.width\n y_height = self.height\n uv_w_multiplier, uv_h_multiplier = self._get_uv_width_height_multiplier()\n uv_width = int(y_width * uv_w_multiplier)\n uv_height = int(y_height * uv_h_multiplier)\n\n if self._is_10bitle():\n pix_type = np.uint16\n elif self._is_8bit():\n pix_type = np.uint8\n else:\n assert False\n\n y = np.fromfile(self.file, pix_type, count=y_width*y_height)\n if y.size == 0:\n raise StopIteration\n u = np.fromfile(self.file, pix_type, count=uv_width*uv_height)\n if u.size == 0:\n raise StopIteration\n v = np.fromfile(self.file, pix_type, count=uv_width*uv_height)\n if v.size == 0:\n raise StopIteration\n\n y = 
y.reshape(y_height, y_width)\n u = u.reshape(uv_height, uv_width)\n v = v.reshape(uv_height, uv_width)\n\n if self._is_10bitle():\n return y.astype(np.double) / 4.0, u.astype(np.double) / 4.0, v.astype(np.double) / 4.0\n\n elif self._is_8bit():\n return y.astype(np.double), u.astype(np.double), v.astype(np.double)\n\n else:\n assert False\n" ]
[ [ "numpy.fromfile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cclauss/MORAN_v2
[ "ae6b7b54d38c4eb8c0da34c923bca1e569f12a08" ]
[ "models/morn.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport numpy as np\n\nclass MORN(nn.Module):\n def __init__(self, nc, targetH, targetW, inputDataType='torch.cuda.FloatTensor', maxBatch=256):\n super(MORN, self).__init__()\n self.targetH = targetH\n self.targetW = targetW\n self.inputDataType = inputDataType\n self.maxBatch = maxBatch\n\n self.cnn = nn.Sequential(\n nn.MaxPool2d(2, 2), \n nn.Conv2d(nc, 64, 3, 1, 1), nn.BatchNorm2d(64), nn.ReLU(True), nn.MaxPool2d(2, 2),\n nn.Conv2d(64, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(True), nn.MaxPool2d(2, 2),\n nn.Conv2d(128, 64, 3, 1, 1), nn.BatchNorm2d(64), nn.ReLU(True), \n nn.Conv2d(64, 16, 3, 1, 1), nn.BatchNorm2d(16), nn.ReLU(True), \n nn.Conv2d(16, 1, 3, 1, 1), nn.BatchNorm2d(1)\n )\n\n self.pool = nn.MaxPool2d(2, 1)\n\n h_list = np.arange(self.targetH)*2./(self.targetH-1)-1\n w_list = np.arange(self.targetW)*2./(self.targetW-1)-1\n\n grid = np.meshgrid(\n w_list, \n h_list, \n indexing='ij'\n )\n grid = np.stack(grid, axis=-1)\n grid = np.transpose(grid, (1, 0, 2))\n grid = np.expand_dims(grid, 0)\n grid = np.tile(grid, [maxBatch, 1, 1, 1])\n grid = torch.from_numpy(grid).type(self.inputDataType).cuda()\n self.grid = Variable(grid, requires_grad=False)\n self.grid_x = self.grid[:, :, :, 0].unsqueeze(3)\n self.grid_y = self.grid[:, :, :, 1].unsqueeze(3)\n\n def forward(self, x, test, enhance=1, debug=False):\n\n if not test and np.random.random() > 0.5:\n return nn.functional.upsample(x, size=(self.targetH, self.targetW), mode='bilinear')\n if not test:\n enhance = 0\n\n assert x.size(0) <= self.maxBatch\n assert x.data.type() == self.inputDataType\n\n grid = self.grid[:x.size(0)]\n grid_x = self.grid_x[:x.size(0)]\n grid_y = self.grid_y[:x.size(0)]\n x_small = nn.functional.upsample(x, size=(self.targetH, self.targetW), mode='bilinear')\n\n offsets = self.cnn(x_small)\n offsets_posi = nn.functional.relu(offsets, inplace=False)\n offsets_nega = nn.functional.relu(-offsets, inplace=False)\n offsets_pool = self.pool(offsets_posi) - self.pool(offsets_nega)\n\n offsets_grid = nn.functional.grid_sample(offsets_pool, grid)\n offsets_grid = offsets_grid.permute(0, 2, 3, 1).contiguous()\n offsets_x = torch.cat([grid_x, grid_y + offsets_grid], 3)\n x_rectified = nn.functional.grid_sample(x, offsets_x)\n\n for iteration in range(enhance):\n offsets = self.cnn(x_rectified)\n\n offsets_posi = nn.functional.relu(offsets, inplace=False)\n offsets_nega = nn.functional.relu(-offsets, inplace=False)\n offsets_pool = self.pool(offsets_posi) - self.pool(offsets_nega)\n\n offsets_grid += nn.functional.grid_sample(offsets_pool, grid).permute(0, 2, 3, 1).contiguous()\n offsets_x = torch.cat([grid_x, grid_y + offsets_grid], 3)\n x_rectified = nn.functional.grid_sample(x, offsets_x)\n\n if debug:\n\n offsets_mean = torch.mean(offsets_grid.view(x.size(0), -1), 1)\n offsets_max, _ = torch.max(offsets_grid.view(x.size(0), -1), 1)\n offsets_min, _ = torch.min(offsets_grid.view(x.size(0), -1), 1)\n\n import matplotlib.pyplot as plt\n from colour import Color\n from torchvision import transforms\n import cv2\n\n alpha = 0.7\n density_range = 256\n color_map = np.empty([self.targetH, self.targetW, 3], dtype=int)\n cmap = plt.get_cmap(\"rainbow\")\n blue = Color(\"blue\")\n hex_colors = list(blue.range_to(Color(\"red\"), density_range))\n rgb_colors = [[rgb * 255 for rgb in color.rgb] for color in hex_colors][::-1]\n to_pil_image = transforms.ToPILImage()\n\n for i in range(x.size(0)):\n\n img_small = 
x_small[i].data.cpu().mul_(0.5).add_(0.5)\n img = to_pil_image(img_small)\n img = np.array(img)\n if len(img.shape) == 2:\n img = cv2.merge([img.copy()]*3)\n img_copy = img.copy()\n\n v_max = offsets_max.data[i]\n v_min = offsets_min.data[i]\n img_offsets = (offsets_grid[i]).view(1, self.targetH, self.targetW).data.cpu().add_(-v_min).mul_(1./(v_max-v_min))\n img_offsets = to_pil_image(img_offsets)\n img_offsets = np.array(img_offsets)\n color_map = np.empty([self.targetH, self.targetW, 3], dtype=int)\n for h_i in range(self.targetH):\n for w_i in range(self.targetW):\n color_map[h_i][w_i] = rgb_colors[int(img_offsets[h_i, w_i]/256.*density_range)]\n color_map = color_map.astype(np.uint8)\n cv2.addWeighted(color_map, alpha, img_copy, 1-alpha, 0, img_copy)\n\n img_processed = x_rectified[i].data.cpu().mul_(0.5).add_(0.5)\n img_processed = to_pil_image(img_processed)\n img_processed = np.array(img_processed)\n if len(img_processed.shape) == 2:\n img_processed = cv2.merge([img_processed.copy()]*3)\n\n total_img = np.ones([self.targetH, self.targetW*3+10, 3], dtype=int)*255\n total_img[0:self.targetH, 0:self.targetW] = img\n total_img[0:self.targetH, self.targetW+5:2*self.targetW+5] = img_copy\n total_img[0:self.targetH, self.targetW*2+10:3*self.targetW+10] = img_processed\n total_img = cv2.resize(total_img.astype(np.uint8), (300, 50))\n # cv2.imshow(\"Input_Offsets_Output\", total_img)\n # cv2.waitKey()\n\n return x_rectified, total_img\n\n return x_rectified\n" ]
[ [ "torch.nn.functional.upsample", "numpy.expand_dims", "torch.cat", "matplotlib.pyplot.get_cmap", "torch.autograd.Variable", "numpy.arange", "torch.from_numpy", "numpy.stack", "torch.nn.functional.relu", "torch.nn.Conv2d", "torch.nn.BatchNorm2d", "numpy.transpose", "numpy.array", "numpy.meshgrid", "numpy.random.random", "numpy.tile", "numpy.ones", "torch.nn.MaxPool2d", "torch.nn.functional.grid_sample", "torch.nn.ReLU", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ashhadulislam/smote_variants
[ "7c0bc1b5d93e28bda053f2c0ac8648186de865a4" ]
[ "smote_variants/smote_v_ashhad.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 15 11:15:24 2018\n\n@author: gykovacs\n\"\"\"\n\nprint(\"Imported the sv again\")\n\n# import system packages\nimport os\nimport pickle\nimport itertools\nimport logging\nimport re\nimport time\nimport glob\nimport inspect\n\n# used to parallelize evaluation\nfrom joblib import Parallel, delayed\n\n# numerical methods and arrays\nimport numpy as np\nimport pandas as pd\n\n# import packages used for the implementation of sampling methods\nfrom sklearn.model_selection import (RepeatedStratifiedKFold, KFold,\n cross_val_score, StratifiedKFold)\nfrom sklearn.neighbors import NearestNeighbors, KNeighborsClassifier\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.metrics import (log_loss, roc_auc_score, accuracy_score,\n confusion_matrix, f1_score)\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.cluster import KMeans, AgglomerativeClustering, DBSCAN\nfrom sklearn.manifold import LocallyLinearEmbedding, TSNE, Isomap\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.base import clone, BaseEstimator, ClassifierMixin\n\n# some statistical methods\nfrom scipy.stats import skew\nimport scipy.signal as ssignal\nimport scipy.spatial as sspatial\nimport scipy.optimize as soptimize\nimport scipy.special as sspecial\nfrom scipy.stats.mstats import gmean\n\n# from ._version import __version__\n\n\nimport numpy as np\nimport random as rd\nimport math\nfrom math import sqrt\n\n\n__author__ = \"György Kovács\"\n__license__ = \"MIT\"\n__email__ = \"[email protected]\"\n\n# for handler in _logger.root.handlers[:]:\n# _logger.root.removeHandler(handler)\n\n# setting the _logger format\n_logger = logging.getLogger('smote_variants')\n_logger.setLevel(logging.DEBUG)\n_logger_ch = logging.StreamHandler()\n_logger_ch.setFormatter(logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(message)s\"))\n_logger.addHandler(_logger_ch)\n\n# exported names\n__all__ = ['__author__',\n '__license__',\n '__version__',\n '__email__',\n 'get_all_oversamplers',\n 'get_all_noisefilters',\n 'get_n_quickest_oversamplers',\n 'get_all_oversamplers_multiclass',\n 'get_n_quickest_oversamplers_multiclass',\n 'evaluate_oversamplers',\n 'read_oversampling_results',\n 'model_selection',\n 'cross_validate',\n 'MLPClassifierWrapper',\n 'OverSampling',\n 'NoiseFilter',\n 'TomekLinkRemoval',\n 'CondensedNearestNeighbors',\n 'OneSidedSelection',\n 'CNNTomekLinks',\n 'NeighborhoodCleaningRule',\n 'EditedNearestNeighbors',\n 'JUST_DUPLI', \n 'KNNOR_SMOTE',\n 'SMOTE',\n 'SMOTE_TomekLinks',\n 'SMOTE_ENN',\n 'Borderline_SMOTE1',\n 'Borderline_SMOTE2',\n 'ADASYN',\n 'AHC',\n 'LLE_SMOTE',\n 'distance_SMOTE',\n 'SMMO',\n 'polynom_fit_SMOTE',\n 'Stefanowski',\n 'ADOMS',\n 'Safe_Level_SMOTE',\n 'MSMOTE',\n 'DE_oversampling',\n 'SMOBD',\n 'SUNDO',\n 'MSYN',\n 'SVM_balance',\n 'TRIM_SMOTE',\n 'SMOTE_RSB',\n 'ProWSyn',\n 'SL_graph_SMOTE',\n 'NRSBoundary_SMOTE',\n 'LVQ_SMOTE',\n 'SOI_CJ',\n 'ROSE',\n 
'SMOTE_OUT',\n 'SMOTE_Cosine',\n 'Selected_SMOTE',\n 'LN_SMOTE',\n 'MWMOTE',\n 'PDFOS',\n 'IPADE_ID',\n 'RWO_sampling',\n 'NEATER',\n 'DEAGO',\n 'Gazzah',\n 'MCT',\n 'ADG',\n 'SMOTE_IPF',\n 'KernelADASYN',\n 'MOT2LD',\n 'V_SYNTH',\n 'OUPS',\n 'SMOTE_D',\n 'SMOTE_PSO',\n 'CURE_SMOTE',\n 'SOMO',\n 'ISOMAP_Hybrid',\n 'CE_SMOTE',\n 'Edge_Det_SMOTE',\n 'CBSO',\n 'E_SMOTE',\n 'DBSMOTE',\n 'ASMOBD',\n 'Assembled_SMOTE',\n 'SDSMOTE',\n 'DSMOTE',\n 'G_SMOTE',\n 'NT_SMOTE',\n 'Lee',\n 'SPY',\n 'SMOTE_PSOBAT',\n 'MDO',\n 'Random_SMOTE',\n 'ISMOTE',\n 'VIS_RST',\n 'GASMOTE',\n 'A_SUWO',\n 'SMOTE_FRST_2T',\n 'AND_SMOTE',\n 'NRAS',\n 'AMSCO',\n 'SSO',\n 'NDO_sampling',\n 'DSRBF',\n 'Gaussian_SMOTE',\n 'kmeans_SMOTE',\n 'Supervised_SMOTE',\n 'SN_SMOTE',\n 'CCR',\n 'ANS',\n 'cluster_SMOTE',\n 'NoSMOTE',\n 'MulticlassOversampling',\n 'OversamplingClassifier']\n\n\ndef get_all_oversamplers():\n \"\"\"\n Returns all oversampling classes\n\n Returns:\n list(OverSampling): list of all oversampling classes\n\n Example::\n\n import smote_variants as sv\n\n oversamplers= sv.get_all_oversamplers()\n \"\"\"\n\n return OverSampling.__subclasses__()\n\n\ndef get_n_quickest_oversamplers(n=10):\n \"\"\"\n Returns the n quickest oversamplers based on testing on the datasets of\n the imbalanced_databases package.\n\n Args:\n n (int): number of oversamplers to return\n\n Returns:\n list(OverSampling): list of the n quickest oversampling classes\n\n Example::\n\n import smote_variants as sv\n\n oversamplers= sv.get_n_quickest_oversamplers(10)\n \"\"\"\n\n runtimes = {'SPY': 0.11, 'OUPS': 0.16, 'SMOTE_D': 0.20, 'NT_SMOTE': 0.20,\n 'Gazzah': 0.21, 'ROSE': 0.25, 'NDO_sampling': 0.27,\n 'Borderline_SMOTE1': 0.28, 'SMOTE': 0.28,\n 'Borderline_SMOTE2': 0.29, 'ISMOTE': 0.30, 'SMMO': 0.31,\n 'SMOTE_OUT': 0.37, 'SN_SMOTE': 0.44, 'Selected_SMOTE': 0.47,\n 'distance_SMOTE': 0.47, 'Gaussian_SMOTE': 0.48, 'MCT': 0.51,\n 'Random_SMOTE': 0.57, 'ADASYN': 0.58, 'SL_graph_SMOTE': 0.58,\n 'CURE_SMOTE': 0.59, 'ANS': 0.63, 'MSMOTE': 0.72,\n 'Safe_Level_SMOTE': 0.79, 'SMOBD': 0.80, 'CBSO': 0.81,\n 'Assembled_SMOTE': 0.82, 'SDSMOTE': 0.88,\n 'SMOTE_TomekLinks': 0.91, 'Edge_Det_SMOTE': 0.94,\n 'ProWSyn': 1.00, 'Stefanowski': 1.04, 'NRAS': 1.06,\n 'AND_SMOTE': 1.13, 'DBSMOTE': 1.17, 'polynom_fit_SMOTE': 1.18,\n 'ASMOBD': 1.18, 'MDO': 1.18, 'SOI_CJ': 1.24, 'LN_SMOTE': 1.26,\n 'VIS_RST': 1.34, 'TRIM_SMOTE': 1.36, 'LLE_SMOTE': 1.62,\n 'SMOTE_ENN': 1.86, 'SMOTE_Cosine': 2.00, 'kmeans_SMOTE': 2.43,\n 'MWMOTE': 2.45, 'V_SYNTH': 2.59, 'A_SUWO': 2.81,\n 'RWO_sampling': 2.91, 'SMOTE_RSB': 3.88, 'ADOMS': 3.89,\n 'SMOTE_IPF': 4.10, 'Lee': 4.16, 'SMOTE_FRST_2T': 4.18,\n 'cluster_SMOTE': 4.19, 'SOMO': 4.30, 'DE_oversampling': 4.67,\n 'CCR': 4.72, 'NRSBoundary_SMOTE': 5.26, 'AHC': 5.27,\n 'ISOMAP_Hybrid': 6.11, 'LVQ_SMOTE': 6.99, 'CE_SMOTE': 7.45,\n 'MSYN': 11.92, 'PDFOS': 15.14, 'KernelADASYN': 17.87,\n 'G_SMOTE': 19.23, 'E_SMOTE': 19.50, 'SVM_balance': 24.05,\n 'SUNDO': 26.21, 'GASMOTE': 31.38, 'DEAGO': 33.39,\n 'NEATER': 41.39, 'SMOTE_PSO': 45.12, 'IPADE_ID': 90.01,\n 'DSMOTE': 146.73, 'MOT2LD': 149.42, 'Supervised_SMOTE': 195.74,\n 'SSO': 215.27, 'DSRBF': 272.11, 'SMOTE_PSOBAT': 324.31,\n 'ADG': 493.64, 'AMSCO': 1502.36}\n\n samplers = get_all_oversamplers()\n samplers = sorted(\n samplers, key=lambda x: runtimes.get(x.__name__, 1e8))\n\n return samplers[:n]\n\n\ndef get_all_oversamplers_multiclass(strategy=\"eq_1_vs_many_successive\"):\n \"\"\"\n Returns all oversampling classes which can be used with the multiclass\n strategy specified\n\n Args:\n 
strategy (str): the multiclass oversampling strategy -\n 'eq_1_vs_many_successive'/'equalize_1_vs_many'\n\n Returns:\n list(OverSampling): list of all oversampling classes which can be used\n with the multiclass strategy specified\n\n Example::\n\n import smote_variants as sv\n\n oversamplers= sv.get_all_oversamplers_multiclass()\n \"\"\"\n\n oversamplers = get_all_oversamplers()\n\n if (strategy == 'eq_1_vs_many_successive' or\n strategy == 'equalize_1_vs_many'):\n\n def multiclass_filter(o):\n return ((OverSampling.cat_changes_majority not in o.categories) or\n ('proportion' in o().get_params()))\n\n return [o for o in oversamplers if multiclass_filter(o)]\n else:\n raise ValueError((\"It is not known which oversamplers work with the\"\n \" strategy %s\") % strategy)\n\n\ndef get_n_quickest_oversamplers_multiclass(n,\n strategy=\"eq_1_vs_many_successive\"):\n \"\"\"\n Returns the n quickest oversamplers based on testing on the datasets of\n the imbalanced_databases package, and suitable for using the multiclass\n strategy specified.\n\n Args:\n n (int): number of oversamplers to return\n strategy (str): the multiclass oversampling strategy -\n 'eq_1_vs_many_successive'/'equalize_1_vs_many'\n\n Returns:\n list(OverSampling): list of n quickest oversampling classes which can\n be used with the multiclass strategy specified\n\n Example::\n\n import smote_variants as sv\n\n oversamplers= sv.get_n_quickest_oversamplers_multiclass()\n \"\"\"\n\n oversamplers = get_all_oversamplers()\n quickest_oversamplers = get_n_quickest_oversamplers(len(oversamplers))\n\n if (strategy == 'eq_1_vs_many_successive'\n or strategy == 'equalize_1_vs_many'):\n\n def multiclass_filter(o):\n return ((OverSampling.cat_changes_majority not in o.categories) or\n ('proportion' in o().get_params()))\n\n return [o for o in quickest_oversamplers if multiclass_filter(o)][:n]\n else:\n raise ValueError(\"It is not known which oversamplers work with the\"\n \" strategy %s\" % strategy)\n\n\ndef get_all_noisefilters():\n \"\"\"\n Returns all noise filters\n Returns:\n list(NoiseFilter): list of all noise filter classes\n \"\"\"\n return NoiseFilter.__subclasses__()\n\n\ndef mode(data):\n values, counts = np.unique(data, return_counts=True)\n return values[np.where(counts == max(counts))[0][0]]\n\n\nclass StatisticsMixin:\n \"\"\"\n Mixin to compute class statistics and determine minority/majority labels\n \"\"\"\n\n def class_label_statistics(self, X, y):\n \"\"\"\n determines class sizes and minority and majority labels\n Args:\n X (np.array): features\n y (np.array): target labels\n \"\"\"\n unique, counts = np.unique(y, return_counts=True)\n self.class_stats = dict(zip(unique, counts))\n self.min_label = unique[0] if counts[0] < counts[1] else unique[1]\n self.maj_label = unique[1] if counts[0] < counts[1] else unique[0]\n # shorthands\n self.min_label = self.min_label\n self.maj_label = self.maj_label\n\n def check_enough_min_samples_for_sampling(self, threshold=2):\n if self.class_stats[self.min_label] < threshold:\n m = (\"The number of minority samples (%d) is not enough \"\n \"for sampling\")\n m = m % self.class_stats[self.min_label]\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return False\n return True\n\n\nclass RandomStateMixin:\n \"\"\"\n Mixin to set random state\n \"\"\"\n\n def set_random_state(self, random_state):\n \"\"\"\n sets the random_state member of the object\n\n Args:\n random_state (int/np.random.RandomState/None): the random state\n initializer\n \"\"\"\n\n self._random_state_init = 
random_state\n\n if random_state is None:\n self.random_state = np.random\n elif isinstance(random_state, int):\n self.random_state = np.random.RandomState(random_state)\n elif isinstance(random_state, np.random.RandomState):\n self.random_state = random_state\n elif random_state is np.random:\n self.random_state = random_state\n else:\n raise ValueError(\n \"random state cannot be initialized by \" + str(random_state))\n\n\nclass ParameterCheckingMixin:\n \"\"\"\n Mixin to check if parameters come from a valid range\n \"\"\"\n\n def check_in_range(self, x, name, r):\n \"\"\"\n Check if parameter is in range\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n r (list-like(2)): the lower and upper bound of a range\n Throws:\n ValueError\n \"\"\"\n if x < r[0] or x > r[1]:\n m = (\"Value for parameter %s outside the range [%f,%f] not\"\n \" allowed: %f\")\n m = m % (name, r[0], r[1], x)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_out_range(self, x, name, r):\n \"\"\"\n Check if parameter is outside of range\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n r (list-like(2)): the lower and upper bound of a range\n Throws:\n ValueError\n \"\"\"\n if x >= r[0] and x <= r[1]:\n m = \"Value for parameter %s in the range [%f,%f] not allowed: %f\"\n m = m % (name, r[0], r[1], x)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_less_or_equal(self, x, name, val):\n \"\"\"\n Check if parameter is less than or equal to value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x > val:\n m = \"Value for parameter %s greater than %f not allowed: %f > %f\"\n m = m % (name, val, x, val)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_less_or_equal_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is less than or equal to another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x > y:\n m = (\"Value for parameter %s greater than parameter %s not\"\n \" allowed: %f > %f\")\n m = m % (name_x, name_y, x, y)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_less(self, x, name, val):\n \"\"\"\n Check if parameter is less than value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x >= val:\n m = (\"Value for parameter %s greater than or equal to %f\"\n \" not allowed: %f >= %f\")\n m = m % (name, val, x, val)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_less_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is less than another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x >= y:\n m = (\"Value for parameter %s greater than or equal to parameter\"\n \" %s not allowed: %f >= %f\")\n m = m % (name_x, name_y, x, y)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_greater_or_equal(self, x, name, val):\n \"\"\"\n Check if parameter is greater than or equal to value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n 
Throws:\n ValueError\n \"\"\"\n if x < val:\n m = \"Value for parameter %s less than %f is not allowed: %f < %f\"\n m = m % (name, val, x, val)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_greater_or_equal_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is less than or equal to another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x < y:\n m = (\"Value for parameter %s less than parameter %s is not\"\n \" allowed: %f < %f\")\n m = m % (name_x, name_y, x, y)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_greater(self, x, name, val):\n \"\"\"\n Check if parameter is greater than value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x <= val:\n m = (\"Value for parameter %s less than or equal to %f not allowed\"\n \" %f < %f\")\n m = m % (name, val, x, val)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_greater_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is greater than or equal to another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x <= y:\n m = (\"Value for parameter %s less than or equal to parameter %s\"\n \" not allowed: %f <= %f\")\n m = m % (name_x, name_y, x, y)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_equal(self, x, name, val):\n \"\"\"\n Check if parameter is equal to value\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n val (numeric): value to compare to\n Throws:\n ValueError\n \"\"\"\n if x == val:\n m = (\"Value for parameter %s equal to parameter %f is not allowed:\"\n \" %f == %f\")\n m = m % (name, val, x, val)\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_equal_par(self, x, name_x, y, name_y):\n \"\"\"\n Check if parameter is equal to another parameter\n Args:\n x (numeric): the parameter value\n name_x (str): the parameter name\n y (numeric): the other parameter value\n name_y (str): the other parameter name\n Throws:\n ValueError\n \"\"\"\n if x == y:\n m = (\"Value for parameter %s equal to parameter %s is not \"\n \"allowed: %f == %f\")\n m = m % (name_x, name_y, x, y)\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_isin(self, x, name, li):\n \"\"\"\n Check if parameter is in list\n Args:\n x (numeric): the parameter value\n name (str): the parameter name\n li (list): list to check if parameter is in it\n Throws:\n ValueError\n \"\"\"\n if x not in li:\n m = \"Value for parameter %s not in list %s is not allowed: %s\"\n m = m % (name, str(li), str(x))\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def check_n_jobs(self, x, name):\n \"\"\"\n Check n_jobs parameter\n Args:\n x (int/None): number of jobs\n name (str): the parameter name\n Throws:\n ValueError\n \"\"\"\n if not ((x is None)\n or (x is not None and isinstance(x, int) and not x == 0)):\n m = \"Value for parameter n_jobs is not allowed: %s\" % str(x)\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n\nclass ParameterCombinationsMixin:\n \"\"\"\n Mixin to generate parameter combinations\n \"\"\"\n\n @classmethod\n def 
generate_parameter_combinations(cls, dictionary, raw):\n \"\"\"\n Generates reasonable paramter combinations\n Args:\n dictionary (dict): dictionary of paramter ranges\n num (int): maximum number of combinations to generate\n \"\"\"\n if raw:\n return dictionary\n keys = sorted(list(dictionary.keys()))\n values = [dictionary[k] for k in keys]\n combinations = [dict(zip(keys, p))\n for p in list(itertools.product(*values))]\n return combinations\n\n\nclass NoiseFilter(StatisticsMixin,\n ParameterCheckingMixin,\n ParameterCombinationsMixin):\n \"\"\"\n Parent class of noise filtering methods\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n pass\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n Args:\n X (np.array): features\n y (np.array): target labels\n \"\"\"\n pass\n\n def get_params(self, deep=False):\n \"\"\"\n Return parameters\n\n Returns:\n dict: dictionary of parameters\n \"\"\"\n\n return {}\n\n def set_params(self, **params):\n \"\"\"\n Set parameters\n\n Args:\n params (dict): dictionary of parameters\n \"\"\"\n\n for key, value in params.items():\n setattr(self, key, value)\n\n return self\n\n\nclass TomekLinkRemoval(NoiseFilter):\n \"\"\"\n Tomek link removal\n\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, strategy='remove_majority', n_jobs=1):\n \"\"\"\n Constructor of the noise filter.\n\n Args:\n strategy (str): noise removal strategy:\n 'remove_majority'/'remove_both'\n n_jobs (int): number of jobs\n \"\"\"\n super().__init__()\n\n self.check_isin(strategy, 'strategy', [\n 'remove_majority', 'remove_both'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.strategy = strategy\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise from dataset\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: dataset after noise removal\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n # using 2 neighbors because the first neighbor is the point itself\n nn = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs)\n distances, indices = nn.fit(X).kneighbors(X)\n\n # identify links\n links = []\n for i in range(len(indices)):\n if indices[indices[i][1]][1] == i:\n if not y[indices[i][1]] == y[indices[indices[i][1]][1]]:\n links.append((i, indices[i][1]))\n\n # determine links to be removed\n to_remove = []\n for li in links:\n if self.strategy == 'remove_majority':\n if y[li[0]] == self.min_label:\n to_remove.append(li[1])\n else:\n to_remove.append(li[0])\n elif self.strategy == 'remove_both':\n to_remove.append(li[0])\n to_remove.append(li[1])\n else:\n m = 'No Tomek link strategy %s implemented' % self.strategy\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n to_remove = list(set(to_remove))\n\n return np.delete(X, to_remove, axis=0), np.delete(y, to_remove)\n\n\nclass 
CondensedNearestNeighbors(NoiseFilter):\n \"\"\"\n Condensed nearest neighbors\n\n References:\n * BibTex::\n\n @ARTICLE{condensed_nn,\n author={Hart, P.},\n journal={IEEE Transactions on Information Theory},\n title={The condensed nearest neighbor rule (Corresp.)},\n year={1968},\n volume={14},\n number={3},\n pages={515-516},\n keywords={Pattern classification},\n doi={10.1109/TIT.1968.1054155},\n ISSN={0018-9448},\n month={May}}\n \"\"\"\n\n def __init__(self, n_jobs=1):\n \"\"\"\n Constructor of the noise removing object\n\n Args:\n n_jobs (int): number of jobs\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise from dataset\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: dataset after noise removal\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n # Initial result set consists of all minority samples and 1 majority\n # sample\n\n X_maj = X[y == self.maj_label]\n X_hat = np.vstack([X[y == self.min_label], X_maj[0]])\n y_hat = np.hstack([np.repeat(self.min_label, len(X_hat)-1),\n [self.maj_label]])\n X_maj = X_maj[1:]\n\n # Adding misclassified majority elements repeatedly\n while True:\n knn = KNeighborsClassifier(n_neighbors=1, n_jobs=self.n_jobs)\n knn.fit(X_hat, y_hat)\n pred = knn.predict(X_maj)\n\n if np.all(pred == self.maj_label):\n break\n else:\n X_hat = np.vstack([X_hat, X_maj[pred != self.maj_label]])\n y_hat = np.hstack(\n [y_hat,\n np.repeat(self.maj_label, len(X_hat) - len(y_hat))])\n X_maj = np.delete(X_maj, np.where(\n pred != self.maj_label)[0], axis=0)\n if len(X_maj) == 0:\n break\n\n return X_hat, y_hat\n\n\nclass OneSidedSelection(NoiseFilter):\n \"\"\"\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods\n for Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, n_jobs=1):\n \"\"\"\n Constructor of the noise removal object\n\n Args:\n n_jobs (int): number of jobs\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: cleaned features and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n t = TomekLinkRemoval(n_jobs=self.n_jobs)\n X0, y0 = t.remove_noise(X, y)\n cnn = CondensedNearestNeighbors(n_jobs=self.n_jobs)\n\n return cnn.remove_noise(X0, y0)\n\n\nclass CNNTomekLinks(NoiseFilter):\n \"\"\"\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods\n for Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. 
Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, n_jobs=1):\n \"\"\"\n Constructor of the noise removal object\n\n Args:\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: cleaned features and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n c = CondensedNearestNeighbors(n_jobs=self.n_jobs)\n X0, y0 = c.remove_noise(X, y)\n t = TomekLinkRemoval(n_jobs=self.n_jobs)\n\n return t.remove_noise(X0, y0)\n\n\nclass NeighborhoodCleaningRule(NoiseFilter):\n \"\"\"\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, n_jobs=1):\n \"\"\"\n Constructor of the noise removal object\n\n Args:\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: cleaned features and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n # fitting nearest neighbors with proposed parameter\n # using 4 neighbors because the first neighbor is the point itself\n nn = NearestNeighbors(n_neighbors=4, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X)\n\n # identifying the samples to be removed\n to_remove = []\n for i in range(len(X)):\n if (y[i] == self.maj_label and\n mode(y[indices[i][1:]]) == self.min_label):\n # if sample i is majority and the decision based on\n # neighbors is minority\n to_remove.append(i)\n elif (y[i] == self.min_label and\n mode(y[indices[i][1:]]) == self.maj_label):\n # if sample i is minority and the decision based on\n # neighbors is majority\n for j in indices[i][1:]:\n if y[j] == self.maj_label:\n to_remove.append(j)\n\n # removing the noisy samples and returning the results\n to_remove = list(set(to_remove))\n return np.delete(X, to_remove, axis=0), np.delete(y, to_remove)\n\n\nclass EditedNearestNeighbors(NoiseFilter):\n \"\"\"\n References:\n * BibTex::\n\n @article{smoteNoise0,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. 
and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA}\n }\n \"\"\"\n\n def __init__(self, remove='both', n_jobs=1):\n \"\"\"\n Constructor of the noise removal object\n\n Args:\n remove (str): class to remove from 'both'/'min'/'maj'\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n\n self.check_isin(remove, 'remove', ['both', 'min', 'maj'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.remove = remove\n self.n_jobs = n_jobs\n\n def remove_noise(self, X, y):\n \"\"\"\n Removes noise\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n\n Returns:\n np.matrix, np.array: cleaned features and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running noise removal via %s\" % self.__class__.__name__)\n self.class_label_statistics(X, y)\n\n if len(X) < 4:\n _logger.info(self.__class__.__name__ + ': ' +\n \"Not enough samples for noise removal\")\n return X.copy(), y.copy()\n\n nn = NearestNeighbors(n_neighbors=4, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X)\n\n to_remove = []\n for i in range(len(X)):\n if not y[i] == mode(y[indices[i][1:]]):\n if (self.remove == 'both' or\n (self.remove == 'min' and y[i] == self.min_label) or\n (self.remove == 'maj' and y[i] == self.maj_label)):\n to_remove.append(i)\n\n return np.delete(X, to_remove, axis=0), np.delete(y, to_remove)\n\n def get_params(self):\n \"\"\"\n Get noise removal parameters\n\n Returns:\n dict: dictionary of parameters\n \"\"\"\n return {'remove': self.remove}\n\n\nclass OverSampling(StatisticsMixin,\n ParameterCheckingMixin,\n ParameterCombinationsMixin,\n RandomStateMixin):\n \"\"\"\n Base class of oversampling methods\n \"\"\"\n\n categories = []\n\n cat_noise_removal = 'NR'\n cat_dim_reduction = 'DR'\n cat_uses_classifier = 'Clas'\n cat_sample_componentwise = 'SCmp'\n cat_sample_ordinary = 'SO'\n cat_sample_copy = 'SCpy'\n cat_memetic = 'M'\n cat_density_estimation = 'DE'\n cat_density_based = 'DB'\n cat_extensive = 'Ex'\n cat_changes_majority = 'CM'\n cat_uses_clustering = 'Clus'\n cat_borderline = 'BL'\n cat_application = 'A'\n\n def __init__(self):\n pass\n\n def det_n_to_sample(self, strategy, n_maj, n_min):\n \"\"\"\n Determines the number of samples to generate\n Args:\n strategy (str/float): if float, the fraction of the difference\n of the minority and majority numbers to\n generate, like 0.1 means that 10% of the\n difference will be generated if str,\n like 'min2maj', the minority class will\n be upsampled to match the cardinality\n of the majority class\n \"\"\"\n if isinstance(strategy, float) or isinstance(strategy, int):\n return max([0, int((n_maj - n_min)*strategy)])\n else:\n m = \"Value %s for parameter strategy is not supported\" % strategy\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n def sample_between_points(self, x, y):\n \"\"\"\n Sample randomly along the line between two points.\n Args:\n x (np.array): point 1\n y (np.array): point 2\n Returns:\n np.array: the new sample\n \"\"\"\n return x + (y - x)*self.random_state.random_sample()\n\n def sample_between_points_componentwise(self, 
x, y, mask=None):\n \"\"\"\n Sample each dimension separately between the two points.\n Args:\n x (np.array): point 1\n y (np.array): point 2\n mask (np.array): array of 0,1s - specifies which dimensions\n to sample\n Returns:\n np.array: the new sample being generated\n \"\"\"\n if mask is None:\n return x + (y - x)*self.random_state.random_sample()\n else:\n return x + (y - x)*self.random_state.random_sample()*mask\n\n def sample_by_jittering(self, x, std):\n \"\"\"\n Sample by jittering.\n Args:\n x (np.array): base point\n std (float): standard deviation\n Returns:\n np.array: the new sample\n \"\"\"\n return x + (self.random_state.random_sample() - 0.5)*2.0*std\n\n def sample_by_jittering_componentwise(self, x, std):\n \"\"\"\n Sample by jittering componentwise.\n Args:\n x (np.array): base point\n std (np.array): standard deviation\n Returns:\n np.array: the new sample\n \"\"\"\n return x + (self.random_state.random_sample(len(x))-0.5)*2.0 * std\n\n def sample_by_gaussian_jittering(self, x, std):\n \"\"\"\n Sample by Gaussian jittering\n Args:\n x (np.array): base point\n std (np.array): standard deviation\n Returns:\n np.array: the new sample\n \"\"\"\n return self.random_state.normal(x, std)\n\n def sample(self, X, y):\n \"\"\"\n The samplig function reimplemented in child classes\n Args:\n X (np.matrix): features\n y (np.array): labels\n Returns:\n np.matrix, np.array: sampled X and y\n \"\"\"\n return X, y\n\n def fit_resample(self, X, y):\n \"\"\"\n Alias of the function \"sample\" for compatibility with imbalanced-learn\n pipelines\n \"\"\"\n return self.sample(X, y)\n\n def sample_with_timing(self, X, y):\n begin = time.time()\n X_samp, y_samp = self.sample(X, y)\n _logger.info(self.__class__.__name__ + \": \" +\n (\"runtime: %f\" % (time.time() - begin)))\n return X_samp, y_samp\n\n def preprocessing_transform(self, X):\n \"\"\"\n Transforms new data according to the possible transformation\n implemented by the function \"sample\".\n Args:\n X (np.matrix): features\n Returns:\n np.matrix: transformed features\n \"\"\"\n return X\n\n def get_params(self, deep=False):\n \"\"\"\n Returns the parameters of the object as a dictionary.\n Returns:\n dict: the parameters of the object\n \"\"\"\n pass\n\n def set_params(self, **params):\n \"\"\"\n Set parameters\n\n Args:\n params (dict): dictionary of parameters\n \"\"\"\n\n for key, value in params.items():\n setattr(self, key, value)\n\n return self\n\n def descriptor(self):\n \"\"\"\n Returns:\n str: JSON description of the current sampling object\n \"\"\"\n return str((self.__class__.__name__, str(self.get_params())))\n\n def __str__(self):\n return self.descriptor()\n\n\nclass UnderSampling(StatisticsMixin,\n ParameterCheckingMixin,\n ParameterCombinationsMixin):\n \"\"\"\n Base class of undersampling approaches.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructorm\n \"\"\"\n super().__init__()\n\n def sample(self, X, y):\n \"\"\"\n Carry out undersampling\n Args:\n X (np.matrix): features\n y (np.array): labels\n Returns:\n np.matrix, np.array: sampled X and y\n \"\"\"\n pass\n\n def get_params(self, deep=False):\n \"\"\"\n Returns the parameters of the object as a dictionary.\n Returns:\n dict: the parameters of the object\n \"\"\"\n pass\n\n def descriptor(self):\n \"\"\"\n Returns:\n str: JSON description of the current sampling object\n \"\"\"\n return str((self.__class__.__name__, str(self.get_params())))\n\n\nclass NoSMOTE(OverSampling):\n \"\"\"\n The goal of this class is to provide a functionality to send data 
through\n on any model selection/evaluation pipeline with no oversampling carried\n out. It can be used to get baseline estimates on preformance.\n \"\"\"\n\n categories = []\n\n def __init__(self, random_state=None):\n \"\"\"\n Constructor of the NoSMOTE object.\n\n Args:\n random_state (int/np.random.RandomState/None): dummy parameter for \\\n the compatibility of interfaces\n \"\"\"\n super().__init__()\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return cls.generate_parameter_combinations({}, raw=False)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {}\n\nprint(\"Updating JUST_DUPLI\")\nclass JUST_DUPLI(OverSampling):\n \n\n categories = [OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,random_state=None):\n \"\"\"\n \n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.proportion = proportion\n self.set_random_state(random_state)\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n\n parameter_combinations = {'proportion': [0.1,1,2,4,6,8,10]}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Too few minority\")\n return X.copy(), y.copy()\n\n # determining the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n num_minority=X_min.shape[0]\n \n X_new_min=[]\n N = n_to_sample\n while N>0:\n for i in range(X_min.shape[0]):\n if N==0:\n break\n# print(\"Using \",i,\"th minority value\")\n v = X_min[i,:]\n m0 = v\n X_new_min.append(m0)\n N-=1\n y_new_min=[self.min_label for i in range(len(X_new_min))] \n X_new_min=np.array(X_new_min)\n X_new_all=np.concatenate((X, X_new_min), axis=0)\n y_new_all=np.concatenate((y, y_new_min), axis=0)\n\n \n\n return X_new_all, y_new_all\n\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'random_state': self._random_state_init}\n\n\n 
\nprint(\"Updated KNNOR to get sup the neighbors vs minority\")\nclass KNNOR_SMOTE(OverSampling):\n \n\n categories = [OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=0.05,\n# n_maj_neighbors=5, \n n_jobs=1,\n dist_threshold=0.8,\n dist_threshold_majority=0.5,\n max_dist=0.5,\n random_state=None):\n \"\"\"\n Constructor of the KNNOR-SMOTE object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 1.0\n means that after sampling the number of minority samples will\n be equal to the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique\n n_maj_neighbors (int): nth majority neighbor to consider while computing distance\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n dist_threshold (float): proportion of minority samples to be used basis distacne to minority samples\n\n dist_threshold_majority(float): proportion of minority samples to be use basis distance to majority samples\n --remove above\n \n max_dist(float): maximum distance between original point and generated point\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater(n_neighbors, \"n_neighbors\", 0)\n# self.check_greater_or_equal(n_maj_neighbors, \"n_maj_neighbors\", 1) \n self.check_n_jobs(n_jobs, 'n_jobs')\n self.check_greater(dist_threshold, \"dist_threshold\", 0)\n# self.check_greater(dist_threshold_majority, \"dist_threshold_majority\", 0)\n self.check_greater(max_dist, \"max_dist\", 0) \n \n \n self.proportion = proportion\n self.n_neighbors = n_neighbors\n# self.n_maj_neighbors = n_maj_neighbors\n self.n_jobs = n_jobs\n self.dist_threshold = dist_threshold\n# self.dist_threshold_majority=dist_threshold_majority\n self.max_dist = max_dist\n\n self.set_random_state(random_state)\n\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\" \n\n \n parameter_combinations = {'proportion': [0.1,0.95,1,1.3,1.4,1.5],\n 'n_neighbors': [1,2,3,4,5,7],\n# 'n_maj_neighbors': [1,2,3,4,5,6],\n 'dist_threshold': [0.1,0.6,0.7,0.8,0.9],\n# 'dist_threshold_majority': [0.1, 0.3, 0.5, 0.7, 0.9],\n 'max_dist': [0.001,0.01,0.02,0.09,0.1,0.5,0.8]\n }\n \n\n parameter_combinations = {'proportion': [1.3],\n 'n_neighbors': [7],\n# 'n_maj_neighbors': [1,2,3,4,5,6],\n 'dist_threshold': [0.1],\n# 'dist_threshold_majority': [0.1, 0.3, 0.5, 0.7, 0.9],\n 'max_dist': [0.8]\n }\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n \n def predict_classification(self,X,y,new_vector, num_neighbors_to_test,expected_class_index):\n from sklearn.neighbors import KNeighborsClassifier\n posit=np.argsort(abs((X-new_vector)*(X-new_vector)).sum(axis=1))\n classes = y[posit[0:num_neighbors_to_test]]\n# print(classes)\n# print(np.sum(classes==expected_class_index)==classes.shape[0])\n# knn = KNeighborsClassifier(n_neighbors=num_neighbors_to_test, metric='euclidean')\n# knn.fit(X, y)\n# y_pred = knn.predict([new_vector])[0]\n return np.sum(classes==expected_class_index)==classes.shape[0]\n# return y_pred==expected_class_index\n def check_duplicates(self,new_row,old_rows):\n for row in old_rows:\n same=True\n for i in range(len(row)):\n if new_row[i]!=row[i]:\n same=False\n continue\n if same:\n# print(\"These are the same\")\n return True \n return 
False\n \n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n threshold_cannot_use=10\n # if we are generating points that are unusable, above number of times\n # we will \n # a. reduce the max_dist by 0.01 or make it 0.01 (whichever is more)\n # b. reduce the n_neighbors by 1 or set it to 2 (whichever is more)\n original_max_dist=self.max_dist\n original_n_neighbors=self.n_neighbors \n original_dist_threshold=self.dist_threshold\n \n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n print(\"GOing to add \",n_to_sample,\" points\")\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n num_minority=X_min.shape[0]\n# self.n_neighbors=math.ceil(num_minority*self.n_neighbors)\n \n# print(\"Using \",self.n_neighbors,\" neighbors\",\"against\",len(X_min),\" minority samples\")\n# _logger.info(\"stats:\"+str(self.n_neighbors)+\" \"+str(len(X_min)))\n if self.n_neighbors>=len(X_min):\n print(\"Count of minority samples less than number of neighbors\")\n return X.copy(), y.copy() \n \n X_new_min=[]\n # here check for points that are close to clusters according to the distance threshold\n\n # for each point we will have a list of distances\n # calculate distance of each point from\n # every other point\n dist_matrix = np.linalg.norm(X_min - X_min[:,None], axis = -1)\n dist_matrix.sort(axis = 1)\n# print(\"Dist_matrix is \",dist_matrix)\n # sorted each row\n kth_distances = dist_matrix[:, (self.n_neighbors-1)] \n # get one column\n \n kth_distances_sorted = np.sort(kth_distances)\n # sort lowest to greatest\n threshold_dist = kth_distances_sorted[math.floor(self.dist_threshold*len(kth_distances_sorted))]\n\n \n \n# dist_matrix_maj = np.linalg.norm(X_maj - X_maj[:,None], axis = -1) \n# print(\"The dist_matrix_maj matrix is \\n\",dist_matrix) \n# dist_matrix_maj.sort(axis=1)\n# print(\"the majority distance matrix, sorted is \\n\",dist_matrix_maj) \n # get the maximum from the minimum\n # that is, get the maximum value from 2nd column of dist_matrix_maj array\n# threshold_dist_maj=dist_matrix_maj.max(axis=0)[1]\n# print(\"Threshold distance from majority is \",threshold_dist_maj)\n # now calculate minimum distance of minority class\n # from each of the majority class\n# dist_matrix_maj = np.linalg.norm(X_min - X_maj[:,None], axis = -1) \n# dist_matrix_maj.sort(axis=1)\n# print(\"the distance of each minority point from majority, sorted is \\n\",dist_matrix_maj)\n# kth_distances_maj = dist_matrix_maj[:,self.n_maj_neighbors-1]\n# print(\"The distances of each minority from its kth majority neighbor\",kth_distances_maj)\n\n \n # print(\"The kth neighbor distance array is \\n\",kth_distances)\n\n \n \n # print(\"The old kth neighbor distance array is \\n\",kth_distances)\n # print(\"The sorted kth neighbor distance array is \\n\",kth_distances_sorted)\n\n# threshold_dist_maj = 
kth_distances_maj_sorted[math.floor(self.dist_threshold_majority*len(kth_distances_maj_sorted))]\n # print(\"The threshold distance is \",threshold_dist) \n \n N = n_to_sample\n consecutive_cannot_use=0\n while N>0:\n for i in range(X_min.shape[0]):\n# if kth_distances[i]>threshold_dist and kth_distances_maj[i]<threshold_dist_maj:\n\n if kth_distances[i]>threshold_dist :\n# print(f\"{kth_distances[i]}>{threshold_dist} and {kth_distances_maj[i]}<{threshold_dist_maj}\") \n# print(\"Ignored co ordinate\",X_min[i])\n continue\n if N==0:\n break\n# print(\"Using \",i,\"th minority value\")\n v = X_min[i,:]\n val=np.sort( abs((X_min-v)*(X_min-v)).sum(axis=1) )\n # sorted list of distance of val from X_min\n # X_min being the minority class\n posit=np.argsort(abs((X_min-v)*(X_min-v)).sum(axis=1))\n kv = X_min[posit[1:self.n_neighbors+1],:]\n # skip the first element as that will be 0\n # then take all the k closest neighbors \n alphak = rd.uniform(0,self.max_dist)\n m0 = v\n for j in range(self.n_neighbors):\n m1 = m0 + alphak * (kv[j,:] - m0)\n m0 = m1\n num_neighbors_to_test=math.floor(math.sqrt(self.n_neighbors))\n can_use=self.predict_classification(X,y,m0, num_neighbors_to_test,self.min_label)\n# print(\"Nearest neighbor says, usability\",can_use)\n can_use=can_use and not(self.check_duplicates(m0,X_min))\n can_use=can_use and not(self.check_duplicates(m0,X_new_min)) \n# print(\"Is it duplicate?\",self.check_duplicates(m0,X_min),self.check_duplicates(m0,X_new_min))\n# print(\"Overall Usability\",can_use) \n if can_use:\n consecutive_cannot_use=0\n# print(f\"Successful augmentation at n_neighbors={self.n_neighbors},max_dist={self.max_dist}\")\n# print(f\"dist_threshold={self.dist_threshold},threshold_dist={threshold_dist}\") \n self.n_neighbors=min(self.n_neighbors+1,original_n_neighbors)\n self.max_dist=min(self.max_dist+0.01,original_max_dist)\n self.dist_threshold=max(self.dist_threshold-0.01,original_dist_threshold)\n threshold_dist = kth_distances_sorted[math.floor(self.dist_threshold*len(kth_distances_sorted))]\n X_new_min.append(m0)\n N-=1\n else:\n consecutive_cannot_use+=1\n# print(\"Cannot use\")\n if consecutive_cannot_use>=threshold_cannot_use:\n# print(\"Too many failures, changing the values\")\n self.n_neighbors=max(self.n_neighbors-1,2)\n self.max_dist=max(self.max_dist-0.01,0.01)\n self.dist_threshold=min(self.dist_threshold+0.01,0.9)\n threshold_dist = kth_distances_sorted[math.floor(self.dist_threshold*len(kth_distances_sorted))]\n# print(f\"Changed to n_neighbors={self.n_neighbors},max_dist={self.max_dist}\")\n# print(f\"new dist_threshold={self.dist_threshold},threshold_dist={threshold_dist}\") \n consecutive_cannot_use=0\n \n y_new_min=[self.min_label for i in range(len(X_new_min))] \n X_new_min=np.array(X_new_min)\n X_new_all=np.concatenate((X, X_new_min), axis=0)\n y_new_all=np.concatenate((y, y_new_min), axis=0)\n\n \n\n return X_new_all, y_new_all\n \n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n# 'n_maj_neighbors': self.n_maj_neighbors,\n 'dist_threshold': self.dist_threshold,\n 'max_dist': self.max_dist,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n \n \n \nclass SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote,\n author={Chawla, N. V. and Bowyer, K. W. and Hall, L. O. and\n Kegelmeyer, W. 
P.},\n title={{SMOTE}: synthetic minority over-sampling technique},\n journal={Journal of Artificial Intelligence Research},\n volume={16},\n year={2002},\n pages={321--357}\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the SMOTE object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 1.0\n means that after sampling the number of minority samples will\n be equal to the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n # _logger.warning(self.__class__.__name__ +\n # \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting the model\n n_neigh = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n if n_to_sample == 0:\n return X.copy(), y.copy()\n\n # generating samples\n base_indices = self.random_state.choice(list(range(len(X_min))),\n n_to_sample)\n neighbor_indices = self.random_state.choice(list(range(1, n_neigh)),\n n_to_sample)\n\n X_base = X_min[base_indices]\n X_neighbor = X_min[ind[base_indices, neighbor_indices]]\n\n samples = X_base + np.multiply(self.random_state.rand(n_to_sample,\n 1),\n X_neighbor - X_base)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_TomekLinks(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_tomeklinks_enn,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. 
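    Example:
        A usage sketch (toy data assumed, made to overlap so that Tomek
        links exist). Because the cleaning step uses
        strategy='remove_both', majority samples can be deleted as well,
        so the output is not a strict superset of the input::

            import numpy as np

            X = np.vstack([np.random.rand(90, 2),
                           np.random.rand(10, 2) + 0.5])
            y = np.hstack([np.repeat(0, 90), np.repeat(1, 10)])

            X_samp, y_samp = SMOTE_TomekLinks(proportion=1.0,
                                              n_neighbors=5,
                                              random_state=42).sample(X, y)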
and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA},\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_noise_removal,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the SMOTE object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return SMOTE.parameter_combinations(raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n smote = SMOTE(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X_new, y_new = smote.sample(X, y)\n\n t = TomekLinkRemoval(strategy='remove_both', n_jobs=self.n_jobs)\n\n X_samp, y_samp = t.remove_noise(X_new, y_new)\n\n if len(X_samp) == 0:\n m = (\"All samples have been removed, \"\n \"returning the original dataset.\")\n _logger.info(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n return X_samp, y_samp\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_ENN(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_tomeklinks_enn,\n author = {Batista, Gustavo E. A. P. A. and Prati,\n Ronaldo C. and Monard, Maria Carolina},\n title = {A Study of the Behavior of Several Methods for\n Balancing Machine Learning Training Data},\n journal = {SIGKDD Explor. 
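    Example:
        A usage sketch (the scikit-learn synthetic dataset is an
        assumption for illustration). SMOTE runs first, then edited
        nearest neighbors discards samples misclassified by their
        neighborhood, which may shrink either class::

            import numpy as np
            from sklearn.datasets import make_classification

            X, y = make_classification(n_samples=200, n_features=4,
                                       n_informative=3, n_redundant=0,
                                       weights=[0.9, 0.1], random_state=3)
            X_samp, y_samp = SMOTE_ENN(proportion=1.0, n_neighbors=5,
                                       random_state=42).sample(X, y)
            print(np.unique(y, return_counts=True))
            print(np.unique(y_samp, return_counts=True))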
Newsl.},\n issue_date = {June 2004},\n volume = {6},\n number = {1},\n month = jun,\n year = {2004},\n issn = {1931-0145},\n pages = {20--29},\n numpages = {10},\n url = {http://doi.acm.org/10.1145/1007730.1007735},\n doi = {10.1145/1007730.1007735},\n acmid = {1007735},\n publisher = {ACM},\n address = {New York, NY, USA},\n }\n\n Notes:\n * Can remove too many of minority samples.\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_noise_removal,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the SMOTE object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return SMOTE.parameter_combinations(raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n smote = SMOTE(self.proportion, self.n_neighbors,\n n_jobs=self.n_jobs, random_state=self.random_state)\n X_new, y_new = smote.sample(X, y)\n\n enn = EditedNearestNeighbors(n_jobs=self.n_jobs)\n\n return enn.remove_noise(X_new, y_new)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Borderline_SMOTE1(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{borderlineSMOTE,\n author=\"Han, Hui\n and Wang, Wen-Yuan\n and Mao, Bing-Huan\",\n editor=\"Huang, De-Shuang\n and Zhang, Xiao-Ping\n and Huang, Guang-Bin\",\n title=\"Borderline-SMOTE: A New Over-Sampling Method\n in Imbalanced Data Sets Learning\",\n booktitle=\"Advances in Intelligent Computing\",\n year=\"2005\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"878--887\",\n isbn=\"978-3-540-31902-3\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n k_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
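    Example:
        An illustrative sketch (the helper below is not part of the
        class; it only restates the noise/danger split performed in
        ``sample``): ``n_neighbors`` drives the borderline test against
        the whole training set, while ``k_neighbors`` drives the
        SMOTE-style interpolation among minority neighbors of the
        danger points::

            import numpy as np
            from sklearn.neighbors import NearestNeighbors

            def borderline_split(X, y, min_label, maj_label, n_neighbors=5):
                # noise: all n_neighbors nearest neighbors are majority
                # danger: majority is the most frequent neighbor label
                X_min = X[y == min_label]
                nn = NearestNeighbors(n_neighbors=n_neighbors + 1).fit(X)
                _, ind = nn.kneighbors(X_min)
                n_maj = (y[ind[:, 1:]] == maj_label).sum(axis=1)
                noise = n_maj == n_neighbors
                danger = (n_maj > n_neighbors / 2) & ~noise
                return noise, danger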
1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique for determining the borderline\n k_neighbors (int): control parameter of the nearest neighbor\n technique for sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_greater_or_equal(k_neighbors, 'k_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.k_neighbors = k_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'k_neighbors': [3, 5, 7]}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining number of samples to be generated\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # fitting model\n X_min = X[y == self.min_label]\n\n n_neighbors = min([len(X), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_min)\n\n # determining minority samples in danger\n noise = []\n danger = []\n for i in range(len(indices)):\n if self.n_neighbors == sum(y[indices[i][1:]] == self.maj_label):\n noise.append(i)\n elif mode(y[indices[i][1:]]) == self.maj_label:\n danger.append(i)\n X_danger = X_min[danger]\n X_min = np.delete(X_min, np.array(noise).astype(int), axis=0)\n\n if len(X_danger) == 0:\n _logger.info(self.__class__.__name__ +\n \": \" + \"No samples in danger\")\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model to minority samples\n k_neigh = min([len(X_min), self.k_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=k_neigh, n_jobs=self.n_jobs)\n nn.fit(X_min)\n # extracting neighbors of samples in danger\n distances, indices = nn.kneighbors(X_danger)\n\n # generating samples near points in danger\n base_indices = self.random_state.choice(list(range(len(X_danger))),\n n_to_sample)\n neighbor_indices = self.random_state.choice(list(range(1, k_neigh)),\n n_to_sample)\n\n X_base = X_danger[base_indices]\n X_neighbor = X_min[indices[base_indices, neighbor_indices]]\n\n samples = X_base + \\\n np.multiply(self.random_state.rand(\n n_to_sample, 1), X_neighbor - X_base)\n\n return (np.vstack([X, samples]),\n np.hstack([y, 
np.hstack([self.min_label]*n_to_sample)]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'k_neighbors': self.k_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Borderline_SMOTE2(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{borderlineSMOTE,\n author=\"Han, Hui\n and Wang, Wen-Yuan\n and Mao, Bing-Huan\",\n editor=\"Huang, De-Shuang\n and Zhang, Xiao-Ping\n and Huang, Guang-Bin\",\n title=\"Borderline-SMOTE: A New Over-Sampling\n Method in Imbalanced Data Sets Learning\",\n booktitle=\"Advances in Intelligent Computing\",\n year=\"2005\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"878--887\",\n isbn=\"978-3-540-31902-3\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n k_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n technique for determining the borderline\n k_neighbors (int): control parameter of the nearest neighbor\n technique for sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_greater_or_equal(k_neighbors, 'k_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.k_neighbors = k_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'k_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining number of samples to be generated\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model\n X_min = X[y == self.min_label]\n\n n_neighbors = min([self.n_neighbors+1, len(X)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices 
= nn.kneighbors(X_min)\n\n # determining minority samples in danger\n noise = []\n danger = []\n for i in range(len(indices)):\n if self.n_neighbors == sum(y[indices[i][1:]] == self.maj_label):\n noise.append(i)\n elif mode(y[indices[i][1:]]) == self.maj_label:\n danger.append(i)\n X_danger = X_min[danger]\n X_min = np.delete(X_min, np.array(noise).astype(int), axis=0)\n\n if len(X_min) < 2:\n m = (\"The number of minority samples after preprocessing (%d) is \"\n \"not enough for sampling\")\n m = m % (len(X_min))\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n if len(X_danger) == 0:\n m = \"No samples in danger\"\n _logger.info(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model to minority samples\n k_neigh = self.k_neighbors + 1\n k_neigh = min([k_neigh, len(X)])\n nn = NearestNeighbors(n_neighbors=k_neigh, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_danger)\n\n # generating the samples\n base_indices = self.random_state.choice(\n list(range(len(X_danger))), n_to_sample)\n neighbor_indices = self.random_state.choice(\n list(range(1, k_neigh)), n_to_sample)\n\n X_base = X_danger[base_indices]\n X_neighbor = X[indices[base_indices, neighbor_indices]]\n diff = X_neighbor - X_base\n r = self.random_state.rand(n_to_sample, 1)\n mask = y[neighbor_indices] == self.maj_label\n r[mask] = r[mask]*0.5\n\n samples = X_base + np.multiply(r, diff)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.hstack([self.min_label]*n_to_sample)]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'k_neighbors': self.k_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ADASYN(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{adasyn,\n author={He, H. and Bai, Y. and Garcia,\n E. A. 
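    Example:
        A simplified sketch of the weighting used below (the helper is
        illustrative, not part of the class). ADASYN generates
        beta*(m_maj - m_min) samples, but only if m_min/m_maj <= d_th,
        and assigns each minority point a weight proportional to the
        share of majority points among its neighbors::

            import numpy as np
            from sklearn.neighbors import NearestNeighbors

            def adasyn_weights(X, y, min_label, maj_label, n_neighbors=5):
                # minority points surrounded by more majority points
                # receive proportionally more synthetic samples
                X_min = X[y == min_label]
                nn = NearestNeighbors(n_neighbors=n_neighbors + 1).fit(X)
                _, ind = nn.kneighbors(X_min)
                r = (y[ind[:, 1:]] == maj_label).sum(axis=1) / n_neighbors
                return r / r.sum() if r.sum() > 0 else r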
and Li, S.},\n title={{ADASYN}: adaptive synthetic sampling\n approach for imbalanced learning},\n booktitle={Proceedings of IJCNN},\n year={2008},\n pages={1322--1328}\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_extensive,\n OverSampling.cat_borderline,\n OverSampling.cat_density_based]\n\n def __init__(self,\n n_neighbors=5,\n d_th=0.9,\n beta=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): control parameter of the nearest neighbor\n component\n d_th (float): tolerated deviation level from balancedness\n beta (float): target level of balancedness, same as proportion\n in other techniques\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_greater_or_equal(d_th, 'd_th', 0)\n self.check_greater_or_equal(beta, 'beta', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_neighbors = n_neighbors\n self.d_th = d_th\n self.beta = beta\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'n_neighbors': [3, 5, 7, 9],\n 'd_th': [0.9],\n 'beta': [1.0, 0.75, 0.5, 0.25]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n\n # checking if sampling is needed\n m_min = len(X_min)\n m_maj = len(X) - m_min\n\n n_to_sample = (m_maj - m_min)*self.beta\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n d = float(m_min)/m_maj\n if d > self.d_th:\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model to all samples\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_min)\n\n # determining the distribution of points to be generated\n r = []\n for i in range(len(indices)):\n r.append(sum(y[indices[i][1:]] ==\n self.maj_label)/self.n_neighbors)\n r = np.array(r)\n if sum(r) > 0:\n r = r/sum(r)\n\n if any(np.isnan(r)) or sum(r) == 0:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"not enough non-noise samples for oversampling\")\n return X.copy(), y.copy()\n\n # fitting nearest neighbors models to minority samples\n n_neigh = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neigh, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distances, indices = nn.kneighbors(X_min)\n\n # sampling points\n base_indices = self.random_state.choice(\n list(range(len(X_min))), size=int(n_to_sample), p=r)\n neighbor_indices = self.random_state.choice(\n list(range(1, n_neigh)), int(n_to_sample))\n\n X_base = 
X_min[base_indices]\n X_neighbor = X_min[indices[base_indices, neighbor_indices]]\n diff = X_neighbor - X_base\n r = self.random_state.rand(int(n_to_sample), 1)\n\n samples = X_base + np.multiply(r, diff)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.hstack([self.min_label]*int(n_to_sample))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'd_th': self.d_th,\n 'beta': self.beta,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass AHC(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{AHC,\n title = \"Learning from imbalanced data in surveillance\n of nosocomial infection\",\n journal = \"Artificial Intelligence in Medicine\",\n volume = \"37\",\n number = \"1\",\n pages = \"7 - 18\",\n year = \"2006\",\n note = \"Intelligent Data Analysis in Medicine\",\n issn = \"0933-3657\",\n doi = \"https://doi.org/10.1016/j.artmed.2005.03.002\",\n url = {http://www.sciencedirect.com/science/article/\n pii/S0933365705000850},\n author = \"Gilles Cohen and Mélanie Hilario and Hugo Sax\n and Stéphane Hugonnet and Antoine Geissbuhler\",\n keywords = \"Nosocomial infection, Machine learning,\n Support vector machines, Data imbalance\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_application]\n\n def __init__(self, strategy='min', n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n strategy (str): which class to sample (min/maj/minmaj)\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_isin(strategy, 'strategy', ['min', 'maj', 'minmaj'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.strategy = strategy\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'strategy': ['min', 'maj', 'minmaj']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample_majority(self, X, n_clusters):\n \"\"\"\n Sample the majority class\n\n Args:\n X (np.ndarray): majority samples\n n_clusters (int): number of clusters to find\n\n Returns:\n np.ndarray: downsampled vectors\n \"\"\"\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X)\n return kmeans.cluster_centers_\n\n def sample_minority(self, X):\n \"\"\"\n Sampling the minority class\n\n Args:\n X (np.ndarray): minority samples\n\n Returns:\n np.ndarray: the oversampled set of vectors\n \"\"\"\n ac = AgglomerativeClustering(n_clusters=1)\n ac.fit(X)\n n_samples = len(X)\n\n cc = [None]*len(ac.children_)\n weights = [None]*len(ac.children_)\n\n def cluster_centers(children, i, cc, weights):\n \"\"\"\n Extract cluster centers\n\n Args:\n children (np.array): indices of children\n i (int): index to process\n cc (np.array): cluster centers\n weights (np.array): cluster weights\n\n Returns:\n int, float: new cluster center, new weight\n \"\"\"\n if i < n_samples:\n return X[i], 1.0\n\n if cc[i - n_samples] is None:\n a, w_a = cluster_centers(\n children, children[i - n_samples][0], cc, weights)\n b, w_b = cluster_centers(\n children, children[i - n_samples][1], cc, weights)\n cc[i - 
n_samples] = (w_a*a + w_b*b)/(w_a + w_b)\n weights[i - n_samples] = w_a + w_b\n\n return cc[i - n_samples], weights[i - n_samples]\n\n cluster_centers(ac.children_, ac.children_[-1][-1] + 1, cc, weights)\n\n return np.vstack(cc)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n if self.strategy == 'maj':\n X_maj_resampled = self.sample_majority(X_maj, len(X_min))\n return (np.vstack([X_min, X_maj_resampled]),\n np.hstack([np.repeat(self.min_label, len(X_min)),\n np.repeat(self.maj_label,\n len(X_maj_resampled))]))\n elif self.strategy == 'min':\n X_min_resampled = self.sample_minority(X_min)\n return (np.vstack([X_min_resampled, X_min, X_maj]),\n np.hstack([np.repeat(self.min_label,\n (len(X_min_resampled) + len(X_min))),\n np.repeat(self.maj_label, len(X_maj))]))\n elif self.strategy == 'minmaj':\n X_min_resampled = self.sample_minority(X_min)\n n_maj_sample = min([len(X_maj), len(X_min_resampled) + len(X_min)])\n X_maj_resampled = self.sample_majority(X_maj, n_maj_sample)\n return (np.vstack([X_min_resampled, X_min, X_maj_resampled]),\n np.hstack([np.repeat(self.min_label,\n (len(X_min_resampled) + len(X_min))),\n np.repeat(self.maj_label,\n len(X_maj_resampled))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'strategy': self.strategy,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass LLE_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{lle_smote,\n author={Wang, J. and Xu, M. and Wang,\n H. and Zhang, J.},\n booktitle={2006 8th international Conference\n on Signal Processing},\n title={Classification of Imbalanced Data by Using\n the SMOTE Algorithm and Locally Linear\n Embedding},\n year={2006},\n volume={3},\n number={},\n pages={},\n keywords={artificial intelligence;\n biomedical imaging;medical computing;\n imbalanced data classification;\n SMOTE algorithm;\n locally linear embedding;\n medical imaging intelligence;\n synthetic minority oversampling\n technique;\n high-dimensional data;\n low-dimensional space;\n Biomedical imaging;\n Back;Training data;\n Data mining;Biomedical engineering;\n Research and development;\n Electronic mail;Pattern recognition;\n Performance analysis;\n Classification algorithms},\n doi={10.1109/ICOSP.2006.345752},\n ISSN={2164-5221},\n month={Nov}}\n\n Notes:\n * There might be numerical issues if the nearest neighbors contain\n some element multiple times.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_dim_reduction]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_components=2,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj\n and n_min to sample e.g. 
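    Example:
        A usage sketch (the toy data is an assumption; ``n_components``
        is kept below the original feature dimension so the embedding
        is meaningful)::

            import numpy as np

            X = np.vstack([np.random.rand(100, 5),
                           np.random.rand(20, 5) + 1.0])
            y = np.hstack([np.repeat(0, 100), np.repeat(1, 20)])

            sampler = LLE_SMOTE(proportion=1.0, n_neighbors=5,
                                n_components=2, random_state=42)
            X_samp, y_samp = sampler.sample(X, y)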
1.0 means that after\n sampling the number of minority samples will\n be equal to the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n n_components (int): dimensionality of the embedding space\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 2)\n self.check_greater_or_equal(n_components, 'n_components', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_components = n_components\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_components': [2, 3, 5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n\n # do the locally linear embedding\n lle = LocallyLinearEmbedding(\n self.n_neighbors, self.n_components, n_jobs=self.n_jobs)\n try:\n lle.fit(X_min)\n except Exception as e:\n return X.copy(), y.copy()\n X_min_transformed = lle.transform(X_min)\n\n # fitting the nearest neighbors model for sampling\n n_neighbors = min([self.n_neighbors+1, len(X_min_transformed)])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs).fit(X_min_transformed)\n dist, ind = nn.kneighbors(X_min_transformed)\n\n def solve_for_weights(xi, Z):\n \"\"\"\n Solve for locally linear embedding weights\n\n Args:\n xi (np.array): vector\n Z (np.matrix): matrix of neighbors in rows\n\n Returns:\n np.array: reconstruction weights\n\n Following https://cs.nyu.edu/~roweis/lle/algorithm.html\n \"\"\"\n Z = Z - xi\n Z = Z.T\n C = np.dot(Z.T, Z)\n try:\n w = np.linalg.solve(C, np.repeat(1.0, len(C)))\n if np.linalg.norm(w) > 1e8:\n w = np.repeat(1.0, len(C))\n except Exception as e:\n w = np.repeat(1.0, len(C))\n return w/np.sum(w)\n\n # generating samples\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(X_min))\n random_coords = self.random_state.choice(ind[idx][1:])\n xi = self.sample_between_points(X_min_transformed[idx],\n X_min_transformed[random_coords])\n Z = X_min_transformed[ind[idx][1:]]\n w = solve_for_weights(xi, Z)\n samples.append(np.dot(w, X_min[ind[idx][1:]]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n 
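    For reference, the linear system solved by ``solve_for_weights`` in
    ``sample`` above can be restated as the following standalone sketch
    (names are illustrative and the fallback handling is simplified)::

        import numpy as np

        def reconstruction_weights(xi, Z):
            # weights w, summing to 1, that best reconstruct xi from the
            # rows of Z (the neighbors of xi in the embedded space)
            D = Z - xi              # shift so xi sits at the origin
            C = D @ D.T             # local Gram matrix (neighbors x neighbors)
            try:
                w = np.linalg.solve(C, np.ones(len(C)))
            except np.linalg.LinAlgError:
                w = np.ones(len(C))  # fall back to uniform weights
            return w / w.sum()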
Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_components': self.n_components,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass distance_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{distance_smote,\n author={de la Calleja, J. and Fuentes, O.},\n booktitle={Proceedings of the Twentieth\n International Florida Artificial\n Intelligence},\n title={A distance-based over-sampling method\n for learning from imbalanced data sets},\n year={2007},\n volume={3},\n pages={634--635}\n }\n\n Notes:\n * It is not clear what the authors mean by \"weighted distance\".\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n\n # fitting the model\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(X_min))\n mean_vector = np.mean(X_min[ind[idx][1:]], axis=0)\n samples.append(self.sample_between_points(X_min[idx], mean_vector))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current 
sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMMO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{smmo,\n author = {de la Calleja, Jorge and Fuentes, Olac\n and González, Jesús},\n booktitle= {Proceedings of the Twenty-First\n International Florida Artificial\n Intelligence Research Society\n Conference},\n year = {2008},\n month = {01},\n pages = {276-281},\n title = {Selecting Minority Examples from\n Misclassified Data for Over-Sampling.}\n }\n\n Notes:\n * In this paper the ensemble is not specified. I have selected\n some very fast, basic classifiers.\n * Also, it is not clear what the authors mean by \"weighted distance\".\n * The original technique is not prepared for the case when no minority\n samples are classified correctly be the ensemble.\n \"\"\"\n\n categories = [OverSampling.cat_borderline,\n OverSampling.cat_extensive,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n ensemble=[QuadraticDiscriminantAnalysis(),\n DecisionTreeClassifier(random_state=2),\n GaussianNB()],\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n ensemble (list): list of classifiers, if None, default list of\n classifiers is used\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n try:\n len_ens = len(ensemble)\n except Exception as e:\n raise ValueError('The ensemble needs to be a list-like object')\n if len_ens == 0:\n raise ValueError('At least 1 classifier needs to be specified')\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.ensemble = ensemble\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n ensembles = [[QuadraticDiscriminantAnalysis(),\n DecisionTreeClassifier(random_state=2),\n GaussianNB()]]\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'ensemble': ensembles}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return 
X.copy(), y.copy()\n\n # training and in-sample prediction (out-of-sample by k-fold cross\n # validation might be better)\n predictions = []\n for e in self.ensemble:\n predictions.append(e.fit(X, y).predict(X))\n\n # constructing ensemble prediction\n pred = np.where(np.sum(np.vstack(predictions), axis=0)\n > len(self.ensemble)/2, 1, 0)\n\n # create mask of minority samples to sample\n mask_to_sample = np.where(np.logical_and(np.logical_not(\n np.equal(pred, y)), y == self.min_label))[0]\n if len(mask_to_sample) < 2:\n m = \"Not enough minority samples selected %d\" % len(mask_to_sample)\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_min_to_sample = X[mask_to_sample]\n\n # fitting nearest neighbors model for sampling\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min_to_sample)\n\n # doing the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.randint(len(X_min_to_sample))\n mean = np.mean(X_min[ind[idx][1:]], axis=0)\n samples.append(self.sample_between_points(\n X_min_to_sample[idx], mean))\n\n return (np.vstack([X, np.vstack([samples])]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'ensemble': self.ensemble,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass polynom_fit_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{polynomial_fit_smote,\n author={Gazzah, S. and Amara, N. E. B.},\n booktitle={2008 The Eighth IAPR International\n Workshop on Document Analysis Systems},\n title={New Oversampling Approaches Based on\n Polynomial Fitting for Imbalanced Data\n Sets},\n year={2008},\n volume={},\n number={},\n pages={677-684},\n keywords={curve fitting;learning (artificial\n intelligence);mesh generation;pattern\n classification;polynomials;sampling\n methods;support vector machines;\n oversampling approach;polynomial\n fitting function;imbalanced data\n set;pattern classification task;\n class-modular strategy;support\n vector machine;true negative rate;\n true positive rate;star topology;\n bus topology;polynomial curve\n topology;mesh topology;Polynomials;\n Topology;Support vector machines;\n Support vector machine classification;\n Pattern classification;Performance\n evaluation;Training data;Text\n analysis;Data engineering;Convergence;\n writer identification system;majority\n class;minority class;imbalanced data\n sets;polynomial fitting functions;\n class-modular strategy},\n doi={10.1109/DAS.2008.74},\n ISSN={},\n month={Sept},}\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n topology='star',\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
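    Example:
        A usage sketch (toy data assumed). With topology='star' each
        minority point is interpolated towards the minority mean, 'bus'
        interpolates towards the previous minority point, 'mesh' places
        points between pairs of minority samples, and 'poly_<d>' fits a
        degree-d polynomial per feature and reads new points off the
        fitted curves::

            import numpy as np

            X = np.vstack([np.random.rand(90, 3),
                           np.random.rand(15, 3) + 2.0])
            y = np.hstack([np.repeat(0, 90), np.repeat(1, 15)])

            X_star, y_star = polynom_fit_SMOTE(proportion=1.0,
                                               topology='star').sample(X, y)
            X_poly, y_poly = polynom_fit_SMOTE(proportion=1.0,
                                               topology='poly_3').sample(X, y)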
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n topoplogy (str): 'star'/'bus'/'mesh'\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0.0)\n if topology.startswith('poly'):\n self.check_greater_or_equal(\n int(topology.split('_')[-1]), 'topology', 1)\n else:\n self.check_isin(topology, \"topology\", ['star', 'bus', 'mesh'])\n\n self.proportion = proportion\n self.topology = topology\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'topology': ['star', 'bus', 'mesh',\n 'poly_1', 'poly_2', 'poly_3']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # extracting minority samples\n X_min = X[y == self.min_label]\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n samples = []\n if self.topology == 'star':\n # Implementation of the star topology\n X_mean = np.mean(X_min, axis=0)\n k = max([1, int(np.rint(n_to_sample/len(X_min)))])\n for x in X_min:\n diff = X_mean - x\n for i in range(1, k+1):\n samples.append(x + float(i)/(k+1)*diff)\n elif self.topology == 'bus':\n # Implementation of the bus topology\n k = max([1, int(np.rint(n_to_sample/len(X_min)))])\n for i in range(1, len(X_min)):\n diff = X_min[i-1] - X_min[i]\n for j in range(1, k+1):\n samples.append(X_min[i] + float(j)/(k+1)*diff)\n elif self.topology == 'mesh':\n # Implementation of the mesh topology\n if len(X_min)**2 > n_to_sample:\n while len(samples) < n_to_sample:\n random_i = self.random_state.randint(len(X_min))\n random_j = self.random_state.randint(len(X_min))\n diff = X_min[random_i] - X_min[random_j]\n samples.append(X_min[random_i] + 0.5*diff)\n else:\n n_combs = (len(X_min)*(len(X_min)-1)/2)\n k = max([1, int(np.rint(n_to_sample/n_combs))])\n for i in range(len(X_min)):\n for j in range(len(X_min)):\n diff = X_min[i] - X_min[j]\n for li in range(1, k+1):\n samples.append(X_min[j] + float(li)/(k+1)*diff)\n elif self.topology.startswith('poly'):\n # Implementation of the polynomial topology\n deg = int(self.topology.split('_')[1])\n dim = len(X_min[0])\n\n def fit_poly(d):\n return np.poly1d(np.polyfit(np.arange(len(X_min)),\n X_min[:, d], deg))\n\n polys = [fit_poly(d) for d in range(dim)]\n\n for d in range(dim):\n random_sample = self.random_state.random_sample()*len(X_min)\n samples_gen = [polys[d](random_sample)\n for _ in range(n_to_sample)]\n samples.append(np.array(samples_gen))\n samples = np.vstack(samples).T\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, 
np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'topology': self.topology,\n 'random_state': self._random_state_init}\n\n\nclass Stefanowski(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{stefanowski,\n author = {Stefanowski, Jerzy and Wilk, Szymon},\n title = {Selective Pre-processing of Imbalanced Data for\n Improving Classification Performance},\n booktitle = {Proceedings of the 10th International Conference\n on Data Warehousing and Knowledge Discovery},\n series = {DaWaK '08},\n year = {2008},\n isbn = {978-3-540-85835-5},\n location = {Turin, Italy},\n pages = {283--292},\n numpages = {10},\n url = {http://dx.doi.org/10.1007/978-3-540-85836-2_27},\n doi = {10.1007/978-3-540-85836-2_27},\n acmid = {1430591},\n publisher = {Springer-Verlag},\n address = {Berlin, Heidelberg},\n }\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_noise_removal,\n OverSampling.cat_sample_copy,\n OverSampling.cat_borderline]\n\n def __init__(self, strategy='weak_amp', n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n strategy (str): 'weak_amp'/'weak_amp_relabel'/'strong_amp'\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_isin(strategy,\n 'strategy',\n ['weak_amp', 'weak_amp_relabel', 'strong_amp'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.strategy = strategy\n self.n_jobs = n_jobs\n\n # this method does not maintain randomness, the parameter is\n # introduced for the compatibility of interfaces\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n if not raw:\n return [{'strategy': 'weak_amp'},\n {'strategy': 'weak_amp_relabel'},\n {'strategy': 'strong_amp'}, ]\n else:\n return {'strategy': ['weak_amp', 'weak_amp_relabel', 'strong_amp']}\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if self.class_stats[self.min_label] < 6:\n m = (\"The number of minority samples (%d) is not\"\n \" enough for sampling\")\n m = m % (self.class_stats[self.min_label])\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n # copying y as its values will change\n y = y.copy()\n # fitting the nearest neighbors model for noise filtering, 4 neighbors\n # instead of 3 as the closest neighbor to a point is itself\n nn = NearestNeighbors(n_neighbors=min(4, len(X)), n_jobs=self.n_jobs)\n nn.fit(X)\n distance, indices = nn.kneighbors(X)\n\n # fitting the nearest neighbors model for sample generation,\n # 6 neighbors instead of 5 for the same reason\n nn5 = NearestNeighbors(n_neighbors=min(6, len(X)), n_jobs=self.n_jobs)\n nn5.fit(X)\n distance5, indices5 = nn5.kneighbors(X)\n\n # determining noisy and safe flags\n flags = []\n for i in range(len(indices)):\n if mode(y[indices[i][1:]]) == y[i]:\n 
flags.append('safe')\n else:\n flags.append('noisy')\n flags = np.array(flags)\n\n D = (y == self.maj_label) & (flags == 'noisy')\n minority_indices = np.where(y == self.min_label)[0]\n\n samples = []\n if self.strategy == 'weak_amp' or self.strategy == 'weak_amp_relabel':\n # weak mplification - the number of copies is the number of\n # majority nearest neighbors\n for i in minority_indices:\n if flags[i] == 'noisy':\n k = np.sum(np.logical_and(\n y[indices[i][1:]] == self.maj_label,\n flags[indices[i][1:]] == 'safe'))\n for _ in range(k):\n samples.append(X[i])\n if self.strategy == 'weak_amp_relabel':\n # relabling - noisy majority neighbors are relabelled to minority\n for i in minority_indices:\n if flags[i] == 'noisy':\n for j in indices[i][1:]:\n if y[j] == self.maj_label and flags[j] == 'noisy':\n y[j] = self.min_label\n D[j] = False\n if self.strategy == 'strong_amp':\n # safe minority samples are copied as many times as many safe\n # majority samples are among the nearest neighbors\n for i in minority_indices:\n if flags[i] == 'safe':\n k = np.sum(np.logical_and(\n y[indices[i][1:]] == self.maj_label,\n flags[indices[i][1:]] == 'safe'))\n for _ in range(k):\n samples.append(X[i])\n # if classified correctly by knn(5), noisy minority samples are\n # amplified by creating as many copies as many save majority\n # samples in its neighborhood are present otherwise amplify\n # based on the 5 neighborhood\n for i in minority_indices:\n if flags[i] == 'noisy':\n if mode(y[indices5[i][1:]]) == y[i]:\n k = np.sum(np.logical_and(\n y[indices[i][1:]] == self.maj_label,\n flags[indices[i][1:]] == 'safe'))\n else:\n k = np.sum(np.logical_and(\n y[indices5[i][1:]] == self.maj_label,\n flags[indices5[i][1:]] == 'safe'))\n for _ in range(k):\n samples.append(X[i])\n\n to_remove = np.where(D)[0]\n\n X_noise_removed = np.delete(X, to_remove, axis=0)\n y_noise_removed = np.delete(y, to_remove, axis=0)\n\n if len(samples) == 0 and len(X_noise_removed) > 10:\n m = \"no samples to add\"\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X_noise_removed, y_noise_removed\n elif len(samples) == 0:\n m = \"all samples removed as noise, returning the original dataset\"\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n return (np.vstack([X_noise_removed,\n np.vstack(samples)]),\n np.hstack([y_noise_removed,\n np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'strategy': self.strategy,\n 'n_jobs': self.n_jobs}\n\n\nclass ADOMS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{adoms,\n author={Tang, S. 
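    Example:
        A condensed restatement of one iteration of ``sample`` below
        (the helper and variable names are illustrative, not part of
        the class). Each synthetic point is placed along the first
        principal direction of the local neighborhood, on the side of
        the randomly chosen neighbor::

            import numpy as np
            from sklearn.decomposition import PCA

            def adoms_sample(x, neighbors, rng):
                # neighbors[0] is assumed to be x itself, as returned by
                # a kneighbors query on the minority samples;
                # rng is a np.random.RandomState instance
                direction = PCA(n_components=1).fit(neighbors).components_[0]
                nb = neighbors[rng.randint(1, len(neighbors))]
                d = np.linalg.norm(nb - x)
                sign = 1.0 if np.dot(nb - x, direction) > 0 else -1.0
                return x + sign * rng.random_sample() * d * direction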
and Chen, S.},\n booktitle={2008 International Conference on\n Information Technology and\n Applications in Biomedicine},\n title={The generation mechanism of synthetic\n minority class examples},\n year={2008},\n volume={},\n number={},\n pages={444-447},\n keywords={medical image processing;\n generation mechanism;synthetic\n minority class examples;class\n imbalance problem;medical image\n analysis;oversampling algorithm;\n Principal component analysis;\n Biomedical imaging;Medical\n diagnostic imaging;Information\n technology;Biomedical engineering;\n Noise generators;Concrete;Nearest\n neighbor searches;Data analysis;\n Image analysis},\n doi={10.1109/ITAB.2008.4570642},\n ISSN={2168-2194},\n month={May}}\n \"\"\"\n\n categories = [OverSampling.cat_dim_reduction,\n OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and\n n_min to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): parameter of the nearest neighbor component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0.0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distance, indices = nn.kneighbors(X_min)\n\n samples = []\n for _ in range(n_to_sample):\n index = self.random_state.randint(len(X_min))\n neighbors = X_min[indices[index]]\n\n # fitting the PCA\n pca = PCA(n_components=1)\n pca.fit(neighbors)\n\n # extracting the principal direction\n principal_direction = pca.components_[0]\n\n # do the sampling according to the description in the paper\n random_index = self.random_state.randint(1, len(neighbors))\n random_neighbor = 
neighbors[random_index]\n d = np.linalg.norm(random_neighbor - X_min[index])\n r = self.random_state.random_sample()\n inner_product = np.dot(random_neighbor - X_min[index],\n principal_direction)\n sign = 1.0 if inner_product > 0.0 else -1.0\n samples.append(X_min[index] + sign*r*d*principal_direction)\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Safe_Level_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{safe_level_smote,\n author = {\n Bunkhumpornpat, Chumphol and Sinapiromsaran,\n Krung and Lursinsap, Chidchanok},\n title = {Safe-Level-SMOTE: Safe-Level-Synthetic\n Minority Over-Sampling TEchnique for\n Handling the Class Imbalanced Problem},\n booktitle = {Proceedings of the 13th Pacific-Asia\n Conference on Advances in Knowledge\n Discovery and Data Mining},\n series = {PAKDD '09},\n year = {2009},\n isbn = {978-3-642-01306-5},\n location = {Bangkok, Thailand},\n pages = {475--482},\n numpages = {8},\n url = {http://dx.doi.org/10.1007/978-3-642-01307-2_43},\n doi = {10.1007/978-3-642-01307-2_43},\n acmid = {1533904},\n publisher = {Springer-Verlag},\n address = {Berlin, Heidelberg},\n keywords = {Class Imbalanced Problem, Over-sampling,\n SMOTE, Safe Level},\n }\n\n Notes:\n * The original method was not prepared for the case when no minority\n sample has minority neighbors.\n \"\"\"\n\n categories = [OverSampling.cat_borderline,\n OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1.0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # fitting nearest neighbors model\n n_neighbors = min([self.n_neighbors+1, len(X)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distance, indices = nn.kneighbors(X)\n\n minority_labels = (y == self.min_label)\n minority_indices = np.where(minority_labels)[0]\n\n # do the sampling\n numattrs = len(X[0])\n samples = []\n for _ in range(n_to_sample):\n index = self.random_state.randint(len(minority_indices))\n neighbor_index = self.random_state.choice(indices[index][1:])\n\n p = X[index]\n n = X[neighbor_index]\n\n # find safe levels\n sl_p = np.sum(y[indices[index][1:]] == self.min_label)\n sl_n = np.sum(y[indices[neighbor_index][1:]]\n == self.min_label)\n\n if sl_n > 0:\n sl_ratio = float(sl_p)/sl_n\n else:\n sl_ratio = np.inf\n\n if sl_ratio == np.inf and sl_p == 0:\n pass\n else:\n s = np.zeros(numattrs)\n for atti in range(numattrs):\n # iterate through attributes and do sampling according to\n # safe level\n if sl_ratio == np.inf and sl_p > 0:\n gap = 0.0\n elif sl_ratio == 1:\n gap = self.random_state.random_sample()\n elif sl_ratio > 1:\n gap = self.random_state.random_sample()*1.0/sl_ratio\n elif sl_ratio < 1:\n gap = (1 - sl_ratio) + \\\n self.random_state.random_sample()*sl_ratio\n dif = n[atti] - p[atti]\n s[atti] = p[atti] + gap*dif\n samples.append(s)\n\n if len(samples) == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"No samples generated\")\n return X.copy(), y.copy()\n else:\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': 
self._random_state_init}\n\n\nclass MSMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{msmote,\n author = {Hu, Shengguo and Liang,\n Yanfeng and Ma, Lintao and He, Ying},\n title = {MSMOTE: Improving Classification\n Performance When Training Data\n is Imbalanced},\n booktitle = {Proceedings of the 2009 Second\n International Workshop on\n Computer Science and Engineering\n - Volume 02},\n series = {IWCSE '09},\n year = {2009},\n isbn = {978-0-7695-3881-5},\n pages = {13--17},\n numpages = {5},\n url = {https://doi.org/10.1109/WCSE.2009.756},\n doi = {10.1109/WCSE.2009.756},\n acmid = {1682710},\n publisher = {IEEE Computer Society},\n address = {Washington, DC, USA},\n keywords = {imbalanced data, over-sampling,\n SMOTE, AdaBoost, samples groups,\n SMOTEBoost},\n }\n\n Notes:\n * The original method was not prepared for the case when all\n minority samples are noise.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_noise_removal,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting the nearest neighbors model\n n_neighbors = min([len(X), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distance, indices = nn.kneighbors(X_min)\n\n noise_mask = np.repeat(False, len(X_min))\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n index = self.random_state.randint(len(X_min))\n\n n_p = np.sum(y[indices[index][1:]] == self.min_label)\n\n if n_p == self.n_neighbors:\n sample_type = 'security'\n elif n_p == 0:\n 
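# no minority samples among the neighbors: the point is flagged as noise and skipped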
sample_type = 'noise'\n noise_mask[index] = True\n if np.all(noise_mask):\n _logger.info(\"All minority samples are noise\")\n return X.copy(), y.copy()\n else:\n sample_type = 'border'\n\n if sample_type == 'security':\n neighbor_index = self.random_state.choice(indices[index][1:])\n elif sample_type == 'border':\n neighbor_index = indices[index][1]\n else:\n continue\n\n s_gen = self.sample_between_points_componentwise(X_min[index],\n X[neighbor_index])\n samples.append(s_gen)\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass DE_oversampling(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{de_oversampling,\n author={Chen, L. and Cai, Z. and Chen, L. and\n Gu, Q.},\n booktitle={2010 Third International Conference\n on Knowledge Discovery and Data Mining},\n title={A Novel Differential Evolution-Clustering\n Hybrid Resampling Algorithm on Imbalanced\n Datasets},\n year={2010},\n volume={},\n number={},\n pages={81-85},\n keywords={pattern clustering;sampling methods;\n support vector machines;differential\n evolution;clustering algorithm;hybrid\n resampling algorithm;imbalanced\n datasets;support vector machine;\n minority class;mutation operators;\n crossover operators;data cleaning\n method;F-measure criterion;ROC area\n criterion;Support vector machines;\n Intrusion detection;Support vector\n machine classification;Cleaning;\n Electronic mail;Clustering algorithms;\n Signal to noise ratio;Learning\n systems;Data mining;Geology;imbalanced\n datasets;hybrid resampling;clustering;\n differential evolution;support vector\n machine},\n doi={10.1109/WKDD.2010.48},\n ISSN={},\n month={Jan},}\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n crossover_rate=0.5,\n similarity_threshold=0.5,\n n_clusters=30, n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): control parameter of the nearest neighbor\n component\n crossover_rate (float): cross over rate of evoluation\n similarity_threshold (float): similarity threshold paramter\n n_clusters (int): number of clusters for cleansing\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 2)\n self.check_in_range(crossover_rate, 'crossover_rate', [0, 1])\n self.check_in_range(similarity_threshold,\n 'similarity_threshold', [0, 1])\n self.check_greater_or_equal(n_clusters, 'n_clusters', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.crossover_rate = crossover_rate\n self.similarity_threshold = similarity_threshold\n self.n_clusters = n_clusters\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'crossover_rate': [0.1, 0.5, 0.9],\n 'similarity_threshold': [0.5, 0.9],\n 'n_clusters': [10, 20, 50]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n d = len(X[0])\n\n X_min = X[y == self.min_label]\n\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distance, indices = nn.kneighbors(X_min)\n\n # generating samples\n samples = []\n for _ in range(n_to_sample):\n # mutation according to the description in the paper\n random_index = self.random_state.randint(len(X_min))\n random_point = X_min[random_index]\n random_neighbor_indices = self.random_state.choice(\n indices[random_index][1:], 2, replace=False)\n random_neighbor_1 = X_min[random_neighbor_indices[0]]\n random_neighbor_2 = X_min[random_neighbor_indices[1]]\n\n mutated = random_point + \\\n (random_neighbor_1 - random_neighbor_2) * \\\n self.random_state.random_sample()\n\n # crossover - updates the vector 'mutated'\n rand_s = self.random_state.randint(d)\n for i in range(d):\n random_value = self.random_state.random_sample()\n if random_value >= self.crossover_rate and not i == rand_s:\n mutated[i] = random_point[i]\n elif random_value < self.crossover_rate or i == rand_s:\n pass\n\n samples.append(mutated)\n\n # 
assembling all data for clearning\n X, y = np.vstack([X, np.vstack(samples)]), np.hstack(\n [y, np.repeat(self.min_label, len(samples))])\n X_min = X[y == self.min_label]\n\n # cleansing based on clustering\n n_clusters = min([len(X), self.n_clusters])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X)\n unique_labels = np.unique(kmeans.labels_)\n\n def cluster_filter(li):\n return len(np.unique(y[np.where(kmeans.labels_ == li)[0]])) == 1\n\n one_label_clusters = [li for li in unique_labels if cluster_filter(li)]\n to_remove = []\n\n # going through the clusters having one label only\n for li in one_label_clusters:\n cluster_indices = np.where(kmeans.labels_ == li)[0]\n mean_of_cluster = kmeans.cluster_centers_[li]\n\n # finding center-like sample\n center_like_index = None\n center_like_dist = np.inf\n\n for i in cluster_indices:\n dist = np.linalg.norm(X[i] - mean_of_cluster)\n if dist < center_like_dist:\n center_like_dist = dist\n center_like_index = i\n\n # removing the samples similar to the center-like sample\n for i in cluster_indices:\n if i != center_like_index:\n d = np.inner(X[i], X[center_like_index]) / \\\n (np.linalg.norm(X[i]) *\n np.linalg.norm(X[center_like_index]))\n if d > self.similarity_threshold:\n to_remove.append(i)\n\n return np.delete(X, to_remove, axis=0), np.delete(y, to_remove)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'crossover_rate': self.crossover_rate,\n 'similarity_threshold': self.similarity_threshold,\n 'n_clusters': self.n_clusters,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n# Borrowed from sklearn-dev, will be removed once the sklearn implementation\n# becomes stable\n\n\nclass OPTICS:\n def __init__(self, min_samples=5, max_eps=np.inf, metric='euclidean',\n p=2, metric_params=None, maxima_ratio=.75,\n rejection_ratio=.7, similarity_threshold=0.4,\n significant_min=.003, min_cluster_size=.005,\n min_maxima_ratio=0.001, algorithm='ball_tree',\n leaf_size=30, n_jobs=1):\n\n self.max_eps = max_eps\n self.min_samples = min_samples\n self.maxima_ratio = maxima_ratio\n self.rejection_ratio = rejection_ratio\n self.similarity_threshold = similarity_threshold\n self.significant_min = significant_min\n self.min_cluster_size = min_cluster_size\n self.min_maxima_ratio = min_maxima_ratio\n self.algorithm = algorithm\n self.metric = metric\n self.metric_params = metric_params\n self.p = p\n self.leaf_size = leaf_size\n self.n_jobs = n_jobs\n\n def fit(self, X, y=None):\n \"\"\"Perform OPTICS clustering\n Extracts an ordered list of points and reachability distances, and\n performs initial clustering using `max_eps` distance specified at\n OPTICS object instantiation.\n Parameters\n ----------\n X : array, shape (n_samples, n_features)\n The data.\n y : ignored\n Returns\n -------\n self : instance of OPTICS\n The instance.\n \"\"\"\n n_samples = len(X)\n\n if self.min_samples > n_samples:\n m = (\"Number of training samples (n_samples=%d) must \"\n \"be greater than min_samples (min_samples=%d) \"\n \"used for clustering.\")\n m = m % (n_samples, self.min_samples)\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n if self.min_cluster_size <= 0 or (self.min_cluster_size !=\n int(self.min_cluster_size)\n and self.min_cluster_size > 1):\n m = ('min_cluster_size must be a positive integer or '\n 'a float between 0 and 1. 
Got %r')\n m = m % self.min_cluster_size\n raise ValueError(self.__class__.__name__ + \": \" + m)\n elif self.min_cluster_size > n_samples:\n m = ('min_cluster_size must be no greater than the '\n 'number of samples (%d). Got %d')\n m = m % (n_samples, self.min_cluster_size)\n\n raise ValueError(self.__class__.__name__ + \": \" + m)\n\n # Start all points as 'unprocessed' ##\n self.reachability_ = np.empty(n_samples)\n self.reachability_.fill(np.inf)\n self.core_distances_ = np.empty(n_samples)\n self.core_distances_.fill(np.nan)\n # Start all points as noise ##\n self.labels_ = np.full(n_samples, -1, dtype=int)\n\n nbrs = NearestNeighbors(n_neighbors=self.min_samples,\n algorithm=self.algorithm,\n leaf_size=self.leaf_size, metric=self.metric,\n metric_params=self.metric_params, p=self.p,\n n_jobs=self.n_jobs)\n\n nbrs.fit(X)\n self.core_distances_[:] = nbrs.kneighbors(X,\n self.min_samples)[0][:, -1]\n\n self.ordering_ = self._calculate_optics_order(X, nbrs)\n\n return self\n\n # OPTICS helper functions\n\n def _calculate_optics_order(self, X, nbrs):\n # Main OPTICS loop. Not parallelizable. The order that entries are\n # written to the 'ordering_' list is important!\n processed = np.zeros(X.shape[0], dtype=bool)\n ordering = np.zeros(X.shape[0], dtype=int)\n ordering_idx = 0\n for point in range(X.shape[0]):\n if processed[point]:\n continue\n if self.core_distances_[point] <= self.max_eps:\n while not processed[point]:\n processed[point] = True\n ordering[ordering_idx] = point\n ordering_idx += 1\n point = self._set_reach_dist(point, processed, X, nbrs)\n else: # For very noisy points\n ordering[ordering_idx] = point\n ordering_idx += 1\n processed[point] = True\n return ordering\n\n def _set_reach_dist(self, point_index, processed, X, nbrs):\n P = X[point_index:point_index + 1]\n indices = nbrs.radius_neighbors(P, radius=self.max_eps,\n return_distance=False)[0]\n\n # Getting indices of neighbors that have not been processed\n unproc = np.compress((~np.take(processed, indices)).ravel(),\n indices, axis=0)\n # Keep n_jobs = 1 in the following lines...please\n if not unproc.size:\n # Everything is already processed. Return to main loop\n return point_index\n\n dists = pairwise_distances(P, np.take(X, unproc, axis=0),\n self.metric, n_jobs=1).ravel()\n\n rdists = np.maximum(dists, self.core_distances_[point_index])\n new_reach = np.minimum(np.take(self.reachability_, unproc), rdists)\n self.reachability_[unproc] = new_reach\n\n # Define return order based on reachability distance\n return (unproc[self.quick_scan(np.take(self.reachability_, unproc),\n dists)])\n\n def isclose(self, a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a-b) <= max([rel_tol*max([abs(a), abs(b)]), abs_tol])\n\n def quick_scan(self, rdists, dists):\n rdist = np.inf\n dist = np.inf\n n = len(rdists)\n for i in range(n):\n if rdists[i] < rdist:\n rdist = rdists[i]\n dist = dists[i]\n idx = i\n elif self.isclose(rdists[i], rdist):\n if dists[i] < dist:\n dist = dists[i]\n idx = i\n return idx\n\n\nclass SMOBD(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{smobd,\n author={Cao, Q. 
and Wang, S.},\n booktitle={2011 International Conference on\n Information Management, Innovation\n Management and Industrial\n Engineering},\n title={Applying Over-sampling Technique Based\n on Data Density and Cost-sensitive\n SVM to Imbalanced Learning},\n year={2011},\n volume={2},\n number={},\n pages={543-548},\n keywords={data handling;learning (artificial\n intelligence);support vector machines;\n oversampling technique application;\n data density;cost sensitive SVM;\n imbalanced learning;SMOTE algorithm;\n data distribution;density information;\n Support vector machines;Classification\n algorithms;Noise measurement;Arrays;\n Noise;Algorithm design and analysis;\n Training;imbalanced learning;\n cost-sensitive SVM;SMOTE;data density;\n SMOBD},\n doi={10.1109/ICIII.2011.276},\n ISSN={2155-1456},\n month={Nov},}\n \"\"\"\n\n categories = [OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based,\n OverSampling.cat_extensive,\n OverSampling.cat_noise_removal]\n\n def __init__(self,\n proportion=1.0,\n eta1=0.5,\n t=1.8,\n min_samples=5,\n max_eps=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n eta1 (float): control parameter of density estimation\n t (float): control parameter of noise filtering\n min_samples (int): minimum samples parameter for OPTICS\n max_eps (float): maximum environment radius paramter for OPTICS\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_in_range(eta1, 'eta1', [0.0, 1.0])\n self.check_greater_or_equal(t, 't', 0)\n self.check_greater_or_equal(min_samples, 'min_samples', 1)\n self.check_greater_or_equal(max_eps, 'max_eps', 0.0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.eta1 = eta1\n self.t = t\n self.min_samples = min_samples\n self.max_eps = max_eps\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'eta1': [0.1, 0.5, 0.9],\n 't': [1.5, 2.5],\n 'min_samples': [5],\n 'max_eps': [0.1, 0.5, 1.0, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determine the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # running 
the OPTICS technique based on the sklearn implementation\n # TODO: replace to sklearn call once it is stable\n min_samples = min([len(X_min)-1, self.min_samples])\n o = OPTICS(min_samples=min_samples,\n max_eps=self.max_eps,\n n_jobs=self.n_jobs)\n o.fit(X_min)\n cd = o.core_distances_\n rd = o.reachability_\n\n # noise filtering\n cd_average = np.mean(cd)\n rd_average = np.mean(rd)\n noise = np.logical_and(cd > cd_average*self.t, rd > rd_average*self.t)\n\n # fitting a nearest neighbor model to be able to find\n # neighbors in radius\n n_neighbors = min([len(X_min), self.min_samples+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distances, indices = nn.kneighbors(X_min)\n\n # determining the density\n factor_1 = cd\n factor_2 = np.array([len(x) for x in nn.radius_neighbors(\n X_min, radius=self.max_eps, return_distance=False)])\n\n if max(factor_1) == 0 or max(factor_2) == 0:\n return X.copy(), y.copy()\n\n factor_1 = factor_1/max(factor_1)\n factor_2 = factor_2/max(factor_2)\n\n df = factor_1*self.eta1 + factor_2*(1 - self.eta1)\n\n # setting the density at noisy samples to zero\n for i in range(len(noise)):\n if noise[i]:\n df[i] = 0\n\n if sum(df) == 0 or any(np.isnan(df)) or any(np.isinf(df)):\n return X.copy(), y.copy()\n\n # normalizing the density\n df_dens = df/sum(df)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min)), p=df_dens)\n neighbor_idx = self.random_state.choice(indices[idx][1:])\n samples.append(self.sample_between_points_componentwise(\n X_min[idx], X_min[neighbor_idx]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'eta1': self.eta1,\n 't': self.t,\n 'min_samples': self.min_samples,\n 'max_eps': self.max_eps,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SUNDO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{sundo,\n author={Cateni, S. and Colla, V. 
and Vannucci, M.},\n booktitle={2011 11th International Conference on\n Intelligent Systems Design and\n Applications},\n title={Novel resampling method for the\n classification of imbalanced datasets for\n industrial and other real-world problems},\n year={2011},\n volume={},\n number={},\n pages={402-407},\n keywords={decision trees;pattern classification;\n sampling methods;support vector\n machines;resampling method;imbalanced\n dataset classification;industrial\n problem;real world problem;\n oversampling technique;undersampling\n technique;support vector machine;\n decision tree;binary classification;\n synthetic dataset;public dataset;\n industrial dataset;Support vector\n machines;Training;Accuracy;Databases;\n Intelligent systems;Breast cancer;\n Decision trees;oversampling;\n undersampling;imbalanced dataset},\n doi={10.1109/ISDA.2011.6121689},\n ISSN={2164-7151},\n month={Nov}}\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_application]\n\n def __init__(self, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return [{}]\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n n_1 = len(X_min)\n n_0 = len(X) - n_1\n N = int(np.rint(0.5*n_0 - 0.5*n_1 + 0.5))\n\n if N == 0:\n return X.copy(), y.copy()\n\n # generating minority samples\n samples = []\n\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X_maj)\n\n stds = np.std(X_min, axis=0)\n # At one point the algorithm says to keep those points which are\n # the most distant from majority samples, and not leaving any minority\n # sample isolated. 
This can be implemented by generating multiple\n # samples for each point and keep the one most distant from the\n # majority samples.\n for _ in range(N):\n i = self.random_state.randint(len(X_min))\n best_sample = None\n best_sample_dist = 0\n for _ in range(3):\n s = self.random_state.normal(X_min[i], stds)\n dist, ind = nn.kneighbors(s.reshape(1, -1))\n if dist[0][0] > best_sample_dist:\n best_sample_dist = dist[0][0]\n best_sample = s\n samples.append(best_sample)\n\n # Extending the minority dataset with the new samples\n X_min_extended = np.vstack([X_min, np.vstack(samples)])\n\n # Removing N elements from the majority dataset\n\n # normalize\n mms = MinMaxScaler()\n X_maj_normalized = mms.fit_transform(X_maj)\n\n # computing the distance matrix\n dm = pairwise_distances(X_maj_normalized, X_maj_normalized)\n\n # len(X_maj) offsets for the diagonal 0 elements, 2N because\n # every distances appears twice\n threshold = sorted(dm.flatten())[min(\n [len(X_maj) + 2*N, len(dm)*len(dm) - 1])]\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # extracting the coordinates of pairs closer than threshold\n pairs_to_break = np.where(dm < threshold)\n pairs_to_break = np.vstack(pairs_to_break)\n\n # sorting the pairs, otherwise both points would be removed\n pairs_to_break.sort(axis=0)\n\n # uniqueing the coordinates - the final number might be less than N\n to_remove = np.unique(pairs_to_break[0])\n\n # removing the selected elements\n X_maj_cleaned = np.delete(X_maj, to_remove, axis=0)\n\n return (np.vstack([X_min_extended, X_maj_cleaned]),\n np.hstack([np.repeat(self.min_label, len(X_min_extended)),\n np.repeat(self.maj_label, len(X_maj_cleaned))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MSYN(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{msyn,\n author=\"Fan, Xiannian\n and Tang, Ke\n and Weise, Thomas\",\n editor=\"Huang, Joshua Zhexue\n and Cao, Longbing\n and Srivastava, Jaideep\",\n title=\"Margin-Based Over-Sampling Method for\n Learning from Imbalanced Datasets\",\n booktitle=\"Advances in Knowledge Discovery and\n Data Mining\",\n year=\"2011\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"309--320\",\n abstract=\"Learning from imbalanced datasets has\n drawn more and more attentions from\n both theoretical and practical aspects.\n Over- sampling is a popular and simple\n method for imbalanced learning. In this\n paper, we show that there is an\n inherently potential risk associated\n with the over-sampling algorithms in\n terms of the large margin principle.\n Then we propose a new synthetic over\n sampling method, named Margin-guided\n Synthetic Over-sampling (MSYN), to\n reduce this risk. The MSYN improves\n learning with respect to the data\n distributions guided by the\n margin-based rule. Empirical study\n verities the efficacy of MSYN.\",\n isbn=\"978-3-642-20847-8\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n pressure=1.5,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n pressure (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors in the SMOTE sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(pressure, 'pressure', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.pressure = pressure\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'pressure': [2.5, 2.0, 1.5],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n min_indices = np.where(y == self.min_label)[0]\n maj_indices = np.where(y == self.maj_label)[0]\n\n # generating samples\n smote = SMOTE(proportion=self.pressure,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n X_res, y_res = smote.sample(X, y)\n X_new, _ = X_res[len(X):], y_res[len(X):]\n\n if len(X_new) == 0:\n m = \"Sampling is not needed\"\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n # Compute nearest hit and miss for both classes\n nn = NearestNeighbors(n_neighbors=len(X), n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X)\n\n # computing nearest hit and miss distances, these will be used to\n # compute thetas\n nearest_hit_dist = np.array([dist[i][next(j for j in range(\n 1, len(X)) if y[i] == y[ind[i][j]])] for i in range(len(X))])\n nearest_miss_dist = np.array([dist[i][next(j for j in range(\n 1, len(X)) if y[i] != y[ind[i][j]])] for i in range(len(X))])\n\n # computing the thetas without new samples being involved\n theta_A_sub_alpha = 0.5*(nearest_miss_dist - nearest_hit_dist)\n theta_min = theta_A_sub_alpha[min_indices]\n theta_maj = theta_A_sub_alpha[maj_indices]\n\n # computing the f_3 score for all new samples\n f_3 = []\n for x in X_new:\n # determining the distances of the new sample from the training set\n distances = np.linalg.norm(X - x, axis=1)\n\n # computing nearest hit and miss distances involving the new\n # elements\n mask = nearest_hit_dist[min_indices] < distances[min_indices]\n nearest_hit_dist_min = np.where(mask,\n nearest_hit_dist[min_indices],\n distances[min_indices])\n nearest_miss_dist_min = nearest_miss_dist[min_indices]\n nearest_hit_dist_maj = nearest_hit_dist[maj_indices]\n mask = nearest_miss_dist[maj_indices] < distances[maj_indices]\n nearest_miss_dist_maj = np.where(mask,\n nearest_miss_dist[maj_indices],\n distances[maj_indices])\n\n # computing the thetas incorporating the new elements\n theta_x_min = 0.5*(nearest_miss_dist_min - 
nearest_hit_dist_min)\n theta_x_maj = 0.5*(nearest_miss_dist_maj - nearest_hit_dist_maj)\n\n # determining the delta scores and computing f_3\n Delta_P = np.sum(theta_x_min - theta_min)\n Delta_N = np.sum(theta_x_maj - theta_maj)\n\n f_3.append(-Delta_N/(Delta_P + 0.01))\n\n f_3 = np.array(f_3)\n\n # determining the elements with the minimum f_3 scores to add\n _, new_ind = zip(\n *sorted(zip(f_3, np.arange(len(f_3))), key=lambda x: x[0]))\n new_ind = list(new_ind[:(len(X_maj) - len(X_min))])\n\n return (np.vstack([X, X_new[new_ind]]),\n np.hstack([y, np.repeat(self.min_label, len(new_ind))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'pressure': self.pressure,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SVM_balance(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{svm_balance,\n author = {Farquad, M.A.H. and Bose, Indranil},\n title = {Preprocessing Unbalanced Data Using Support\n Vector Machine},\n journal = {Decis. Support Syst.},\n issue_date = {April, 2012},\n volume = {53},\n number = {1},\n month = apr,\n year = {2012},\n issn = {0167-9236},\n pages = {226--233},\n numpages = {8},\n url = {http://dx.doi.org/10.1016/j.dss.2012.01.016},\n doi = {10.1016/j.dss.2012.01.016},\n acmid = {2181554},\n publisher = {Elsevier Science Publishers B. V.},\n address = {Amsterdam, The Netherlands, The Netherlands},\n keywords = {COIL data, Hybrid method, Preprocessor, SVM,\n Unbalanced data},\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_classifier,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in the SMOTE sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X, y = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n if sum(y == self.min_label) < 2:\n return X.copy(), y.copy()\n else:\n cv = min([5, sum(y == self.min_label)])\n\n ss = StandardScaler()\n X_norm = ss.fit_transform(X)\n\n C_params = [0.01, 0.1, 1.0, 10.0]\n best_score = 0\n best_C = 0.01\n for C in C_params:\n _logger.info(self.__class__.__name__ + \": \" +\n \"Evaluating SVM with C=%f\" % C)\n svc = SVC(C=C, kernel='rbf', gamma='auto')\n score = np.mean(cross_val_score(svc, X_norm, y, cv=cv))\n if score > best_score:\n best_score = score\n best_C = C\n svc = SVC(C=best_C, kernel='rbf', gamma='auto')\n svc.fit(X_norm, y)\n\n return X, svc.predict(X_norm)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass TRIM_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{trim_smote,\n author=\"Puntumapon, Kamthorn\n and Waiyamai, Kitsana\",\n editor=\"Tan, Pang-Ning\n and Chawla, Sanjay\n and Ho, Chin Kuan\n and Bailey, James\",\n title=\"A Pruning-Based Approach for Searching\n Precise and Generalized Region for\n Synthetic Minority Over-Sampling\",\n booktitle=\"Advances in Knowledge Discovery\n and Data Mining\",\n year=\"2012\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"371--382\",\n isbn=\"978-3-642-30220-6\"\n }\n\n Notes:\n * It is not described precisely how the filtered data is used for\n sample generation. 
The method is proposed to be a preprocessing
            step, and it states that it applies sample generation to each
            group extracted.
    """

    categories = [OverSampling.cat_extensive,
                  OverSampling.cat_uses_clustering]

    def __init__(self,
                 proportion=1.0,
                 n_neighbors=5,
                 min_precision=0.3,
                 n_jobs=1,
                 random_state=None):
        """
        Constructor of the sampling object

        Args:
            proportion (float): proportion of the difference of n_maj and n_min
                                to sample, e.g. 1.0 means that after sampling
                                the number of minority samples will be equal
                                to the number of majority samples
            n_neighbors (int): number of neighbors in the nearest neighbors
                                component used for sample generation
            min_precision (float): minimum precision a seed group needs to
                                reach to be kept
            n_jobs (int): number of parallel jobs
            random_state (int/RandomState/None): initializer of random_state,
                                like in sklearn
        """
        super().__init__()
        self.check_greater_or_equal(proportion, 'proportion', 0)
        self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)
        self.check_in_range(min_precision, 'min_precision', [0, 1])
        self.check_n_jobs(n_jobs, 'n_jobs')

        self.proportion = proportion
        self.n_neighbors = n_neighbors
        self.min_precision = min_precision
        self.n_jobs = n_jobs

        self.set_random_state(random_state)

    @ classmethod
    def parameter_combinations(cls, raw=False):
        """
        Generates reasonable parameter combinations.

        Returns:
            list(dict): a list of meaningful parameter combinations
        """
        parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,
                                                 1.0, 1.5, 2.0],
                                  'n_neighbors': [3, 5, 7],
                                  'min_precision': [0.3]}
        return cls.generate_parameter_combinations(parameter_combinations, raw)

    def trim(self, y):
        """
        Determines the trim value.

        Args:
            y (np.array): array of target labels

        Returns:
            float: the trim value
        """
        return np.sum(y == self.min_label)**2/len(y)

    def precision(self, y):
        """
        Determines the precision value.

        Args:
            y (np.array): array of target labels

        Returns:
            float: the precision value
        """
        return np.sum(y == self.min_label)/len(y)

    def determine_splitting_point(self, X, y, split_on_border=False):
        """
        Determines the splitting point.

        Args:
            X (np.matrix): a subset of the training data
            y (np.array): an array of target labels
            split_on_border (bool): whether splitting on class borders is
                                    considered

        Returns:
            tuple(int, float), bool: (splitting feature, splitting value),
                                    whether to make the split
        """
        trim_value = self.trim(y)
        d = len(X[0])
        max_t_minus_gain = 0.0
        split = None

        # checking all dimensions of X
        for i in range(d):
            # sort the elements in dimension i
            sorted_X_y = sorted(zip(X[:, i], y), key=lambda pair: pair[0])
            # stored as an array so that the label comparison below is
            # carried out elementwise
            sorted_y = np.array([yy for _, yy in sorted_X_y])

            # number of minority samples on the left
            left_min = 0
            # number of minority samples on the right
            right_min = np.sum(sorted_y == self.min_label)

            # check all possible splitting points sequentially
            for j in range(0, len(sorted_y)-1):
                if sorted_y[j] == self.min_label:
                    # adjusting the number of minority and majority samples
                    left_min = left_min + 1
                    right_min = right_min - 1
                # checking if we can split on the border and do not split
                # on tied feature values
                if ((split_on_border is False
                        or (split_on_border is True
                            and not sorted_y[j-1] == sorted_y[j]))
                        and sorted_X_y[j][0] != sorted_X_y[j+1][0]):
                    # compute trim value of the left
                    trim_left = left_min**2/(j+1)
                    # compute trim value of the right
                    trim_right = right_min**2/(len(sorted_y) - j - 1)
                    # let's check the gain
                    if max([trim_left, trim_right]) > max_t_minus_gain:
                        max_t_minus_gain = max([trim_left, trim_right])
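                        # store the best split found so far as a
                        # (feature index, threshold value) pair
                        split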
= (i, sorted_X_y[j][0])\n # return splitting values and the value of the logical condition\n # in line 9\n if split is not None:\n return split, max_t_minus_gain > trim_value\n else:\n return (0, 0), False\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n leafs = [(X, y)]\n candidates = []\n seeds = []\n\n # executing the trimming\n # loop in line 2 of the paper\n _logger.info(self.__class__.__name__ +\n \": \" + \"do the trimming process\")\n while len(leafs) > 0 or len(candidates) > 0:\n add_to_leafs = []\n # executing the loop starting in line 3\n for leaf in leafs:\n # the function implements the loop starting in line 6\n # splitting on class border is forced\n split, gain = self.determine_splitting_point(\n leaf[0], leaf[1], True)\n if len(leaf[0]) == 1:\n # small leafs with 1 element (no splitting point)\n # are dropped as noise\n continue\n else:\n # condition in line 9\n if gain:\n # making the split\n mask_left = (leaf[0][:, split[0]] <= split[1])\n X_left = leaf[0][mask_left]\n y_left = leaf[1][mask_left]\n mask_right = np.logical_not(mask_left)\n X_right = leaf[0][mask_right]\n y_right = leaf[1][mask_right]\n\n # condition in line 11\n if np.sum(y_left == self.min_label) > 0:\n add_to_leafs.append((X_left, y_left))\n # condition in line 13\n if np.sum(y_right == self.min_label) > 0:\n add_to_leafs.append((X_right, y_right))\n else:\n # line 16\n candidates.append(leaf)\n # we implement line 15 and 18 by replacing the list of leafs by\n # the list of new leafs.\n leafs = add_to_leafs\n\n # iterating through all candidates (loop starting in line 21)\n for c in candidates:\n # extracting splitting points, this time split on border\n # is not forced\n split, gain = self.determine_splitting_point(c[0], c[1], False)\n if len(c[0]) == 1:\n # small leafs are dropped as noise\n continue\n else:\n # checking condition in line 27\n if gain:\n # doing the split\n mask_left = (c[0][:, split[0]] <= split[1])\n X_left, y_left = c[0][mask_left], c[1][mask_left]\n mask_right = np.logical_not(mask_left)\n X_right, y_right = c[0][mask_right], c[1][mask_right]\n # checking logic in line 29\n if np.sum(y_left == self.min_label) > 0:\n leafs.append((X_left, y_left))\n # checking logic in line 31\n if np.sum(y_right == self.min_label) > 0:\n leafs.append((X_right, y_right))\n else:\n # adding candidate to seeds (line 35)\n seeds.append(c)\n # line 33 and line 36 are implemented by emptying the candidates\n # list\n candidates = []\n\n # filtering the resulting set\n filtered_seeds = [s for s in seeds if self.precision(\n s[1]) > self.min_precision]\n\n # handling the situation when no seeds were found\n if len(seeds) == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"no seeds identified\")\n return X.copy(), y.copy()\n\n # fix for bad choice of min_precision\n multiplier = 0.9\n while len(filtered_seeds) == 0:\n filtered_seeds = [s for s in seeds if self.precision(\n s[1]) > 
self.min_precision*multiplier]\n multiplier = multiplier*0.9\n if multiplier < 0.1:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"no clusters passing the filtering\")\n return X.copy(), y.copy()\n\n seeds = filtered_seeds\n\n X_seed = np.vstack([s[0] for s in seeds])\n y_seed = np.hstack([s[1] for s in seeds])\n\n _logger.info(self.__class__.__name__ + \": \" + \"do the sampling\")\n # generating samples by SMOTE\n X_seed_min = X_seed[y_seed == self.min_label]\n if len(X_seed_min) <= 1:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"X_seed_min contains less than 2 samples\")\n return X.copy(), y.copy()\n\n n_neighbors = min([len(X_seed_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_seed_min)\n distances, indices = nn.kneighbors(X_seed_min)\n\n # do the sampling\n samples = []\n for _ in range(n_to_sample):\n random_idx = self.random_state.randint(len(X_seed_min))\n random_neighbor_idx = self.random_state.choice(\n indices[random_idx][1:])\n samples.append(self.sample_between_points(\n X_seed_min[random_idx], X_seed_min[random_neighbor_idx]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'min_precision': self.min_precision,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_RSB(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{smote_rsb,\n author=\"Ramentol, Enislay\n and Caballero, Yail{\\'e}\n and Bello, Rafael\n and Herrera, Francisco\",\n title=\"SMOTE-RSB*: a hybrid preprocessing approach\n based on oversampling and undersampling for\n high imbalanced data-sets using SMOTE and\n rough sets theory\",\n journal=\"Knowledge and Information Systems\",\n year=\"2012\",\n month=\"Nov\",\n day=\"01\",\n volume=\"33\",\n number=\"2\",\n pages=\"245--265\",\n issn=\"0219-3116\",\n doi=\"10.1007/s10115-011-0465-6\",\n url=\"https://doi.org/10.1007/s10115-011-0465-6\"\n }\n\n Notes:\n * I think the description of the algorithm in Fig 5 of the paper\n is not correct. The set \"resultSet\" is initialized with the\n original instances, and then the While loop in the Algorithm\n run until resultSet is empty, which never holds. Also, the\n resultSet is only extended in the loop. Our implementation\n is changed in the following way: we generate twice as many\n instances are required to balance the dataset, and repeat\n the loop until the number of new samples added to the training\n set is enough to balance the dataset.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=2.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors in the SMOTE sampling\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X_maj = X[y == self.maj_label]\n X_min = X[y == self.min_label]\n\n # Step 1: do the sampling\n smote = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n X_samp, y_samp = smote.sample(X, y)\n X_samp, y_samp = X_samp[len(X):], y_samp[len(X):]\n\n if len(X_samp) == 0:\n return X.copy(), y.copy()\n\n # Step 2: (original will be added later)\n result_set = []\n\n # Step 3: first the data is normalized\n maximums = np.max(X_samp, axis=0)\n minimums = np.min(X_samp, axis=0)\n\n # normalize X_new and X_maj\n norm_factor = maximums - minimums\n null_mask = norm_factor == 0\n n_null = np.sum(null_mask)\n fixed = np.max(np.vstack([maximums[null_mask], np.repeat(1, n_null)]),\n axis=0)\n\n norm_factor[null_mask] = fixed\n\n X_samp_norm = X_samp / norm_factor\n X_maj_norm = X_maj / norm_factor\n\n # compute similarity matrix\n similarity_matrix = 1.0 - pairwise_distances(X_samp_norm,\n X_maj_norm,\n metric='minkowski',\n p=1)/len(X[0])\n\n # Step 4: counting the similar examples\n similarity_value = 0.4\n syn = len(X_samp)\n cont = np.zeros(syn)\n\n already_added = np.repeat(False, len(X_samp))\n\n while (len(result_set) < len(X_maj) - len(X_min)\n and similarity_value <= 0.9):\n for i in range(syn):\n cont[i] = np.sum(similarity_matrix[i, :] > similarity_value)\n if cont[i] == 0 and not already_added[i]:\n result_set.append(X_samp[i])\n already_added[i] = True\n similarity_value = similarity_value + 0.05\n\n # Step 5: returning the results depending the number of instances\n # added to the result set\n if len(result_set) > 0:\n return (np.vstack([X, np.vstack(result_set)]),\n np.hstack([y, np.repeat(self.min_label,\n len(result_set))]))\n else:\n return np.vstack([X, X_samp]), np.hstack([y, y_samp])\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': 
self._random_state_init}\n\n\nclass ProWSyn(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{prowsyn,\n author=\"Barua, Sukarna\n and Islam, Md. Monirul\n and Murase, Kazuyuki\",\n editor=\"Pei, Jian\n and Tseng, Vincent S.\n and Cao, Longbing\n and Motoda, Hiroshi\n and Xu, Guandong\",\n title=\"ProWSyn: Proximity Weighted Synthetic\n Oversampling Technique for\n Imbalanced Data Set Learning\",\n booktitle=\"Advances in Knowledge Discovery\n and Data Mining\",\n year=\"2013\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"317--328\",\n isbn=\"978-3-642-37456-2\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n L=5,\n theta=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n L (int): number of levels\n theta (float): smoothing factor in weight formula\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(L, \"L\", 1)\n self.check_greater_or_equal(theta, \"theta\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.L = L\n self.theta = theta\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'L': [3, 5, 7],\n 'theta': [0.1, 1.0, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and\n target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # Step 1 - a bit generalized\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n m = \"Sampling is not needed\"\n _logger.warning(self.__class__.__name__ + \": \" + m)\n return X.copy(), y.copy()\n\n # Step 2\n P = np.where(y == self.min_label)[0]\n X_maj = X[y == self.maj_label]\n\n Ps = []\n proximity_levels = []\n\n # Step 3\n for i in range(self.L):\n if len(P) == 0:\n break\n # Step 3 a\n n_neighbors = min([len(P), self.n_neighbors])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X[P])\n distances, indices = nn.kneighbors(X_maj)\n\n # Step 3 b\n P_i = np.unique(np.hstack([i for i in indices]))\n\n # Step 3 c - proximity levels are encoded in the Ps list index\n Ps.append(P[P_i])\n proximity_levels.append(i+1)\n\n # Step 3 d\n P = np.delete(P, 
P_i)\n\n # Step 4\n if len(P) > 0:\n Ps.append(P)\n\n # Step 5\n if len(P) > 0:\n proximity_levels.append(i)\n proximity_levels = np.array(proximity_levels)\n\n # Step 6\n weights = np.array([np.exp(-self.theta*(proximity_levels[i] - 1))\n for i in range(len(proximity_levels))])\n # weights is the probability distribution of sampling in the\n # clusters identified\n weights = weights/np.sum(weights)\n\n suitable = False\n for i in range(len(weights)):\n if weights[i] > 0 and len(Ps[i]) > 1:\n suitable = True\n\n if not suitable:\n return X.copy(), y.copy()\n\n # do the sampling, from each cluster proportionally to the distribution\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.choice(\n np.arange(len(weights)), p=weights)\n if len(Ps[cluster_idx]) > 1:\n random_idx1, random_idx2 = self.random_state.choice(\n Ps[cluster_idx], 2, replace=False)\n samples.append(self.sample_between_points(\n X[random_idx1], X[random_idx2]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'L': self.L,\n 'theta': self.theta,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SL_graph_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{sl_graph_smote,\n author = {Bunkhumpornpat,\n Chumpol and Subpaiboonkit, Sitthichoke},\n booktitle= {13th International Symposium on Communications\n and Information Technologies},\n year = {2013},\n month = {09},\n pages = {570-575},\n title = {Safe level graph for synthetic minority\n over-sampling techniques},\n isbn = {978-1-4673-5578-0}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
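# Illustrative sketch of ProWSyn's proximity weighting used above, kept
# separate from the class: each minority partition at proximity level i gets
# weight exp(-theta*(i-1)), and the normalized weights are the probabilities
# of drawing a synthetic sample from that partition.  The function and
# variable names below are assumptions made only for this example.
import numpy as np

def proximity_weights(levels, theta=1.0):
    levels = np.asarray(levels, dtype=float)
    w = np.exp(-theta*(levels - 1.0))
    return w/np.sum(w)

# e.g. three partitions at proximity levels 1, 2, 3 with theta=1.0 give
# weights ~= [0.665, 0.245, 0.090]: closer partitions are sampled more often
print(proximity_weights([1, 2, 3], theta=1.0))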
1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # Fitting nearest neighbors model\n n_neighbors = min([len(X), self.n_neighbors])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X[y == self.min_label])\n\n # Computing safe level values\n safe_level_values = np.array(\n [np.sum(y[i] == self.min_label) for i in indices])\n\n # Computing skewness\n skewness = skew(safe_level_values)\n\n if skewness < 0:\n # left skewed\n s = Safe_Level_SMOTE(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n else:\n # right skewed\n s = Borderline_SMOTE1(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n return s.sample(X, y)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NRSBoundary_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{nrsboundary_smote,\n author= {Feng, Hu and Hang, Li},\n title= {A Novel Boundary Oversampling Algorithm Based on\n Neighborhood Rough Set Model: NRSBoundary-SMOTE},\n journal= {Mathematical Problems in Engineering},\n year= {2013},\n pages= {10},\n doi= {10.1155/2013/694809},\n url= {http://dx.doi.org/10.1155/694809}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n w=0.005,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
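# Minimal sketch of the safe-level-graph decision implemented above, using
# only scipy and scikit-learn; Safe_Level_SMOTE and Borderline_SMOTE1 are the
# package's own classes and are referenced here by name only.
import numpy as np
from scipy.stats import skew
from sklearn.neighbors import NearestNeighbors

def choose_branch(X, y, min_label, n_neighbors=5):
    nn = NearestNeighbors(n_neighbors=min(len(X), n_neighbors)).fit(X)
    _, ind = nn.kneighbors(X[y == min_label])
    # safe level = number of minority neighbours around each minority point
    safe_levels = np.array([np.sum(y[i] == min_label) for i in ind])
    # left-skewed safe-level distribution -> mostly safe points -> Safe-Level
    # SMOTE; right-skewed -> mostly borderline points -> Borderline-SMOTE1
    return 'Safe_Level_SMOTE' if skew(safe_levels) < 0 else 'Borderline_SMOTE1'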
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n w (float): used to set neighborhood radius\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(w, \"w\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.w = w\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'w': [0.005, 0.01, 0.05]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # determining the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # step 1\n bound_set = []\n pos_set = []\n\n # step 2\n X_min_indices = np.where(y == self.min_label)[0]\n X_min = X[X_min_indices]\n\n # step 3\n dm = pairwise_distances(X, X)\n d_max = np.max(dm, axis=1)\n max_dist = np.max(dm)\n np.fill_diagonal(dm, max_dist)\n d_min = np.min(dm, axis=1)\n\n delta = d_min + self.w*(d_max - d_min)\n\n # number of neighbors is not interesting here, as we use the\n # radius_neighbors function to extract the neighbors in a given radius\n n_neighbors = min([self.n_neighbors + 1, len(X)])\n nn = NearestNeighbors(n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n for i in range(len(X)):\n indices = nn.radius_neighbors(X[i].reshape(1, -1),\n delta[i],\n return_distance=False)\n\n n_minority = np.sum(y[indices[0]] == self.min_label)\n n_majority = np.sum(y[indices[0]] == self.maj_label)\n if y[i] == self.min_label and not n_minority == len(indices[0]):\n bound_set.append(i)\n elif y[i] == self.maj_label and n_majority == len(indices[0]):\n pos_set.append(i)\n\n bound_set = np.array(bound_set)\n pos_set = np.array(pos_set)\n\n if len(pos_set) == 0 or len(bound_set) == 0:\n return X.copy(), y.copy()\n\n # step 4 and 5\n # computing the nearest neighbors of the bound set from the\n # minority set\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distances, indices = nn.kneighbors(X[bound_set])\n\n # do the sampling\n samples = []\n trials = 0\n w = self.w\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(len(bound_set))\n random_neighbor_idx = 
self.random_state.choice(indices[idx][1:])\n x_new = self.sample_between_points(\n X[bound_set[idx]], X_min[random_neighbor_idx])\n\n # checking the conflict\n dist_from_pos_set = np.linalg.norm(X[pos_set] - x_new, axis=1)\n if np.all(dist_from_pos_set > delta[pos_set]):\n # no conflict\n samples.append(x_new)\n trials = trials + 1\n if trials > 1000 and len(samples) == 0:\n trials = 0\n w = w*0.9\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'w': self.w,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass LVQ_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{lvq_smote,\n title={LVQ-SMOTE – Learning Vector Quantization\n based Synthetic Minority Over–sampling\n Technique for biomedical data},\n author={Munehiro Nakamura and Yusuke Kajiwara\n and Atsushi Otsuka and Haruhiko Kimura},\n booktitle={BioData Mining},\n year={2013}\n }\n\n Notes:\n * This implementation is only a rough approximation of the method\n described in the paper. The main problem is that the paper uses\n many datasets to find similar patterns in the codebooks and\n replicate patterns appearing in other datasets to the imbalanced\n datasets based on their relative position compared to the codebook\n elements. What we do is clustering the minority class to extract\n a codebook as kmeans cluster means, then, find pairs of codebook\n elements which have the most similar relative position to a\n randomly selected pair of codebook elements, and translate nearby\n minority samples from the neighborhood one pair of codebook\n elements to the neighborood of another pair of codebook elements.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_application]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_clusters=10,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
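# Hedged sketch of the two NRSBoundary-SMOTE ingredients appearing above: the
# per-sample neighbourhood radius delta_i = d_min_i + w*(d_max_i - d_min_i),
# and the conflict test that rejects a candidate lying inside the radius of
# any positive-region (pure majority) sample.  Names are illustrative only.
import numpy as np
from sklearn.metrics import pairwise_distances

def neighbourhood_radii(X, w=0.005):
    dm = pairwise_distances(X, X)
    d_max = np.max(dm, axis=1)
    np.fill_diagonal(dm, np.max(dm))        # ignore the zero self-distance
    d_min = np.min(dm, axis=1)
    return d_min + w*(d_max - d_min)

def no_conflict(x_new, X_pos, delta_pos):
    # the synthetic point must stay outside every positive-region ball
    return np.all(np.linalg.norm(X_pos - x_new, axis=1) > delta_pos)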
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n n_clusters (int): number of clusters in vector quantization\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_clusters, \"n_clusters\", 3)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_clusters = n_clusters\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_clusters': [4, 8, 12]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # clustering X_min to extract codebook\n n_clusters = min([len(X_min), self.n_clusters])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X_min)\n codebook = kmeans.cluster_centers_\n\n # get nearest neighbors of minority samples to codebook samples\n n_neighbors = min([len(X_min), self.n_neighbors])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n distances, indices = nn.kneighbors(codebook)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n # randomly selecting a pair of codebook elements\n cb_0, cb_1 = self.random_state.choice(\n list(range(len(codebook))), 2, replace=False)\n diff = codebook[cb_0] - codebook[cb_1]\n min_dist = np.inf\n min_0 = None\n # finding another pair of codebook elements with similar offset\n for i in range(len(codebook)):\n for j in range(len(codebook)):\n if cb_0 != i and cb_0 != j and cb_1 != i and cb_1 != j:\n dd = np.linalg.norm(diff - (codebook[i] - codebook[j]))\n if dd < min_dist:\n min_dist = dd\n min_0 = self.random_state.choice([i, j])\n\n # translating a random neighbor of codebook element min_0 to\n # the neighborhood of point_0\n random_index = self.random_state.randint(len(indices[min_0]))\n sample = X_min[indices[min_0][random_index]]\n point_0 = codebook[cb_0] + (sample - codebook[min_0])\n\n samples.append(point_0)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the 
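# Standalone sketch of the codebook translation used above (the rough
# approximation of LVQ-SMOTE that the class notes describe): a minority
# neighbour of codebook element m is shifted so that its offset from m is
# reused around another codebook element c.  The helper name, rng, codebook
# and neighbour below are assumptions for this example only.
import numpy as np

def translate(codebook, c, m, neighbour):
    # keep the neighbour's relative position, but around codebook[c]
    return codebook[c] + (neighbour - codebook[m])

rng = np.random.RandomState(5)
codebook = rng.rand(4, 2)                  # e.g. 4 cluster centres in 2D
neighbour = codebook[1] + rng.rand(2)*0.05
print(translate(codebook, c=0, m=1, neighbour=neighbour))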
current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_clusters': self.n_clusters,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SOI_CJ(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{soi_cj,\n author = {Sánchez, Atlántida I. and Morales, Eduardo and\n Gonzalez, Jesus},\n year = {2013},\n month = {01},\n pages = {},\n title = {Synthetic Oversampling of Instances Using\n Clustering},\n volume = {22},\n booktitle = {International Journal of Artificial\n Intelligence Tools}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n method='interpolation',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of nearest neighbors in the SMOTE\n sampling\n method (str): 'interpolation'/'jittering'\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_isin(method, 'method', ['interpolation', 'jittering'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.method = method\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'method': ['interpolation', 'jittering']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def clustering(self, X, y):\n \"\"\"\n Implementation of the clustering technique described in the paper.\n\n Args:\n X (np.matrix): array of training instances\n y (np.array): target labels\n\n Returns:\n list(set): list of minority clusters\n \"\"\"\n nn_all = NearestNeighbors(n_jobs=self.n_jobs)\n nn_all.fit(X)\n\n X_min = X[y == self.min_label]\n\n # extract nearest neighbors of all samples from the set of\n # minority samples\n nn = NearestNeighbors(n_neighbors=len(X_min), n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_min)\n\n # initialize clusters by minority samples\n clusters = []\n for i in range(len(X_min)):\n # empty cluster added\n clusters.append(set())\n # while the closest instance is from the minority class, adding it\n # to the cluster\n for j in indices[i]:\n if y[j] == self.min_label:\n clusters[i].add(j)\n else:\n break\n\n # cluster merging phase\n is_intersection = True\n while is_intersection:\n is_intersection = False\n for i in range(len(clusters)):\n for j in range(i + 1, len(clusters)):\n # computing intersection\n intersection = clusters[i].intersection(clusters[j])\n if len(intersection) > 0:\n is_intersection = True\n # computing distance matrix\n dm = pairwise_distances(\n X[list(clusters[i])], X[list(clusters[j])])\n # largest distance\n max_dist_pair = np.where(dm == np.max(dm))\n # elements with 
the largest distance\n max_i = X[list(clusters[i])[max_dist_pair[0][0]]]\n max_j = X[list(clusters[j])[max_dist_pair[1][0]]]\n\n # finding midpoint and radius\n mid_point = (max_i + max_j)/2.0\n radius = np.linalg.norm(mid_point - max_i)\n\n # extracting points within the hypersphare of\n # radius \"radius\"\n mid_point_reshaped = mid_point.reshape(1, -1)\n ind = nn_all.radius_neighbors(mid_point_reshaped,\n radius,\n return_distance=False)\n\n n_min = np.sum(y[ind[0]] == self.min_label)\n if n_min > len(ind[0])/2:\n # if most of the covered elements come from the\n # minority class, merge clusters\n clusters[i].update(clusters[j])\n clusters[j] = set()\n else:\n # otherwise move the difference to the\n # bigger cluster\n if len(clusters[i]) > len(clusters[j]):\n clusters[j].difference_update(intersection)\n else:\n clusters[i].difference_update(intersection)\n\n # returning non-empty clusters\n return [c for c in clusters if len(c) > 0]\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n std_min = np.std(X_min, axis=0)\n\n # do the clustering\n _logger.info(self.__class__.__name__ + \": \" + \"Executing clustering\")\n clusters = self.clustering(X, y)\n\n # filtering the clusters, at least two points in a cluster are needed\n # for both interpolation and jittering (due to the standard deviation)\n clusters_filtered = [list(c) for c in clusters if len(c) > 2]\n\n if len(clusters_filtered) > 0:\n # if there are clusters having at least 2 elements, do the sampling\n cluster_nums = [len(c) for c in clusters_filtered]\n cluster_weights = cluster_nums/np.sum(cluster_nums)\n cluster_stds = [np.std(X[clusters_filtered[i]], axis=0)\n for i in range(len(clusters_filtered))]\n\n _logger.info(self.__class__.__name__ + \": \" +\n \"Executing sample generation\")\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.choice(\n np.arange(len(clusters_filtered)), p=cluster_weights)\n if self.method == 'interpolation':\n clust = clusters_filtered[cluster_idx]\n idx_0, idx_1 = self.random_state.choice(clust,\n 2,\n replace=False)\n X_0, X_1 = X[idx_0], X[idx_1]\n samples.append(\n self.sample_between_points_componentwise(X_0, X_1))\n elif self.method == 'jittering':\n clust_std = cluster_stds[cluster_idx]\n std = np.min(np.vstack([std_min, clust_std]), axis=0)\n clust = clusters_filtered[cluster_idx]\n idx = self.random_state.choice(clust)\n X_samp = self.sample_by_jittering_componentwise(X[idx],\n std)\n samples.append(X_samp)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.array([self.min_label]*len(samples))]))\n else:\n # otherwise fall back to standard smote\n _logger.warning(self.__class__.__name__ + \": \" +\n \"No clusters with more than 2 elements\")\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': 
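# A rough illustration (not the class's code) of the cluster-merging test in
# the clustering routine above: take the farthest pair of points across two
# candidate clusters, build the hypersphere centred on their midpoint, and
# merge the clusters only if most of the covered samples are minority
# samples.  Function and argument names are assumptions.
import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors

def should_merge(X, y, min_label, cluster_i, cluster_j, nn_all=None):
    nn_all = nn_all or NearestNeighbors().fit(X)
    dm = pairwise_distances(X[cluster_i], X[cluster_j])
    a, b = np.unravel_index(np.argmax(dm), dm.shape)
    far_i, far_j = X[cluster_i[a]], X[cluster_j[b]]
    mid, radius = (far_i + far_j)/2.0, np.linalg.norm(far_i - far_j)/2.0
    ind = nn_all.radius_neighbors(mid.reshape(1, -1), radius,
                                  return_distance=False)[0]
    return np.sum(y[ind] == min_label) > len(ind)/2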
self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'method': self.method,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ROSE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{rose,\n author=\"Menardi, Giovanna\n and Torelli, Nicola\",\n title=\"Training and assessing classification rules with\n imbalanced data\",\n journal=\"Data Mining and Knowledge Discovery\",\n year=\"2014\",\n month=\"Jan\",\n day=\"01\",\n volume=\"28\",\n number=\"1\",\n pages=\"92--122\",\n issn=\"1573-756X\",\n doi=\"10.1007/s10618-012-0295-5\",\n url=\"https://doi.org/10.1007/s10618-012-0295-5\"\n }\n\n Notes:\n * It is not entirely clear if the authors propose kernel density\n estimation or the fitting of simple multivariate Gaussians\n on the minority samples. The latter seems to be more likely,\n I implement that approach.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self, proportion=1.0, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0.0)\n\n self.proportion = proportion\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # Estimating the H matrix\n std = np.std(X_min, axis=0)\n d = len(X[0])\n n = len(X_min)\n H = std*(4.0/((d + 1)*n))**(1.0/(d + 4))\n\n # do the sampling\n samples = []\n for _ in range(n_to_sample):\n random_idx = self.random_state.randint(len(X_min))\n samples.append(self.sample_by_gaussian_jittering(\n X_min[random_idx], H))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_OUT(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_out_smote_cosine_selected_smote,\n title={SMOTE-Out, SMOTE-Cosine, and Selected-SMOTE: An\n enhancement strategy to handle imbalance in\n data level},\n author={Fajri Koto},\n journal={2014 International Conference on Advanced\n Computer Science 
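# Compact sketch of the ROSE-style smoothed bootstrap above, under the same
# reading as the class notes (a diagonal Gaussian kernel around each selected
# minority point rather than a full KDE).  rng and the toy data are
# assumptions; the bandwidth formula mirrors the one used above.
import numpy as np

def rose_samples(X_min, n_to_sample, rng):
    d, n = X_min.shape[1], len(X_min)
    # per-dimension bandwidth, as in the H computation above
    H = np.std(X_min, axis=0)*(4.0/((d + 1)*n))**(1.0/(d + 4))
    idx = rng.randint(0, n, n_to_sample)
    return X_min[idx] + rng.standard_normal((n_to_sample, d))*H

rng = np.random.RandomState(42)
print(rose_samples(rng.rand(20, 3), 5, rng).shape)   # (5, 3)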
and Information System},\n year={2014},\n pages={280-284}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): parameter of the NearestNeighbors component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n minority_indices = np.where(y == self.min_label)[0]\n\n # nearest neighbors among minority points\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn_min = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs).fit(X_min)\n\n min_distances, min_indices = nn_min.kneighbors(X_min)\n # nearest neighbors among majority points\n n_neighbors = min([len(X_maj), self.n_neighbors+1])\n nn_maj = NearestNeighbors(\n n_neighbors=n_neighbors, n_jobs=self.n_jobs).fit(X_maj)\n maj_distances, maj_indices = nn_maj.kneighbors(X_min)\n\n # generate samples\n samples = []\n for _ in range(n_to_sample):\n # implementation of Algorithm 1 in the paper\n random_idx = self.random_state.choice(\n np.arange(len(minority_indices)))\n u = X[minority_indices[random_idx]]\n v = X_maj[self.random_state.choice(maj_indices[random_idx])]\n dif1 = u - v\n uu = u + self.random_state.random_sample()*0.3*dif1\n x = X_min[self.random_state.choice(min_indices[random_idx][1:])]\n dif2 = uu - x\n w = x + self.random_state.random_sample()*0.5*dif2\n\n samples.append(w)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': 
self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_Cosine(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_out_smote_cosine_selected_smote,\n title={SMOTE-Out, SMOTE-Cosine, and Selected-SMOTE:\n An enhancement strategy to handle imbalance\n in data level},\n author={Fajri Koto},\n journal={2014 International Conference on Advanced\n Computer Science and Information System},\n year={2014},\n pages={280-284}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): parameter of the NearestNeighbors component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n minority_indices = np.where(y == self.min_label)[0]\n\n # Fitting the nearest neighbors models to the minority and\n # majority data using two different metrics for the minority\n nn_min_euc = NearestNeighbors(n_neighbors=len(X_min),\n n_jobs=self.n_jobs)\n nn_min_euc.fit(X_min)\n nn_min_euc_dist, nn_min_euc_ind = nn_min_euc.kneighbors(X_min)\n\n nn_min_cos = NearestNeighbors(n_neighbors=len(X_min),\n metric='cosine',\n n_jobs=self.n_jobs)\n nn_min_cos.fit(X_min)\n nn_min_cos_dist, nn_min_cos_ind = nn_min_cos.kneighbors(X_min)\n\n nn_maj = NearestNeighbors(n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs)\n nn_maj.fit(X_maj)\n nn_maj_dist, nn_maj_ind = nn_maj.kneighbors(X_min)\n\n samples = []\n for _ in range(n_to_sample):\n random_idx = self.random_state.choice(\n np.arange(len(minority_indices)))\n u = X[minority_indices[random_idx]]\n # get the rank of each minority sample according to their distance\n # from u\n to_sort_euc = zip(\n 
nn_min_euc_ind[random_idx], np.arange(len(X_min)))\n _, sorted_by_euc_ind = zip(*(sorted(to_sort_euc,\n key=lambda x: x[0])))\n to_sort_cos = zip(\n nn_min_cos_ind[random_idx], np.arange(len(X_min)))\n _, sorted_by_cos_ind = zip(*(sorted(to_sort_cos,\n key=lambda x: x[0])))\n # adding the ranks to get the composite similarity measure (called\n # voting in the paper)\n ranked_min_indices = sorted_by_euc_ind + sorted_by_cos_ind\n # sorting the ranking\n to_sort = zip(ranked_min_indices, np.arange(len(X_min)))\n _, sorted_ranking = zip(*(sorted(to_sort, key=lambda x: x[0])))\n # get the indices of the n_neighbors nearest neighbors according\n # to the composite metrics\n min_indices = sorted_ranking[1:(self.n_neighbors + 1)]\n\n v = X_maj[self.random_state.choice(nn_maj_ind[random_idx])]\n dif1 = u - v\n uu = u + self.random_state.random_sample()*0.3*dif1\n x = X_min[self.random_state.choice(min_indices[1:])]\n dif2 = uu - x\n w = x + self.random_state.random_sample()*0.5*dif2\n samples.append(w)\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Selected_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_out_smote_cosine_selected_smote,\n title={SMOTE-Out, SMOTE-Cosine, and Selected-SMOTE: An\n enhancement strategy to handle imbalance in\n data level},\n author={Fajri Koto},\n journal={2014 International Conference on Advanced\n Computer Science and Information System},\n year={2014},\n pages={280-284}\n }\n\n Notes:\n * Significant attribute selection was not described in the paper,\n therefore we have implemented something meaningful.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n perc_sign_attr=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n strategy (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
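# Sketch of the generation step shared by SMOTE-Out and SMOTE-Cosine above:
# push a minority point u slightly away from a majority neighbour v, then
# interpolate between that extrapolated point and a minority neighbour x.
# The rank-voting helper mirrors the composite Euclidean+cosine ordering used
# by SMOTE-Cosine.  All names here are illustrative, not the classes'
# internals.
import numpy as np

def smote_out_point(u, v, x, rng):
    uu = u + rng.random_sample()*0.3*(u - v)      # step away from the majority
    return x + rng.random_sample()*0.5*(uu - x)   # step back towards minority

def rank_voting(euc_neighbour_order, cos_neighbour_order):
    # each argument lists sample indices from nearest to farthest; summing the
    # two per-sample ranks and sorting gives the composite neighbour order
    ranks_euc = np.argsort(euc_neighbour_order)
    ranks_cos = np.argsort(cos_neighbour_order)
    return np.argsort(ranks_euc + ranks_cos)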
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): parameter of the NearestNeighbors component\n perc_sign_attr (float): [0,1] - percentage of significant\n attributes\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_neighbors, 'n_neighbors', 1)\n self.check_in_range(perc_sign_attr, 'perc_sign_attr', [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.perc_sign_attr = perc_sign_attr\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'perc_sign_attr': [0.3, 0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n minority_indices = np.where(y == self.min_label)[0]\n\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn_min_euc = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs).fit(X_min)\n\n nn_min_dist, nn_min_ind = nn_min_euc.kneighbors(X_min)\n\n # significant attribute selection was not described in the paper\n # I have implemented significant attribute selection by checking\n # the overlap between ranges of minority and majority class attributes\n # the attributes with bigger overlap respecting their ranges\n # are considered more significant\n min_ranges_a = np.min(X_min, axis=0)\n min_ranges_b = np.max(X_min, axis=0)\n maj_ranges_a = np.min(X_maj, axis=0)\n maj_ranges_b = np.max(X_maj, axis=0)\n\n # end points of overlaps\n max_a = np.max(np.vstack([min_ranges_a, maj_ranges_a]), axis=0)\n min_b = np.min(np.vstack([min_ranges_b, maj_ranges_b]), axis=0)\n\n # size of overlap\n overlap = min_b - max_a\n\n # replacing negative values (no overlap) by zero\n overlap = np.where(overlap < 0, 0, overlap)\n # percentage of overlap compared to the ranges of attributes in the\n # minority set\n percentages = overlap/(min_ranges_b - min_ranges_a)\n # fixing zero division if some attributes have zero range\n percentages = np.nan_to_num(percentages)\n # number of significant attributes to determine\n num_sign_attr = min(\n [1, int(np.rint(self.perc_sign_attr*len(percentages)))])\n\n significant_attr = (percentages >= sorted(\n percentages)[-num_sign_attr]).astype(int)\n\n samples = 
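# A simplified, standalone take on the attribute-significance heuristic
# described above: score each attribute by how much the minority and majority
# value ranges overlap, relative to the minority range, and flag the top
# fraction as significant.  This mirrors the idea, not the exact code path.
import numpy as np

def significant_attributes(X_min, X_maj, perc_sign_attr=0.5):
    lo = np.maximum(np.min(X_min, axis=0), np.min(X_maj, axis=0))
    hi = np.minimum(np.max(X_min, axis=0), np.max(X_maj, axis=0))
    overlap = np.maximum(hi - lo, 0.0)
    minority_range = np.max(X_min, axis=0) - np.min(X_min, axis=0)
    with np.errstate(divide='ignore', invalid='ignore'):
        score = np.nan_to_num(overlap/minority_range)
    k = max(1, int(round(perc_sign_attr*len(score))))
    threshold = np.sort(score)[-k]
    return (score >= threshold).astype(int)   # 1 = attribute used for sampling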
[]\n for _ in range(n_to_sample):\n random_idx = self.random_state.choice(range(len(minority_indices)))\n u = X[minority_indices[random_idx]]\n v = X_min[self.random_state.choice(nn_min_ind[random_idx][1:])]\n samples.append(self.sample_between_points_componentwise(\n u, v, significant_attr))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'perc_sign_attr': self.perc_sign_attr,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass LN_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{ln_smote,\n author={Maciejewski, T. and Stefanowski, J.},\n booktitle={2011 IEEE Symposium on Computational\n Intelligence and Data Mining (CIDM)},\n title={Local neighbourhood extension of SMOTE for\n mining imbalanced data},\n year={2011},\n volume={},\n number={},\n pages={104-111},\n keywords={Bayes methods;data mining;pattern\n classification;local neighbourhood\n extension;imbalanced data mining;\n focused resampling technique;SMOTE\n over-sampling method;naive Bayes\n classifiers;Noise measurement;Noise;\n Decision trees;Breast cancer;\n Sensitivity;Data mining;Training},\n doi={10.1109/CIDM.2011.5949434},\n ISSN={},\n month={April}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): parameter of the NearestNeighbors component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0.0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n if self.n_neighbors + 2 > len(X):\n n_neighbors = len(X) - 2\n else:\n 
n_neighbors = self.n_neighbors\n\n if n_neighbors < 2:\n return X.copy(), y.copy()\n\n # nearest neighbors of each instance to each instance in the dataset\n nn = NearestNeighbors(n_neighbors=n_neighbors + 2, n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X)\n\n minority_indices = np.where(y == self.min_label)[0]\n\n # dimensionality\n d = len(X[0])\n\n def safe_level(p_idx, n_idx=None):\n \"\"\"\n computing the safe level of samples\n\n Args:\n p_idx (int): index of positive sample\n n_idx (int): index of other sample\n\n Returns:\n int: safe level\n \"\"\"\n if n_idx is None:\n # implementation for 1 sample only\n return np.sum(y[indices[p_idx][1:-1]] == self.min_label)\n else:\n # implementation for 2 samples\n if ((not y[n_idx] != self.maj_label)\n and p_idx in indices[n_idx][1:-1]):\n # -1 because p_idx will be replaced\n n_positives = np.sum(\n y[indices[n_idx][1:-1]] == self.min_label) - 1\n if y[indices[n_idx][-1]] == self.min_label:\n # this is the effect of replacing p_idx by the next\n # (k+1)th neighbor\n n_positives = n_positives + 1\n return n_positives\n return np.sum(y[indices[n_idx][1:-1]] == self.min_label)\n\n def random_gap(slp, sln, n_label):\n \"\"\"\n determining random gap\n\n Args:\n slp (int): safe level of p\n sln (int): safe level of n\n n_label (int): label of n\n\n Returns:\n float: gap\n \"\"\"\n delta = 0\n if sln == 0 and slp > 0:\n return delta\n else:\n sl_ratio = slp/sln\n if sl_ratio == 1:\n delta = self.random_state.random_sample()\n elif sl_ratio > 1:\n delta = self.random_state.random_sample()/sl_ratio\n else:\n delta = 1.0 - self.random_state.random_sample()*sl_ratio\n if not n_label == self.min_label:\n delta = delta*sln/(n_neighbors)\n return delta\n\n # generating samples\n trials = 0\n samples = []\n while len(samples) < n_to_sample:\n p_idx = self.random_state.choice(minority_indices)\n # extract random neighbor of p\n n_idx = self.random_state.choice(indices[p_idx][1:-1])\n\n # checking can-create criteria\n slp = safe_level(p_idx)\n sln = safe_level(p_idx, n_idx)\n\n if (not slp == 0) or (not sln == 0):\n # can create\n p = X[p_idx]\n n = X[n_idx]\n x_new = p.copy()\n\n for a in range(d):\n delta = random_gap(slp, sln, y[n_idx])\n diff = n[a] - p[a]\n x_new[a] = p[a] + delta*diff\n samples.append(x_new)\n\n trials = trials + 1\n if len(samples)/trials < 1.0/n_to_sample:\n _logger.info(self.__class__.__name__ + \": \" +\n \"no instances with slp > 0 and sln > 0 found\")\n return X.copy(), y.copy()\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MWMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @ARTICLE{mwmote,\n author={Barua, S. and Islam, M. M. and Yao, X. 
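# Illustrative re-statement of the LN-SMOTE gap rule implemented above: the
# interpolation gap between a minority point p and its neighbour n depends on
# their safe levels (slp, sln), and is additionally shrunk towards p when n
# comes from the majority class.  rng and the function name are assumptions.
import numpy as np

def ln_smote_gap(slp, sln, n_is_minority, n_neighbors, rng):
    if sln == 0 and slp > 0:
        return 0.0                                  # stay on the safe point p
    ratio = slp/sln
    if ratio == 1:
        delta = rng.random_sample()                 # anywhere on the segment
    elif ratio > 1:
        delta = rng.random_sample()/ratio           # keep close to p
    else:
        delta = 1.0 - rng.random_sample()*ratio     # keep close to n
    if not n_is_minority:
        delta = delta*sln/n_neighbors               # majority neighbour: shrink
    return delta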
and\n Murase, K.},\n journal={IEEE Transactions on Knowledge and Data\n Engineering},\n title={MWMOTE--Majority Weighted Minority Oversampling\n Technique for Imbalanced Data Set Learning},\n year={2014},\n volume={26},\n number={2},\n pages={405-425},\n keywords={learning (artificial intelligence);pattern\n clustering;sampling methods;AUC;area under\n curve;ROC;receiver operating curve;G-mean;\n geometric mean;minority class cluster;\n clustering approach;weighted informative\n minority class samples;Euclidean distance;\n hard-to-learn informative minority class\n samples;majority class;synthetic minority\n class samples;synthetic oversampling\n methods;imbalanced learning problems;\n imbalanced data set learning;\n MWMOTE-majority weighted minority\n oversampling technique;Sampling methods;\n Noise measurement;Boosting;Simulation;\n Complexity theory;Interpolation;Abstracts;\n Imbalanced learning;undersampling;\n oversampling;synthetic sample generation;\n clustering},\n doi={10.1109/TKDE.2012.232},\n ISSN={1041-4347},\n month={Feb}}\n\n Notes:\n * The original method was not prepared for the case of having clusters\n of 1 elements.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n k1=5,\n k2=5,\n k3=5,\n M=10,\n cf_th=5.0,\n cmax=10.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n k1 (int): parameter of the NearestNeighbors component\n k2 (int): parameter of the NearestNeighbors component\n k3 (int): parameter of the NearestNeighbors component\n M (int): number of clusters\n cf_th (float): cutoff threshold\n cmax (float): maximum closeness value\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(k1, 'k1', 1)\n self.check_greater_or_equal(k2, 'k2', 1)\n self.check_greater_or_equal(k3, 'k3', 1)\n self.check_greater_or_equal(M, 'M', 1)\n self.check_greater_or_equal(cf_th, 'cf_th', 0)\n self.check_greater_or_equal(cmax, 'cmax', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.k1 = k1\n self.k2 = k2\n self.k3 = k3\n self.M = M\n self.cf_th = cf_th\n self.cmax = cmax\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'k1': [5, 9],\n 'k2': [5, 9],\n 'k3': [5, 9],\n 'M': [4, 10],\n 'cf_th': [5.0],\n 'cmax': [10.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = 
self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n minority = np.where(y == self.min_label)[0]\n\n # Step 1\n n_neighbors = min([len(X), self.k1 + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn.fit(X)\n dist1, ind1 = nn.kneighbors(X)\n\n # Step 2\n arr = [i for i in minority if np.sum(y[ind1[i][1:]] == self.min_label)]\n filtered_minority = np.array(arr)\n\n if len(filtered_minority) == 0:\n _logger.info(self.__class__.__name__ + \": \" +\n \"filtered_minority array is empty\")\n return X.copy(), y.copy()\n\n # Step 3 - ind2 needs to be indexed by indices of the lengh of X_maj\n nn_maj = NearestNeighbors(n_neighbors=self.k2, n_jobs=self.n_jobs)\n nn_maj.fit(X_maj)\n dist2, ind2 = nn_maj.kneighbors(X[filtered_minority])\n\n # Step 4\n border_majority = np.unique(ind2.flatten())\n\n # Step 5 - ind3 needs to be indexed by indices of the length of X_min\n n_neighbors = min([self.k3, len(X_min)])\n nn_min = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn_min.fit(X_min)\n dist3, ind3 = nn_min.kneighbors(X_maj[border_majority])\n\n # Step 6 - informative minority indexes X_min\n informative_minority = np.unique(ind3.flatten())\n\n def closeness_factor(y, x, cf_th=self.cf_th, cmax=self.cmax):\n \"\"\"\n Closeness factor according to the Eq (6)\n\n Args:\n y (np.array): training instance (border_majority)\n x (np.array): training instance (informative_minority)\n cf_th (float): cutoff threshold\n cmax (float): maximum values\n\n Returns:\n float: closeness factor\n \"\"\"\n d = np.linalg.norm(y - x)/len(y)\n if d == 0.0:\n d = 0.1\n if 1.0/d < cf_th:\n f = 1.0/d\n else:\n f = cf_th\n return f/cf_th*cmax\n\n # Steps 7 - 9\n _logger.info(self.__class__.__name__ + \": \" +\n 'computing closeness factors')\n closeness_factors = np.zeros(\n shape=(len(border_majority), len(informative_minority)))\n for i in range(len(border_majority)):\n bm_i = border_majority[i]\n for j in range(len(informative_minority)):\n im_j = informative_minority[j]\n closeness_factors[i, j] = closeness_factor(X_maj[bm_i],\n X_min[im_j])\n\n _logger.info(self.__class__.__name__ + \": \" +\n 'computing information weights')\n information_weights = np.zeros(\n shape=(len(border_majority), len(informative_minority)))\n for i in range(len(border_majority)):\n norm_factor = np.sum(closeness_factors[i, :])\n for j in range(len(informative_minority)):\n cf_ij = closeness_factors[i, j]\n information_weights[i, j] = cf_ij**2/norm_factor\n\n selection_weights = np.sum(information_weights, axis=0)\n selection_probabilities = selection_weights/np.sum(selection_weights)\n\n # Step 10\n _logger.info(self.__class__.__name__ + \": \" + 'do clustering')\n n_clusters = min([len(X_min), self.M])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X_min)\n imin_labels = kmeans.labels_[informative_minority]\n\n clusters = [np.where(imin_labels == i)[0]\n for i in range(np.max(kmeans.labels_)+1)]\n\n # Step 11\n samples = []\n\n # Step 12\n for i in range(n_to_sample):\n random_index = self.random_state.choice(informative_minority,\n p=selection_probabilities)\n cluster_label = kmeans.labels_[random_index]\n cluster = clusters[cluster_label]\n random_index_in_cluster = self.random_state.choice(cluster)\n 
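# Self-contained sketch of the MWMOTE weighting above: closeness factors
# between border-majority and informative-minority points (Eq. (6)), squared
# and row-normalised into information weights, then summed per minority point
# and normalised into the selection probabilities used in Step 12.  Names and
# the toy data are assumptions.
import numpy as np

def selection_probabilities(X_border_maj, X_info_min, cf_th=5.0, cmax=10.0):
    def closeness(ym, xm):
        d = np.linalg.norm(ym - xm)/len(ym)
        d = 0.1 if d == 0.0 else d
        return min(1.0/d, cf_th)/cf_th*cmax

    cf = np.array([[closeness(ym, xm) for xm in X_info_min]
                   for ym in X_border_maj])
    info = cf**2/np.sum(cf, axis=1, keepdims=True)      # information weights
    sel = np.sum(info, axis=0)                          # per-minority weight
    return sel/np.sum(sel)

rng = np.random.RandomState(7)
print(selection_probabilities(rng.rand(6, 2), rng.rand(4, 2)))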
X_random = X_min[random_index]\n X_random_cluster = X_min[random_index_in_cluster]\n samples.append(self.sample_between_points(X_random,\n X_random_cluster))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'k1': self.k1,\n 'k2': self.k2,\n 'k3': self.k3,\n 'M': self.M,\n 'cf_th': self.cf_th,\n 'cmax': self.cmax,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass PDFOS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{pdfos,\n title = \"PDFOS: PDF estimation based over-sampling for\n imbalanced two-class problems\",\n journal = \"Neurocomputing\",\n volume = \"138\",\n pages = \"248 - 259\",\n year = \"2014\",\n issn = \"0925-2312\",\n doi = \"https://doi.org/10.1016/j.neucom.2014.02.006\",\n author = \"Ming Gao and Xia Hong and Sheng Chen and Chris\n J. Harris and Emad Khalaf\",\n keywords = \"Imbalanced classification, Probability density\n function based over-sampling, Radial basis\n function classifier, Orthogonal forward\n selection, Particle swarm optimisation\"\n }\n\n Notes:\n * Not prepared for low-rank data.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_density_estimation]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def _sample_by_kernel_density_estimation(self,\n X,\n n_to_sample,\n n_optimize=100):\n \"\"\"\n Sample n_to_sample instances by kernel density estimation\n\n Args:\n X_min (np.array): minority data\n n_to_sample (int): number of instances to sample\n n_optimize (int): number of vectors used for the optimization\n process\n \"\"\"\n # dimensionality of the data\n m = len(X[0])\n\n # computing the covariance matrix of the data\n S = np.cov(X, rowvar=False)\n message = \"Condition number of covariance matrix: %f\"\n message = message % np.linalg.cond(S)\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n message = \"Inputs size: %d\" % len(X)\n _logger.info(self.__class__.__name__ + \": \" + message)\n _logger.info(self.__class__.__name__ + \": \" + \"Input dim: %d\" % m)\n\n S_mrank = np.linalg.matrix_rank(S, tol=1e-2)\n message = \"Matrix rank of covariance matrix: %d\" % S_mrank\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n # checking the rank of the matrix\n if S_mrank < m:\n message = \"The covariance matrix is singular, fixing it by PCA\"\n _logger.info(self.__class__.__name__ + \": \" + 
message)\n message = \"dim: %d, rank: %d, size: %d\" % (m, S_mrank, len(X))\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n n_components = max([min([S_mrank, len(X)])-1, 2])\n if n_components == len(X[0]):\n return X.copy()\n\n pca = PCA(n_components=n_components)\n X_low_dim = pca.fit_transform(X)\n X_samp = self._sample_by_kernel_density_estimation(\n X_low_dim, n_to_sample, n_optimize)\n return pca.inverse_transform(X_samp)\n\n S_inv = np.linalg.inv(S)\n det = np.linalg.det(S)\n\n _logger.info(self.__class__.__name__ + \": \" + \"Determinant: %f\" % det)\n\n def eq_9(i, j, sigma, X):\n \"\"\"\n Eq (9) in the paper\n \"\"\"\n tmp = np.dot(np.dot((X[j] - X[i]), S_inv), (X[j] - X[i]))\n numerator = (np.sqrt(2)*sigma)**(-m)*np.exp(-(1/(4*sigma**2))*tmp)\n denominator = ((2*np.pi)**(m/2))\n return numerator/denominator\n\n def eq_5(i, j, sigma, X):\n \"\"\"\n Eq (5) in the paper\n \"\"\"\n tmp = np.dot(np.dot((X[j] - X[i]), S_inv), (X[j] - X[i]))\n numerator = sigma**(-m)*np.exp(-(1/(2*sigma**2))*tmp)\n denominator = ((2.0*np.pi)**(m/2))\n return numerator/denominator\n\n def eq_5_0(sigma, X):\n \"\"\"\n Eq (5) with the same vectors feeded in\n \"\"\"\n return sigma**(-m)/((2.0*np.pi)**(m/2))\n\n def eq_8(i, j, sigma, X):\n \"\"\"\n Eq (8) in the paper\n \"\"\"\n e9 = eq_9(i, j, sigma, X)\n e5 = eq_5(i, j, sigma, X)\n return e9 - 2*e5\n\n def M(sigma, X):\n \"\"\"\n Eq (7) in the paper\n \"\"\"\n total = 0.0\n for i in range(len(X)):\n for j in range(len(X)):\n total = total + eq_8(i, j, sigma, X)\n\n a = total/len(X)**2\n b = 2.0*eq_5_0(sigma, X)/len(X)\n return a + b\n\n # finding the best sigma parameter\n best_sigma = 0\n error = np.inf\n # the dataset is reduced to make the optimization more efficient\n domain = range(len(X))\n n_to_choose = min([len(X), n_optimize])\n X_reduced = X[self.random_state.choice(domain,\n n_to_choose,\n replace=False)]\n\n # we suppose that the data is normalized, thus, this search space\n # should be meaningful\n for sigma in np.logspace(-5, 2, num=20):\n e = M(sigma, X_reduced)\n if e < error:\n error = e\n best_sigma = sigma\n _logger.info(self.__class__.__name__ + \": \" +\n \"best sigma found: %f\" % best_sigma)\n\n # generating samples according to the\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(X))\n samples.append(self.random_state.multivariate_normal(\n X[idx], best_sigma*S))\n\n return np.vstack(samples)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # scaling the data to aid numerical stability\n ss = StandardScaler()\n X_ss = ss.fit_transform(X)\n\n X_min = X_ss[y == self.min_label]\n\n # generating samples by kernel density estimation\n samples = self._sample_by_kernel_density_estimation(X_min,\n n_to_sample,\n n_optimize=100)\n\n return (np.vstack([X, ss.inverse_transform(samples)]),\n 
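# Condensed sketch of the PDFOS bandwidth search above: evaluate the
# criterion M(sigma) on a log-spaced grid of sigmas and keep the minimiser;
# samples are then drawn with covariance sigma*S around random minority
# points, as in the code above.  Purely illustrative and unoptimised (the
# original also restricts the search to a reduced subset of the data).
import numpy as np

def best_sigma(X, sigmas=np.logspace(-5, 2, num=20)):
    m = X.shape[1]
    S_inv = np.linalg.inv(np.cov(X, rowvar=False))

    def kernel(i, j, s, scale):      # shared form of Eq. (5)/(9)
        q = np.dot(np.dot(X[j] - X[i], S_inv), X[j] - X[i])
        return (scale*s)**(-m)*np.exp(-q/(2*(scale*s)**2))/((2*np.pi)**(m/2))

    def M(s):
        total = sum(kernel(i, j, s, np.sqrt(2)) - 2*kernel(i, j, s, 1.0)
                    for i in range(len(X)) for j in range(len(X)))
        return total/len(X)**2 + 2.0*s**(-m)/((2*np.pi)**(m/2)*len(X))

    return min(sigmas, key=M)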
np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass IPADE_ID(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{ipade_id,\n title = \"Addressing imbalanced classification with\n instance generation techniques: IPADE-ID\",\n journal = \"Neurocomputing\",\n volume = \"126\",\n pages = \"15 - 28\",\n year = \"2014\",\n note = \"Recent trends in Intelligent Data Analysis Online\n Data Processing\",\n issn = \"0925-2312\",\n doi = \"https://doi.org/10.1016/j.neucom.2013.01.050\",\n author = \"Victoria López and Isaac Triguero and Cristóbal\n J. Carmona and Salvador García and\n Francisco Herrera\",\n keywords = \"Differential evolution, Instance generation,\n Nearest neighbor, Decision tree, Imbalanced\n datasets\"\n }\n\n Notes:\n * According to the algorithm, if the addition of a majority sample\n doesn't improve the AUC during the DE optimization process,\n the addition of no further majority points is tried.\n * In the differential evolution the multiplication by a random number\n seems have a deteriorating effect, new scaling parameter added to\n fix this.\n * It is not specified how to do the evaluation.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_memetic,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n F=0.1,\n G=0.1,\n OT=20,\n max_it=40,\n dt_classifier=DecisionTreeClassifier(random_state=2),\n base_classifier=DecisionTreeClassifier(random_state=2),\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n F (float): control parameter of differential evolution\n G (float): control parameter of the evolution\n OT (int): number of optimizations\n max_it (int): maximum number of iterations for DE_optimization\n dt_classifier (obj): decision tree classifier object\n base_classifier (obj): classifier object\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater(F, 'F', 0)\n self.check_greater(G, 'G', 0)\n self.check_greater(OT, 'OT', 0)\n self.check_greater(max_it, 'max_it', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.F = F\n self.G = G\n self.OT = OT\n self.max_it = max_it\n self.dt_classifier = dt_classifier\n self.base_classifier = base_classifier\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n # as the OT and max_it parameters control the discovery of the feature\n # space it is enough to try sufficiently large numbers\n dt_classifiers = [DecisionTreeClassifier(random_state=2)]\n base_classifiers = [DecisionTreeClassifier(random_state=2)]\n parameter_combinations = {'F': [0.1, 0.2],\n 'G': [0.1, 0.2],\n 'OT': [30],\n 'max_it': [40],\n 'dt_classifier': dt_classifiers,\n 'base_classifier': base_classifiers}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and 
target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n mms = MinMaxScaler()\n X = mms.fit_transform(X)\n\n min_indices = np.where(y == self.min_label)[0]\n maj_indices = np.where(y == self.maj_label)[0]\n\n def DE_optimization(GS,\n GS_y,\n X,\n y,\n min_indices,\n maj_indices,\n classifier,\n for_validation):\n \"\"\"\n Implements the DE_optimization method of the paper.\n\n Args:\n GS (np.matrix): actual best training set\n GS_y (np.array): corresponding class labels\n X (np.matrix): complete training set\n y (np.array): all class labels\n min_indices (np.array): array of minority class labels in y\n maj_indices (np.array): array of majority class labels in y\n classifier (object): base classifier\n for_validation (np.array): array of indices for X used for\n validation\n\n Returns:\n np.matrix: optimized training set\n \"\"\"\n # evaluate training set\n AUC_GS = evaluate_ID(\n GS, GS_y, X[for_validation], y[for_validation], classifier)\n\n # optimizing the training set\n for _ in range(self.max_it):\n GS_hat = []\n # doing the differential evolution\n for i in range(len(GS)):\n if GS_y[i] == self.min_label:\n r1, r2, r3 = self.random_state.choice(min_indices,\n 3,\n replace=False)\n else:\n r1, r2, r3 = self.random_state.choice(maj_indices,\n 3,\n replace=False)\n\n random_value = self.random_state.random_sample()\n force_G = X[r1] - X[i]\n force_F = X[r2] - X[r3]\n value = GS[i] + self.G*random_value * \\\n force_G + self.F*force_F\n GS_hat.append(np.clip(value, 0.0, 1.0))\n\n # evaluating the current setting\n AUC_GS_hat = evaluate_ID(GS_hat,\n GS_y,\n X[for_validation],\n y[for_validation],\n classifier)\n\n if AUC_GS_hat > AUC_GS:\n GS = GS_hat\n AUC_GS = AUC_GS_hat\n\n return GS\n\n def evaluate_ID(GS, GS_y, TR, TR_y, base_classifier):\n \"\"\"\n Implements the evaluate_ID function of the paper.\n\n Args:\n GS (np.matrix): actual training set\n GS_y (np.array): list of corresponding class labels\n TR (np.matrix): complete training set\n TR_y (np.array): all class labels\n base_classifier (object): classifier to be used\n\n Returns:\n float: ROC AUC score\n \"\"\"\n base_classifier.fit(GS, GS_y)\n pred = base_classifier.predict_proba(TR)[:, np.where(\n base_classifier.classes_ == self.min_label)[0][0]]\n if len(np.unique(TR_y)) != 2:\n return 0.0\n return roc_auc_score(TR_y, pred)\n\n def evaluate_class(GS, GS_y, TR, TR_y, base_classifier):\n \"\"\"\n Implements the evaluate_ID function of the paper.\n\n Args:\n GS (np.matrix): actual training set\n GS_y (np.array): list of corresponding class labels\n TR (np.matrix): complete training set\n TR_y (np.array): all class labels\n base_classifier (object): classifier to be used\n\n Returns:\n float: accuracy score\n \"\"\"\n base_classifier.fit(GS, GS_y)\n pred = base_classifier.predict(TR)\n return accuracy_score(TR_y, pred)\n\n # Phase 1: Initialization\n _logger.info(self.__class__.__name__ + \": \" + \"Initialization\")\n self.dt_classifier.fit(X, y)\n leafs = self.dt_classifier.apply(X)\n unique_leafs = np.unique(leafs)\n used_in_GS = np.repeat(False, len(X))\n for_validation = np.where(np.logical_not(used_in_GS))[0]\n\n # extracting mean elements of the leafs\n GS = []\n GS_y = []\n for u in unique_leafs:\n indices = np.where(leafs == u)[0]\n GS.append(np.mean(X[indices], axis=0))\n GS_y.append(mode(y[indices]))\n if len(indices) == 1:\n 
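A simplified sketch of the differential-evolution step in DE_optimization above: each prototype is perturbed by G*rand*(x_r1 - g_i) + F*(x_r2 - x_r3), clipped to [0, 1], and the trial set replaces the current one only if the validation AUC improves. The toy dataset and the random initial prototype set stand in for the decision-tree leaf means used by the method.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(5)
X, y = make_classification(n_samples=200, weights=[0.9], random_state=5)
X = MinMaxScaler().fit_transform(X)
F, G, max_it = 0.1, 0.1, 20

# initial prototype set containing both classes (illustrative choice)
min_idx, maj_idx = np.where(y == 1)[0], np.where(y == 0)[0]
proto = np.hstack([rng.choice(min_idx, 5, replace=False),
                   rng.choice(maj_idx, 15, replace=False)])
GS, GS_y = X[proto].copy(), y[proto].copy()

def auc(GS, GS_y):
    clf = DecisionTreeClassifier(random_state=2).fit(GS, GS_y)
    pos = list(clf.classes_).index(1)
    return roc_auc_score(y, clf.predict_proba(X)[:, pos])

best = auc(GS, GS_y)
for _ in range(max_it):
    trial = []
    for i in range(len(GS)):
        same = min_idx if GS_y[i] == 1 else maj_idx
        r1, r2, r3 = rng.choice(same, 3, replace=False)
        move = G * rng.random_sample() * (X[r1] - GS[i]) + F * (X[r2] - X[r3])
        trial.append(np.clip(GS[i] + move, 0.0, 1.0))
    trial = np.array(trial)
    score = auc(trial, GS_y)
    if score > best:                    # greedy acceptance of the trial set
        GS, best = trial, score
print(best)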
used_in_GS[indices[0]] = True\n\n # updating the indices of the validation set excluding those used in GS\n for_validation = np.where(np.logical_not(used_in_GS))[0]\n _logger.info(self.__class__.__name__ + \": \" +\n \"Size of validation set %d\" % len(for_validation))\n if len(np.unique(y[for_validation])) == 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"No minority samples in validation set\")\n return X.copy(), y.copy()\n if len(np.unique(GS_y)) == 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"No minority samples in reduced dataset\")\n return X.copy(), y.copy()\n\n # DE optimization takes place\n _logger.info(self.__class__.__name__ + \": \" + \"DE optimization\")\n base_classifier = self.base_classifier.__class__(\n **(self.base_classifier.get_params()))\n GS = DE_optimization(GS, GS_y, X, y, min_indices,\n maj_indices, base_classifier, for_validation)\n # evaluate results\n base_classifier = self.base_classifier.__class__(\n **(self.base_classifier.get_params()))\n AUC = evaluate_ID(GS, GS_y, X[for_validation],\n y[for_validation], base_classifier)\n\n # Phase 2: Addition of new instances\n register_class = {self.min_label: 'optimizable',\n self.maj_label: 'optimizable'}\n number_of_optimizations = {self.min_label: 0,\n self.maj_label: 0}\n accuracy_class = {self.min_label: 0, self.maj_label: 0}\n\n _logger.info(self.__class__.__name__ + \": \" + \"Starting optimization\")\n while (AUC < 1.0\n and (register_class[self.min_label] == 'optimizable'\n or register_class[self.maj_label] == 'optimizable')):\n less_accuracy = np.inf\n # loop in line 8\n for i in [self.min_label, self.maj_label]:\n # condition in line 9\n if register_class[i] == 'optimizable':\n y_mask = y[for_validation] == i\n class_for_validation = for_validation[y_mask]\n bp = self.base_classifier.get_params()\n base_classifier = self.base_classifier.__class__(**(bp))\n accuracy_class[i] = evaluate_class(GS,\n GS_y,\n X[class_for_validation],\n y[class_for_validation],\n base_classifier)\n if accuracy_class[i] < less_accuracy:\n less_accuracy = accuracy_class[i]\n target_class = i\n # conditional in line 17\n if (target_class == self.min_label\n and number_of_optimizations[target_class] > 0):\n # it is not clear where does GS_trial coming from in line 18\n GS = DE_optimization(GS,\n GS_y,\n X,\n y,\n min_indices,\n maj_indices,\n base_classifier,\n for_validation)\n else:\n if target_class == self.min_label:\n idx = self.random_state.choice(min_indices)\n else:\n idx = self.random_state.choice(maj_indices)\n\n GS_trial = np.vstack([GS, X[idx]])\n GS_trial_y = np.hstack([GS_y, y[idx]])\n # removing idx from the validation set in order to keep\n # the validation fair\n for_validation_trial = for_validation.tolist()\n if idx in for_validation:\n for_validation_trial.remove(idx)\n\n for_validation_trial = np.array(\n for_validation_trial).astype(int)\n # doing optimization\n GS_trial = DE_optimization(GS_trial,\n GS_trial_y,\n X,\n y,\n min_indices,\n maj_indices,\n base_classifier,\n for_validation)\n\n # line 23\n bp = self.base_classifier.get_params()\n base_classifier = self.base_classifier.__class__(**(bp))\n\n AUC_trial = evaluate_ID(GS_trial,\n GS_trial_y,\n X[for_validation],\n y[for_validation],\n base_classifier)\n # conditional in line 24\n if AUC_trial > AUC:\n AUC = AUC_trial\n GS = GS_trial\n GS_y = GS_trial_y\n for_validation = for_validation_trial\n\n _logger.info(self.__class__.__name__ + \": \" +\n \"Size of validation set %d\" % len(for_validation))\n if 
len(np.unique(y[for_validation])) == 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"No minority samples in validation set\")\n return X.copy(), y.copy()\n if len(np.unique(GS_y)) == 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"No minority samples in reduced dataset\")\n return X.copy(), y.copy()\n\n number_of_optimizations[target_class] = 0\n else:\n # conditional in line 29\n if (target_class == self.min_label\n and number_of_optimizations[target_class] < self.OT):\n number_of_optimizations[target_class] += 1\n else:\n register_class[target_class] = 'non-optimizable'\n\n return mms.inverse_transform(GS), GS_y\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'F': self.F,\n 'G': self.G,\n 'OT': self.OT,\n 'max_it': self.max_it,\n 'n_jobs': self.n_jobs,\n 'dt_classifier': self.dt_classifier,\n 'base_classifier': self.base_classifier,\n 'random_state': self._random_state_init}\n\n\nclass RWO_sampling(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{rwo_sampling,\n author = {Zhang, Huaxzhang and Li, Mingfang},\n year = {2014},\n month = {11},\n pages = {},\n title = {RWO-Sampling: A Random Walk Over-Sampling Approach\n to Imbalanced Data Classification},\n volume = {20},\n booktitle = {Information Fusion}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n stds = np.diag(np.std(X_min, axis=0)/np.sqrt(len(X_min)))\n\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(X_min))\n samples.append(self.random_state.multivariate_normal(X_min[idx],\n stds))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.array([self.min_label]*len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current 
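A small sketch of the random-walk generation above, assuming toy minority data: each synthetic point steps away from a randomly chosen minority sample with per-feature step size std_j / sqrt(n_min).

import numpy as np

rng = np.random.RandomState(0)
X_min = rng.normal(loc=[2.0, -1.0], scale=[1.0, 0.5], size=(30, 2))   # toy data

step = np.std(X_min, axis=0) / np.sqrt(len(X_min))     # sigma_j / sqrt(n)
n_to_sample = 20
idx = rng.randint(len(X_min), size=n_to_sample)
samples = X_min[idx] + rng.normal(size=(n_to_sample, 2)) * step
print(samples.shape)                                   # (20, 2) new minority points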
sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NEATER(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{neater,\n author={Almogahed, B. A. and Kakadiaris, I. A.},\n booktitle={2014 22nd International Conference on\n Pattern Recognition},\n title={NEATER: Filtering of Over-sampled Data\n Using Non-cooperative Game Theory},\n year={2014},\n volume={},\n number={},\n pages={1371-1376},\n keywords={data handling;game theory;information\n filtering;NEATER;imbalanced data\n problem;synthetic data;filtering of\n over-sampled data using non-cooperative\n game theory;Games;Game theory;Vectors;\n Sociology;Statistics;Silicon;\n Mathematical model},\n doi={10.1109/ICPR.2014.245},\n ISSN={1051-4651},\n month={Aug}}\n\n Notes:\n * Evolving both majority and minority probabilities as nothing ensures\n that the probabilities remain in the range [0,1], and they need to\n be normalized.\n * The inversely weighted function needs to be cut at some value (like\n the alpha level), otherwise it will overemphasize the utility of\n having differing neighbors next to each other.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_borderline,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n smote_n_neighbors=5,\n b=5,\n alpha=0.1,\n h=20,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n smote_n_neighbors (int): number of neighbors in SMOTE sampling\n b (int): number of neighbors\n alpha (float): smoothing term\n h (int): number of iterations in evolution\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(smote_n_neighbors, \"smote_n_neighbors\", 1)\n self.check_greater_or_equal(b, \"b\", 1)\n self.check_greater_or_equal(alpha, \"alpha\", 0)\n self.check_greater_or_equal(h, \"h\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.smote_n_neighbors = smote_n_neighbors\n self.b = b\n self.alpha = alpha\n self.h = h\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'smote_n_neighbors': [3, 5, 7],\n 'b': [3, 5, 7],\n 'alpha': [0.1],\n 'h': [20]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # Applying SMOTE and ADASYN\n X_0, y_0 = SMOTE(proportion=self.proportion,\n 
n_neighbors=self.smote_n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n X_1, y_1 = ADASYN(n_neighbors=self.b,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n X_new = np.vstack([X_0, X_1[len(X):]])\n y_new = np.hstack([y_0, y_1[len(y):]])\n\n X_syn = X_new[len(X):]\n\n if len(X_syn) == 0:\n return X.copy(), y.copy()\n\n X_all = X_new\n y_all = y_new\n\n # binary indicator indicating synthetic instances\n synthetic = np.hstack(\n [np.array([False]*len(X)), np.array([True]*len(X_syn))])\n\n # initializing strategy probabilities\n prob = np.zeros(shape=(len(X_all), 2))\n prob.fill(0.5)\n for i in range(len(X)):\n if y[i] == self.min_label:\n prob[i, 0], prob[i, 1] = 0.0, 1.0\n else:\n prob[i, 0], prob[i, 1] = 1.0, 0.0\n\n # Finding nearest neighbors, +1 as X_syn is part of X_all and nearest\n # neighbors will be themselves\n nn = NearestNeighbors(n_neighbors=self.b + 1, n_jobs=self.n_jobs)\n nn.fit(X_all)\n distances, indices = nn.kneighbors(X_syn)\n\n # computing distances\n dm = pairwise_distances(X_syn, X_all)\n dm[dm == 0] = 1e-8\n dm = 1.0/dm\n dm[dm > self.alpha] = self.alpha\n\n def wprob_mixed(prob, i):\n ind = indices[i][1:]\n term_0 = 1*prob[i][0]*prob[ind, 0]\n term_1 = dm[i, ind]*(prob[i][1]*prob[ind, 0] +\n prob[i][0]*prob[ind, 1])\n term_2 = 1*prob[i][1]*prob[ind, 1]\n return np.sum(term_0 + term_1 + term_2)\n\n def wprob_min(prob, i):\n term_0 = 0*prob[indices[i][1:], 0]\n term_1 = dm[i, indices[i][1:]]*(1*prob[indices[i][1:], 0] +\n 0*prob[indices[i][1:], 1])\n term_2 = 1*prob[indices[i][1:], 1]\n return np.sum(term_0 + term_1 + term_2)\n\n def wprob_maj(prob, i):\n term_0 = 1*prob[indices[i][1:], 0]\n term_1 = dm[i, indices[i][1:]]*(0*prob[indices[i][1:], 0] +\n 1*prob[indices[i][1:], 1])\n term_2 = 0*prob[indices[i][1:], 1]\n return np.sum(term_0 + term_1 + term_2)\n\n def utilities(prob):\n \"\"\"\n Computes the utilit function\n\n Args:\n prob (np.matrix): strategy probabilities\n\n Returns:\n np.array, np.array, np.array: utility values, minority\n utilities, majority\n utilities\n \"\"\"\n\n domain = range(len(X_syn))\n util_mixed = np.array([wprob_mixed(prob, i) for i in domain])\n util_mixed = np.hstack([np.array([0]*len(X)), util_mixed])\n\n util_min = np.array([wprob_min(prob, i) for i in domain])\n util_min = np.hstack([np.array([0]*len(X)), util_min])\n\n util_maj = np.array([wprob_maj(prob, i) for i in domain])\n util_maj = np.hstack([np.array([0]*len(X)), util_maj])\n\n return util_mixed, util_min, util_maj\n\n def evolution(prob, synthetic, alpha=self.alpha):\n \"\"\"\n Executing one step of the probabilistic evolution\n\n Args:\n prob (np.matrix): strategy probabilities\n synthetic (np.array): flags of synthetic examples\n alpha (float): smoothing function\n\n Returns:\n np.matrix: updated probabilities\n \"\"\"\n util_mixed, util_min, util_maj = utilities(prob)\n\n prob_new = prob.copy()\n synthetic_values = prob[:, 1] * \\\n (alpha + util_min)/(alpha + util_mixed)\n prob_new[:, 1] = np.where(synthetic, synthetic_values, prob[:, 1])\n\n synthetic_values = prob[:, 0] * \\\n (alpha + util_maj)/(alpha + util_mixed)\n prob_new[:, 0] = np.where(synthetic, synthetic_values, prob[:, 0])\n\n norm_factor = np.sum(prob_new, axis=1)\n\n prob_new[:, 0] = prob_new[:, 0]/norm_factor\n prob_new[:, 1] = prob_new[:, 1]/norm_factor\n\n return prob_new\n\n # executing the evolution\n for _ in range(self.h):\n prob = evolution(prob, synthetic)\n\n # determining final labels\n y_all[len(X):] = np.argmax(prob[len(X):], 
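A compact sketch of the game-theoretic filtering above on toy data: synthetic candidates hold strategy probabilities, pay-offs against the b nearest neighbours are computed as in wprob_mixed / wprob_min / wprob_maj, and the probabilities are re-weighted and re-normalised for h rounds before the final argmax decides which candidates are kept as minority points.

import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(1)
alpha, h, b = 0.1, 20, 3
X_real = rng.normal(size=(20, 2))                       # toy training points
y_real = (rng.random_sample(20) < 0.3).astype(int)      # 1 = minority
X_syn = rng.normal(size=(6, 2))                         # toy synthetic candidates
X_all = np.vstack([X_real, X_syn])

prob = np.zeros((len(X_all), 2))                        # [P(majority), P(minority)]
prob[:len(X_real), 0] = (y_real == 0)
prob[:len(X_real), 1] = (y_real == 1)
prob[len(X_real):] = 0.5

nn = NearestNeighbors(n_neighbors=b + 1).fit(X_all)
_, ind = nn.kneighbors(X_syn)                           # first neighbour is the point itself
dm = np.minimum(1.0 / np.maximum(pairwise_distances(X_syn, X_all), 1e-8), alpha)

for _ in range(h):
    new = prob.copy()
    for i in range(len(X_syn)):
        j, w = ind[i][1:], dm[i, ind[i][1:]]
        p_i, p_j = prob[len(X_real) + i], prob[j]
        u_mixed = np.sum(p_i[0] * p_j[:, 0]
                         + w * (p_i[1] * p_j[:, 0] + p_i[0] * p_j[:, 1])
                         + p_i[1] * p_j[:, 1])
        u_min = np.sum(w * p_j[:, 0] + p_j[:, 1])       # always playing "minority"
        u_maj = np.sum(p_j[:, 0] + w * p_j[:, 1])       # always playing "majority"
        new[len(X_real) + i, 1] = p_i[1] * (alpha + u_min) / (alpha + u_mixed)
        new[len(X_real) + i, 0] = p_i[0] * (alpha + u_maj) / (alpha + u_mixed)
        new[len(X_real) + i] /= new[len(X_real) + i].sum()
    prob = new

print(np.argmax(prob[len(X_real):], axis=1))            # 1 -> kept as minority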
axis=1)\n\n return X_all, y_all\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'smote_n_neighbors': self.smote_n_neighbors,\n 'b': self.b,\n 'alpha': self.alpha,\n 'h': self.h,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass DEAGO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{deago,\n author={Bellinger, C. and Japkowicz, N. and\n Drummond, C.},\n booktitle={2015 IEEE 14th International\n Conference on Machine Learning\n and Applications (ICMLA)},\n title={Synthetic Oversampling for Advanced\n Radioactive Threat Detection},\n year={2015},\n volume={},\n number={},\n pages={948-953},\n keywords={radioactive waste;advanced radioactive\n threat detection;gamma-ray spectral\n classification;industrial nuclear\n facilities;Health Canadas national\n monitoring networks;Vancouver 2010;\n Isotopes;Training;Monitoring;\n Gamma-rays;Machine learning algorithms;\n Security;Neural networks;machine\n learning;classification;class\n imbalance;synthetic oversampling;\n artificial neural networks;\n autoencoders;gamma-ray spectra},\n doi={10.1109/ICMLA.2015.58},\n ISSN={},\n month={Dec}}\n\n Notes:\n * There is no hint on the activation functions and amounts of noise.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_density_estimation,\n OverSampling.cat_application]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n e=100,\n h=0.3,\n sigma=0.1,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n e (int): number of epochs\n h (float): fraction of number of hidden units\n sigma (float): training noise\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0.0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater(e, \"e\", 1)\n self.check_greater(h, \"h\", 0)\n self.check_greater(sigma, \"sigma\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.e = e\n self.h = h\n self.sigma = sigma\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'e': [40],\n 'h': [0.1, 0.2, 0.3, 0.4, 0.5],\n 'sigma': [0.05, 0.1, 0.2]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # 
ugly hack to get reproducible results from keras with\n # tensorflow backend\n if isinstance(self._random_state_init, int):\n import os\n os.environ['PYTHONHASHSEED'] = str(self._random_state_init)\n import keras as K\n np.random.seed(self._random_state_init)\n import random\n random.seed(self._random_state_init)\n # from tensorflow import set_random_seed\n import tensorflow\n try:\n tensorflow.set_random_seed(self._random_state_init)\n except Exception as e:\n tensorflow.random.set_seed(self._random_state_init)\n else:\n seed = 127\n import os\n os.environ['PYTHONHASHSEED'] = str(seed)\n import keras as K\n np.random.seed(seed)\n import random\n random.seed(seed)\n # from tensorflow import set_random_seed\n import tensorflow\n try:\n tensorflow.compat.v1.set_random_seed(seed)\n except Exception as e:\n tensorflow.random.set_seed(self._random_state_init)\n\n from keras import backend as K\n import tensorflow as tf\n try:\n session_conf = tf.compat.v1.ConfigProto(\n intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n sess = tf.compat.v1.Session(\n graph=tf.compat.v1.get_default_graph(), config=session_conf)\n K.set_session(sess)\n except Exception as e:\n session_conf = tf.compat.v1.ConfigProto(\n intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n sess = tf.compat.v1.Session(\n graph=tf.compat.v1.get_default_graph(), config=session_conf)\n tf.compat.v1.keras.backend.set_session(sess)\n\n if not hasattr(self, 'Input'):\n from keras.layers import Input, Dense, GaussianNoise\n from keras.models import Model\n from tensorflow.keras.callbacks import EarlyStopping\n\n self.Input = Input\n self.Dense = Dense\n self.GaussianNoise = GaussianNoise\n self.Model = Model\n self.EarlyStopping = EarlyStopping\n\n # sampling by smote\n X_samp, y_samp = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n # samples to map to the manifold extracted by the autoencoder\n X_init = X_samp[len(X):]\n\n if len(X_init) == 0:\n return X.copy(), y.copy()\n\n # normalizing\n X_min = X[y == self.min_label]\n ss = StandardScaler()\n X_min_normalized = ss.fit_transform(X_min)\n X_init_normalized = ss.transform(X_init)\n\n # extracting dimensions\n d = len(X[0])\n encoding_d = max([2, int(np.rint(d*self.h))])\n\n message = \"Input dimension: %d, encoding dimension: %d\"\n message = message % (d, encoding_d)\n _logger.info(self.__class__.__name__ + \": \" + message\n )\n\n # constructing the autoencoder\n callbacks = [self.EarlyStopping(monitor='val_loss', patience=2)]\n\n input_layer = self.Input(shape=(d,))\n noise = self.GaussianNoise(self.sigma)(input_layer)\n encoded = self.Dense(encoding_d, activation='relu')(noise)\n decoded = self.Dense(d, activation='linear')(encoded)\n\n dae = self.Model(input_layer, decoded)\n dae.compile(optimizer='adadelta', loss='mean_squared_error')\n actual_epochs = max([self.e, int(5000.0/len(X_min))])\n\n if len(X_min) > 10:\n val_perc = 0.2\n val_num = int(val_perc*len(X_min))\n X_min_train = X_min_normalized[:-val_num]\n X_min_val = X_min_normalized[-val_num:]\n\n dae.fit(X_min_train,\n X_min_train,\n epochs=actual_epochs,\n validation_data=(X_min_val, X_min_val),\n callbacks=callbacks,\n verbose=0)\n else:\n dae.fit(X_min_normalized, X_min_normalized,\n epochs=actual_epochs, verbose=0)\n\n # mapping the initial samples to the manifold\n samples = ss.inverse_transform(dae.predict(X_init_normalized))\n\n return (np.vstack([X, samples]),\n np.hstack([y, 
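A hedged sketch of the denoising-autoencoder mapping above, written against the tensorflow.keras API: a GaussianNoise / Dense(relu) / Dense(linear) network is trained to reconstruct the standardised minority class, and candidate points (random perturbations here, SMOTE output in the library) are pushed through it so they move towards the minority manifold.

import numpy as np
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.layers import Dense, GaussianNoise, Input
from tensorflow.keras.models import Model

rng = np.random.RandomState(3)
X_min = rng.normal(size=(100, 8))                         # toy minority data
candidates = X_min[rng.randint(100, size=30)] + rng.normal(scale=0.5, size=(30, 8))

ss = StandardScaler().fit(X_min)
X_n, C_n = ss.transform(X_min), ss.transform(candidates)

d, h_frac, sigma = X_min.shape[1], 0.3, 0.1
encoding_d = max(2, int(round(d * h_frac)))               # hidden width as a fraction of d

inp = Input(shape=(d,))
noisy = GaussianNoise(sigma)(inp)                         # training-time corruption
encoded = Dense(encoding_d, activation='relu')(noisy)
decoded = Dense(d, activation='linear')(encoded)
dae = Model(inp, decoded)
dae.compile(optimizer='adadelta', loss='mean_squared_error')
dae.fit(X_n, X_n, epochs=50, verbose=0)

samples = ss.inverse_transform(dae.predict(C_n, verbose=0))
print(samples.shape)                                      # candidates mapped to the manifold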
np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'e': self.e,\n 'h': self.h,\n 'sigma': self.sigma,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Gazzah(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{gazzah,\n author={Gazzah, S. and Hechkel, A. and Essoukri\n Ben Amara, N. },\n booktitle={2015 IEEE 12th International\n Multi-Conference on Systems,\n Signals Devices (SSD15)},\n title={A hybrid sampling method for\n imbalanced data},\n year={2015},\n volume={},\n number={},\n pages={1-6},\n keywords={computer vision;image classification;\n learning (artificial intelligence);\n sampling methods;hybrid sampling\n method;imbalanced data;\n diversification;computer vision\n domain;classical machine learning\n systems;intraclass variations;\n system performances;classification\n accuracy;imbalanced training data;\n training data set;over-sampling;\n minority class;SMOTE star topology;\n feature vector deletion;intra-class\n variations;distribution criterion;\n biometric data;true positive rate;\n Training data;Principal component\n analysis;Databases;Support vector\n machines;Training;Feature extraction;\n Correlation;Imbalanced data sets;\n Intra-class variations;Data analysis;\n Principal component analysis;\n One-against-all SVM},\n doi={10.1109/SSD.2015.7348093},\n ISSN={},\n month={March}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_dim_reduction,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_components=2,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_components (int): number of components in PCA analysis\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_components, \"n_components\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_components = n_components\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_components': [2, 3, 4, 5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # do the oversampling\n pf_smote = polynom_fit_SMOTE(proportion=self.proportion,\n random_state=self.random_state)\n X_samp, y_samp = pf_smote.sample(X, y)\n X_min_samp = X_samp[len(X):]\n\n if len(X_min_samp) == 0:\n return X.copy(), y.copy()\n\n # do the undersampling\n X_maj = X[y == self.maj_label]\n\n # fitting the PCA model\n pca = PCA(n_components=min([len(X[0]), self.n_components]))\n X_maj_trans = pca.fit_transform(X_maj)\n R = np.sqrt(np.sum(np.var(X_maj_trans, axis=0)))\n # determining the majority samples to remove\n to_remove = np.where([np.linalg.norm(x) > R for x in X_maj_trans])[0]\n _logger.info(self.__class__.__name__ + \": \" +\n \"Removing %d majority samples\" % len(to_remove))\n # removing the majority samples\n X_maj = np.delete(X_maj, to_remove, axis=0)\n\n if len(X_min_samp) == 0:\n _logger.info(\"no samples added\")\n return X.copy(), y.copy()\n\n return (np.vstack([X_maj, X_min_samp]),\n np.hstack([np.repeat(self.maj_label, len(X_maj)),\n np.repeat(self.min_label, len(X_min_samp))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_components': self.n_components,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MCT(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{mct,\n author = {Jiang, Liangxiao and Qiu, Chen and Li, Chaoqun},\n year = {2015},\n month = {03},\n pages = {1551004},\n title = {A Novel Minority Cloning Technique for\n Cost-Sensitive Learning},\n volume = {29},\n booktitle = {International Journal of Pattern Recognition\n and Artificial Intelligence}\n }\n\n Notes:\n * Mode is changed to median, distance is changed to Euclidean to\n support continuous features, and normalized.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_copy]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
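A short sketch of the undersampling half of the hybrid method above: majority points whose PCA projection falls outside the radius R = sqrt(sum of component variances) are removed. Toy data; the polynom_fit_SMOTE oversampling half is omitted.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(9)
X_maj = rng.normal(size=(200, 5))                 # toy majority samples

pca = PCA(n_components=2)
X_t = pca.fit_transform(X_maj)
R = np.sqrt(np.sum(np.var(X_t, axis=0)))          # radius in the projected space

to_remove = np.where([np.linalg.norm(x) > R for x in X_t])[0]
X_maj_kept = np.delete(X_maj, to_remove, axis=0)
print(len(X_maj), '->', len(X_maj_kept))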
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # having continuous variables, the mode is replaced by median\n x_med = np.median(X_min, axis=0)\n distances = np.array([np.linalg.norm(x_med - x) for x in X_min])\n sums = np.sum(distances)\n if sums != 0:\n distances = distances/sums\n\n # distribution of copies is determined (Euclidean distance is a\n # dissimilarity measure which is changed to similarity by subtracting\n # from 1.0)\n distribution = (1.0 - distances)/(np.sum(1.0 - distances))\n\n if any(np.isnan(distribution)):\n _logger.warning(self.__class__.__name__ + \": \" +\n \"NaN in the probability distribution\")\n return X.copy(), y.copy()\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n samples.append(X_min[self.random_state.choice(\n np.arange(len(X_min)), p=distribution)])\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ADG(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{adg,\n author = {Pourhabib, A. and Mallick, Bani K. 
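A tiny sketch of the cloning distribution above on toy data: minority points closer to the minority median receive a proportionally higher probability of being copied.

import numpy as np

rng = np.random.RandomState(4)
X_min = rng.normal(size=(25, 3))                    # toy minority samples
n_to_sample = 15

x_med = np.median(X_min, axis=0)
dist = np.linalg.norm(X_min - x_med, axis=1)
dist = dist / np.sum(dist)                          # normalised dissimilarities
distribution = (1.0 - dist) / np.sum(1.0 - dist)    # similarity-based copy probabilities

clones = X_min[rng.choice(len(X_min), size=n_to_sample, p=distribution)]
print(clones.shape)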
and Ding, Yu},\n year = {2015},\n month = {16},\n pages = {2695--2724},\n title = {A Novel Minority Cloning Technique for\n Cost-Sensitive Learning},\n volume = {16},\n journal = {Journal of Machine Learning Research}\n }\n\n Notes:\n * This method has a lot of parameters, it becomes fairly hard to\n cross-validate thoroughly.\n * Fails if matrix is singular when computing alpha_star, fixed\n by PCA.\n * Singularity might be caused by repeating samples.\n * Maintaining the kernel matrix becomes unfeasible above a couple\n of thousand vectors.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n kernel='inner',\n lam=1.0,\n mu=1.0,\n k=12,\n gamma=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n kernel (str): 'inner'/'rbf_x', where x is a float, the bandwidth\n lam (float): lambda parameter of the method\n mu (float): mu parameter of the method\n k (int): number of samples to generate in each iteration\n gamma (float): gamma parameter of the method\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n\n if kernel != 'inner' and not kernel.startswith('rbf'):\n raise ValueError(self.__class__.__name__ + \": \" +\n 'Kernel function %s not supported' % kernel)\n elif kernel.startswith('rbf'):\n par = float(kernel.split('_')[-1])\n if par <= 0.0:\n raise ValueError(self.__class__.__name__ + \": \" +\n 'Kernel parameter %f is not supported' % par)\n\n self.check_greater(lam, 'lam', 0)\n self.check_greater(mu, 'mu', 0)\n self.check_greater_or_equal(k, 'k', 1)\n self.check_greater(gamma, 'gamma', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.kernel = kernel\n self.lam = lam\n self.mu = mu\n self.k = k\n self.gamma = gamma\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'kernel': ['inner', 'rbf_0.5',\n 'rbf_1.0', 'rbf_2.0'],\n 'lam': [1.0, 2.0],\n 'mu': [1.0, 2.0],\n 'k': [12],\n 'gamma': [1.0, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n def bic_score(kmeans, X):\n \"\"\"\n Compute BIC score for clustering\n\n 
Args:\n kmeans (sklearn.KMeans): kmeans object\n X (np.matrix): clustered data\n\n Returns:\n float: bic value\n\n Inspired by https://stats.stackexchange.com/questions/90769/using-bic-to-estimate-the-number-of-k-in-kmeans\n \"\"\" # noqa\n # extract descriptors of the clustering\n cluster_centers = kmeans.cluster_centers_\n cluster_labels = kmeans.labels_\n n_clusters = kmeans.n_clusters\n n_in_clusters = np.bincount(cluster_labels)\n N, d = X.shape\n\n # compute variance for all clusters beforehand\n\n def sum_norm_2(i):\n return np.sum(np.linalg.norm(X[cluster_labels == i] -\n cluster_centers[i])**2)\n\n cluster_variances = [sum_norm_2(i) for i in range(n_clusters)]\n term_0 = (1.0)/((N - n_clusters) * d)\n term_1 = np.sum(cluster_variances)\n clustering_variance = term_0 * term_1\n\n const_term = 0.5 * n_clusters * np.log(N) * (d+1)\n\n def bic_comp(i):\n term_0 = n_in_clusters[i] * np.log(n_in_clusters[i])\n term_1 = n_in_clusters[i] * np.log(N)\n term_2 = (((n_in_clusters[i] * d) / 2)\n * np.log(2*np.pi*clustering_variance))\n term_3 = ((n_in_clusters[i] - 1) * d / 2)\n\n return term_0 - term_1 - term_2 - term_3\n\n bic = np.sum([bic_comp(i) for i in range(n_clusters)]) - const_term\n\n return bic\n\n def xmeans(X, r=(1, 10)):\n \"\"\"\n Clustering with BIC based n_cluster selection\n\n Args:\n X (np.matrix): data to cluster\n r (tuple): lower and upper bound on the number of clusters\n\n Returns:\n sklearn.KMeans: clustering with lowest BIC score\n \"\"\"\n best_bic = np.inf\n best_clustering = None\n\n # do clustering for all n_clusters in the specified range\n for k in range(r[0], min([r[1], len(X)])):\n kmeans = KMeans(n_clusters=k,\n random_state=self.random_state).fit(X)\n\n bic = bic_score(kmeans, X)\n if bic < best_bic:\n best_bic = bic\n best_clustering = kmeans\n\n return best_clustering\n\n def xgmeans(X, r=(1, 10)):\n \"\"\"\n Gaussian mixture with BIC to select the optimal number\n of components\n\n Args:\n X (np.matrix): data to cluster\n r (tuple): lower and upper bound on the number of components\n\n Returns:\n sklearn.GaussianMixture: Gaussian mixture model with the\n lowest BIC score\n \"\"\"\n best_bic = np.inf\n best_mixture = None\n\n # do model fitting for all n_components in the specified range\n for k in range(r[0], min([r[1], len(X)])):\n gmm = GaussianMixture(\n n_components=k, random_state=self.random_state).fit(X)\n bic = gmm.bic(X)\n if bic < best_bic:\n best_bic = bic\n best_mixture = gmm\n\n return best_mixture\n\n def evaluate_matrices(X, y, kernel=np.inner):\n \"\"\"\n The function evaluates the matrices specified in the method.\n\n Args:\n X (np.matrix): features\n y (np.array): target labels\n kernel (function): the kernel function to be used\n\n Returns:\n np.matrix, np.matrix, int, int, np.matrix, np.array,\n np.matrix, np.matrix, np.matrix\n np.array, np.matrix, np.matrix, np.matrix, np.matrix:\n X_minux, X_plus, l_minus, l_plus, X, y, K, M_plus, M_minus,\n M, K_plus, K_minus, N_plus, n_minus using the notations of\n the paper, X and y are ordered by target labels\n \"\"\"\n X_minus = X[y == self.maj_label]\n X_plus = X[y == self.min_label]\n l_minus = len(X_minus)\n l_plus = len(X_plus)\n\n X = np.vstack([X_minus, X_plus])\n y = np.hstack([np.array([self.maj_label]*l_minus),\n np.array([self.min_label]*l_plus)])\n\n K = pairwise_distances(X, X, metric=kernel)\n M_plus = np.mean(K[:, len(X_minus):], axis=1)\n M_minus = np.mean(K[:, :len(X_minus)], axis=1)\n M = np.dot(M_minus - M_plus, M_minus - M_plus)\n\n K_minus = K[:, :len(X_minus)]\n K_plus 
= K[:, len(X_minus):]\n\n return (X_minus, X_plus, l_minus, l_plus, X, y, K,\n M_plus, M_minus, M, K_plus, K_minus)\n\n # Implementation of the technique, following the steps and notations\n # of the paper\n q = n_to_sample\n\n # instantiating the proper kernel function, the parameter of the RBF\n # is supposed to be the denominator in the Gaussian\n if self.kernel == 'inner':\n kernel_function = np.inner\n else:\n kf = self.kernel.split('_')\n if kf[0] == 'rbf':\n d = float(kf[1])\n def kernel_function(\n x, y): return np.exp(-np.linalg.norm(x - y)**2/d)\n\n # Initial evaluation of the matrices\n (X_minus, X_plus, l_minus, l_plus, X, y, K, M_plus, M_minus,\n M, K_plus, K_minus) = evaluate_matrices(X,\n y,\n kernel=kernel_function)\n # The computing of N matrix is factored into two steps, computing\n # N_plus and N_minus this is used to improve efficiency\n K_plus2 = np.dot(K_plus, K_plus.T)\n K_plus_sum = np.sum(K_plus, axis=1)\n K_plus_diad = np.outer(K_plus_sum, K_plus_sum)/l_plus\n\n K_minus2 = np.dot(K_minus, K_minus.T)\n K_minus_sum = np.sum(K_minus, axis=1)\n K_minus_diad = np.outer(K_minus_sum, K_minus_sum)/l_minus\n\n N = K_plus2 - K_plus_diad + K_minus2 - K_minus_diad\n\n X_plus_hat = X_plus.copy()\n l_minus = len(X_minus)\n\n early_stop = False\n total_added = 0\n # executing the sample generation\n while q > 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"Starting iteration with q=%d\" % q)\n # step 1\n clusters = xmeans(X_plus_hat)\n l_c = np.array([np.sum(clusters.labels_ == i)\n for i in range(clusters.n_clusters)])\n\n # step 2\n k_c = ((1.0/l_c)/(np.sum(1.0/l_c))*self.k).astype(int)\n k_c[k_c == 0] = 1\n lam_c, mu_c = self.lam/l_c, self.mu/l_c\n\n # step 3\n omega = - np.sum([k_c[i]*(lam_c[i])**2/(4*mu_c[i]**2)\n for i in range(len(k_c))])\n nu_c = - 0.5*k_c*lam_c\n M_plus_c = [np.mean(K[:, np.arange(len(X_minus), len(X))[\n clusters.labels_ == i]]) for i in range(len(k_c))]\n\n # step 4\n A = (M - self.gamma*N) - omega*K\n b = np.sum([(M_minus - M_plus_c[i])*nu_c[i]\n for i in range(len(k_c))], axis=0)\n try:\n alpha_star = np.linalg.solve(A, b)\n except Exception as e:\n # handling the issue of singular matrix\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Singular matrix\")\n # deleting huge data structures\n if q == n_to_sample:\n if len(X[0]) == 1:\n return None, None\n K, K_plus, K_minus = None, None, None\n n_components = int(np.sqrt(len(X[0])))\n pca = PCA(n_components=n_components).fit(X)\n\n message = \"reducing dimensionality to %d\" % n_components\n _logger.warning(self.__class__.__name__ + \": \" + message)\n X_trans = pca.transform(X)\n adg = ADG(proportion=self.proportion,\n kernel=self.kernel,\n lam=self.lam,\n mu=self.mu,\n k=self.k,\n gamma=self.gamma,\n random_state=self.random_state)\n X_samp, y_samp = adg.sample(X_trans, y)\n if X_samp is not None:\n return pca.inverse_transform(X_samp), y_samp\n else:\n return X.copy(), y.copy()\n else:\n q = int(q/2)\n continue\n\n # step 5\n mixture = xgmeans(X_plus)\n\n # step 6\n try:\n Z = mixture.sample(q)[0]\n except Exception as e:\n message = \"sampling error in sklearn.mixture.GaussianMixture\"\n _logger.warning(\n self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n\n # step 7\n # computing the kernel matrix of generated samples with all samples\n K_10 = pairwise_distances(Z, X, metric=kernel_function)\n mask_inner_prod = np.where(np.inner(K_10, alpha_star) > 0)[0]\n Z_hat = Z[mask_inner_prod]\n\n if len(Z_hat) == 0:\n q = int(q/2)\n continue\n\n 
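A compact version of the BIC-guided cluster-count selection performed by the xmeans helper above, on a toy three-blob dataset: KMeans is fitted for a range of k and the clustering with the lowest score (mirroring bic_score) is kept.

import numpy as np
from sklearn.cluster import KMeans

rng = np.random.RandomState(7)
X = np.vstack([rng.normal(loc=c, scale=0.3, size=(20, 2))
               for c in ([0, 0], [3, 0], [0, 3])])
N, d = X.shape

def bic_score(km, X):
    labels, centers, k = km.labels_, km.cluster_centers_, km.n_clusters
    n_i = np.bincount(labels)
    var = np.sum((X - centers[labels]) ** 2) / ((N - k) * d)
    const = 0.5 * k * np.log(N) * (d + 1)
    comp = (n_i * np.log(n_i) - n_i * np.log(N)
            - (n_i * d / 2.0) * np.log(2 * np.pi * var)
            - (n_i - 1) * d / 2.0)
    return np.sum(comp) - const

best_bic, best_km = np.inf, None
for k in range(1, 10):
    km = KMeans(n_clusters=k, n_init=10, random_state=7).fit(X)
    b = bic_score(km, X)
    if b < best_bic:
        best_bic, best_km = b, km
print(best_km.n_clusters)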
_logger.info(self.__class__.__name__ + \": \" +\n \"number of vectors added: %d/%d\" % (len(Z_hat), q))\n\n # step 8\n # this step is not used for anything, the identified clusters are\n # only used in step 13 of the paper, however, the values set\n # (M_plus^c) are overwritten in step 3 of the next iteration\n\n # step 9\n X_plus_hat = np.vstack([X_plus_hat, Z_hat])\n l_plus = len(X_plus_hat)\n\n # step 11 - 16\n # these steps have been reorganized a bit for efficient\n # calculations\n\n pairwd = pairwise_distances(Z_hat, Z_hat, metric=kernel_function)\n K = np.block([[K, K_10[mask_inner_prod].T],\n [K_10[mask_inner_prod], pairwd]])\n\n K_minus = K[:, :l_minus]\n K_plus = K[:, l_minus:]\n\n # step 10\n X = np.vstack([X_minus, X_plus_hat])\n y = np.hstack([y, np.repeat(self.min_label, len(Z_hat))])\n\n if early_stop is True:\n break\n\n M_plus = np.mean(K_plus, axis=1)\n M_minus = np.mean(K_minus, axis=1)\n\n # step 13 is already involved in the core of the loop\n M = np.dot(M_minus - M_plus, M_minus - M_plus)\n\n l_new = len(Z_hat)\n total_added = total_added + l_new\n\n K_minus2_01 = np.dot(K_minus[:-l_new:], K_minus[-l_new:].T)\n K_minus2 = np.block([[K_minus2, K_minus2_01],\n [K_minus2_01.T, np.dot(K_minus[-l_new:],\n K_minus[-l_new:].T)]])\n K_minus_sum = M_minus*len(K_minus)\n\n K_plus2 = K_plus2 + np.dot(K_plus[:-l_new, l_new:],\n K_plus[:-l_new, l_new:].T)\n\n K_plus2_01 = np.dot(K_plus[:-l_new], K_plus[-l_new:].T)\n\n K_plus2 = np.block([[K_plus2, K_plus2_01],\n [K_plus2_01.T, np.dot(K_plus[-l_new:],\n K_plus[-l_new:].T)]])\n\n K_plus_sum = M_plus*len(K_plus)\n\n N = K_plus2 - np.outer(K_plus_sum/l_plus, K_plus_sum) + \\\n K_minus2 - np.outer(K_minus_sum/l_minus, K_minus_sum)\n\n # step 17\n if l_new/total_added < 0.01:\n early_stop = True\n else:\n q = int(q/2)\n\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'kernel': self.kernel,\n 'lam': self.lam,\n 'mu': self.mu,\n 'k': self.k,\n 'gamma': self.gamma,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_IPF(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_ipf,\n title = \"SMOTE–IPF: Addressing the noisy and borderline\n examples problem in imbalanced\n classification by a re-sampling method\n with filtering\",\n journal = \"Information Sciences\",\n volume = \"291\",\n pages = \"184 - 203\",\n year = \"2015\",\n issn = \"0020-0255\",\n doi = \"https://doi.org/10.1016/j.ins.2014.08.051\",\n author = \"José A. Sáez and Julián Luengo and Jerzy\n Stefanowski and Francisco Herrera\",\n keywords = \"Imbalanced classification,\n Borderline examples,\n Noisy data,\n Noise filters,\n SMOTE\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_folds=9,\n k=3,\n p=0.01,\n voting='majority',\n classifier=DecisionTreeClassifier(random_state=2),\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors in SMOTE sampling\n n_folds (int): the number of partitions\n k (int): used in stopping condition\n p (float): percentage value ([0,1]) used in stopping condition\n voting (str): 'majority'/'consensus'\n classifier (obj): classifier object\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_folds, \"n_folds\", 2)\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_greater_or_equal(p, \"p\", 0)\n self.check_isin(voting, \"voting\", ['majority', 'consensus'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_folds = n_folds\n self.k = k\n self.p = p\n self.voting = voting\n self.classifier = classifier\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n classifiers = [DecisionTreeClassifier(random_state=2)]\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_folds': [9],\n 'k': [3],\n 'p': [0.01],\n 'voting': ['majority', 'consensus'],\n 'classifier': classifiers}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # do SMOTE sampling\n X_samp, y_samp = SMOTE(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n n_folds = min([self.n_folds, np.sum(y == self.min_label)])\n\n condition = 0\n while True:\n # validating the sampled dataset\n validator = StratifiedKFold(n_folds)\n predictions = []\n for train_index, _ in validator.split(X_samp, y_samp):\n self.classifier.fit(X_samp[train_index], y_samp[train_index])\n predictions.append(self.classifier.predict(X_samp))\n\n # do decision based on one of the voting schemes\n if self.voting == 'majority':\n pred_votes = (np.mean(predictions, axis=0) > 0.5).astype(int)\n to_remove = np.where(np.not_equal(pred_votes, y_samp))[0]\n elif self.voting == 'consensus':\n pred_votes = (np.mean(predictions, axis=0) > 0.5).astype(int)\n sum_votes = np.sum(predictions, axis=0)\n to_remove = np.where(np.logical_and(np.not_equal(\n pred_votes, y_samp), np.equal(sum_votes, self.n_folds)))[0]\n else:\n message = 'Voting scheme %s is not implemented' % self.voting\n raise ValueError(self.__class__.__name__ + \": \" + message)\n\n # delete samples incorrectly classified\n _logger.info(self.__class__.__name__ + \": \" +\n 'Removing %d elements' % len(to_remove))\n X_samp = np.delete(X_samp, to_remove, axis=0)\n y_samp = np.delete(y_samp, to_remove)\n\n # if the number 
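A sketch of the iterative partitioning filter above on a noisy toy dataset: trees trained on the folds vote on every instance, instances whose majority vote disagrees with their label are removed, and the loop stops after k consecutive rounds that each remove less than a fraction p of the data. In the library X_samp / y_samp come from the preceding SMOTE call.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import StratifiedKFold
from sklearn.tree import DecisionTreeClassifier

X_samp, y_samp = make_classification(n_samples=300, flip_y=0.1, random_state=0)
n_folds, k, p = 9, 3, 0.01

condition = 0
while True:
    preds = []
    for train_idx, _ in StratifiedKFold(n_folds).split(X_samp, y_samp):
        clf = DecisionTreeClassifier(random_state=2)
        clf.fit(X_samp[train_idx], y_samp[train_idx])
        preds.append(clf.predict(X_samp))
    votes = (np.mean(preds, axis=0) > 0.5).astype(int)      # majority voting
    to_remove = np.where(votes != y_samp)[0]
    X_samp = np.delete(X_samp, to_remove, axis=0)
    y_samp = np.delete(y_samp, to_remove)
    condition = condition + 1 if len(to_remove) < len(X_samp) * p else 0
    if condition >= k:
        break
print(len(y_samp))                                          # filtered training set size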
of samples removed becomes small or k iterations\n # were done quit\n if len(to_remove) < len(X_samp)*self.p:\n condition = condition + 1\n else:\n condition = 0\n if condition >= self.k:\n break\n\n return X_samp, y_samp\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_folds': self.n_folds,\n 'k': self.k,\n 'p': self.p,\n 'voting': self.voting,\n 'n_jobs': self.n_jobs,\n 'classifier': self.classifier,\n 'random_state': self._random_state_init}\n\n\nclass KernelADASYN(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{kernel_adasyn,\n author={Tang, B. and He, H.},\n booktitle={2015 IEEE Congress on Evolutionary\n Computation (CEC)},\n title={KernelADASYN: Kernel based adaptive\n synthetic data generation for\n imbalanced learning},\n year={2015},\n volume={},\n number={},\n pages={664-671},\n keywords={learning (artificial intelligence);\n pattern classification;\n sampling methods;KernelADASYN;\n kernel based adaptive synthetic\n data generation;imbalanced\n learning;standard classification\n algorithms;data distribution;\n minority class decision rule;\n expensive minority class data\n misclassification;kernel based\n adaptive synthetic over-sampling\n approach;imbalanced data\n classification problems;kernel\n density estimation methods;Kernel;\n Estimation;Accuracy;Measurement;\n Standards;Training data;Sampling\n methods;Imbalanced learning;\n adaptive over-sampling;kernel\n density estimation;pattern\n recognition;medical and\n healthcare data learning},\n doi={10.1109/CEC.2015.7256954},\n ISSN={1089-778X},\n month={May}}\n\n Notes:\n * The method of sampling was not specified, Markov Chain Monte Carlo\n has been implemented.\n * Not prepared for improperly conditioned covariance matrix.\n \"\"\"\n\n categories = [OverSampling.cat_density_estimation,\n OverSampling.cat_extensive,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n k=5,\n h=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n k (int): number of neighbors in the nearest neighbors component\n h (float): kernel bandwidth\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(k, 'k', 1)\n self.check_greater(h, 'h', 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.k = k\n self.h = h\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'k': [5, 7, 9],\n 'h': [0.01, 0.02, 0.05, 0.1, 0.2,\n 0.5, 1.0, 2.0, 10.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting the nearest neighbors model\n nn = NearestNeighbors(n_neighbors=min([len(X_min), self.k+1]),\n n_jobs=self.n_jobs)\n nn.fit(X)\n distances, indices = nn.kneighbors(X_min)\n\n # computing majority score\n r = np.array([np.sum(y[indices[i][1:]] == self.maj_label)\n for i in range(len(X_min))])\n\n if np.sum(r > 0) < 2:\n message = (\"majority score is 0 for all or all but one \"\n \"minority samples\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n\n r = r/np.sum(r)\n\n # kernel density function\n def p_x(x):\n \"\"\"\n Returns minority density value at x\n\n Args:\n x (np.array): feature vector\n\n Returns:\n float: density value\n \"\"\"\n result = 1.0/(len(X_min)*self.h)\n result = result*(1.0/(np.sqrt(2*np.pi)*self.h)**len(X[0]))\n\n exp_term = np.exp(-0.5*np.linalg.norm(x - X_min, axis=1)**2/self.h)\n return result*np.inner(r, exp_term)\n\n samples = []\n it = 0\n\n # parameters of the Monte Carlo sampling\n burn_in = 1000\n periods = 50\n\n # covariance is used to generate a random sample in the neighborhood\n covariance = np.cov(X_min[r > 0], rowvar=False)\n\n if len(covariance) > 1 and np.linalg.cond(covariance) > 10000:\n message = (\"reducing dimensions due to inproperly conditioned\"\n \"covariance matrix\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n if len(X[0]) <= 2:\n _logger.info(self.__class__.__name__ +\n \": \" + \"matrix ill-conditioned\")\n return X.copy(), y.copy()\n\n n_components = int(np.rint(len(covariance)/2))\n\n pca = PCA(n_components=n_components)\n X_trans = pca.fit_transform(X)\n\n ka = KernelADASYN(proportion=self.proportion,\n k=self.k,\n h=self.h,\n random_state=self.random_state)\n\n X_samp, y_samp = ka.sample(X_trans, y)\n 
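A small sketch of the adaptive density p_x above on toy data: each minority point is weighted by the share of majority points among its k nearest neighbours, so the weighted Gaussian kernel density places more mass near the class boundary.

import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(11)
X_maj = rng.normal(loc=0.0, size=(80, 2))
X_min = rng.normal(loc=1.5, size=(20, 2))
X = np.vstack([X_maj, X_min])
y = np.hstack([np.zeros(80), np.ones(20)])
k, h = 5, 1.0

nn = NearestNeighbors(n_neighbors=k + 1).fit(X)
_, idx = nn.kneighbors(X_min)
r = np.array([np.sum(y[idx[i][1:]] == 0) for i in range(len(X_min))])
r = r / np.sum(r)                                 # normalised majority score

def p_x(x):
    norm = (1.0 / (len(X_min) * h)) * (1.0 / (np.sqrt(2 * np.pi) * h) ** X.shape[1])
    kernels = np.exp(-0.5 * np.linalg.norm(x - X_min, axis=1) ** 2 / h)
    return norm * np.inner(r, kernels)

print(p_x(X_min[0]), p_x(np.array([5.0, 5.0])))   # larger near the minority region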
return pca.inverse_transform(X_samp), y_samp\n\n # starting Markov-Chain Monte Carlo for sampling\n x_old = X_min[self.random_state.choice(np.where(r > 0)[0])]\n p_old = p_x(x_old)\n\n # Cholesky decomposition\n L = np.linalg.cholesky(covariance)\n\n while len(samples) < n_to_sample:\n x_new = x_old + \\\n np.dot(self.random_state.normal(size=len(x_old)), L)\n p_new = p_x(x_new)\n\n alpha = p_new/p_old\n u = self.random_state.random_sample()\n if u < alpha:\n x_old = x_new\n p_old = p_new\n else:\n pass\n\n it = it + 1\n if it % periods == 0 and it > burn_in:\n samples.append(x_old)\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'k': self.k,\n 'h': self.h,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MOT2LD(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{mot2ld,\n author=\"Xie, Zhipeng\n and Jiang, Liyang\n and Ye, Tengju\n and Li, Xiaoli\",\n editor=\"Renz, Matthias\n and Shahabi, Cyrus\n and Zhou, Xiaofang\n and Cheema, Muhammad Aamir\",\n title=\"A Synthetic Minority Oversampling Method\n Based on Local Densities in Low-Dimensional\n Space for Imbalanced Learning\",\n booktitle=\"Database Systems for Advanced\n Applications\",\n year=\"2015\",\n publisher=\"Springer International Publishing\",\n address=\"Cham\",\n pages=\"3--18\",\n isbn=\"978-3-319-18123-3\"\n }\n\n Notes:\n * Clusters might contain 1 elements, and all points can be filtered\n as noise.\n * Clusters might contain 0 elements as well, if all points are filtered\n as noise.\n * The entire clustering can become empty.\n * TSNE is very slow when the number of instances is over a couple\n of 1000\n \"\"\"\n\n categories = [OverSampling.cat_uses_clustering,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_components=2,\n k=5,\n d_cut='auto',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_components (int): number of components for stochastic\n neighborhood embedding\n k (int): number of neighbors in the nearest neighbor component\n d_cut (float/str): distance cut value/'auto' for automated\n selection\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, 'proportion', 0)\n self.check_greater_or_equal(n_components, 'n_component', 1)\n self.check_greater_or_equal(k, 'k', 1)\n if isinstance(d_cut, float) or isinstance(d_cut, int):\n if d_cut <= 0:\n raise ValueError(self.__class__.__name__ +\n \": \" + 'Non-positive d_cut is not allowed')\n elif d_cut != 'auto':\n raise ValueError(self.__class__.__name__ + \": \" +\n 'd_cut value %s not implemented' % d_cut)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_components = n_components\n self.k = k\n self.d_cut = d_cut\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_components': [2],\n 'k': [3, 5, 7],\n 'd_cut': ['auto']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n _logger.info(self.__class__.__name__ + \": \" +\n (\"starting TSNE n: %d d: %d\" % (len(X), len(X[0]))))\n # do the stochastic embedding\n X_tsne = TSNE(self.n_components,\n random_state=self.random_state,\n perplexity=10,\n n_iter_without_progress=100,\n n_iter=500,\n verbose=3).fit_transform(X)\n X_min = X_tsne[y == self.min_label]\n _logger.info(self.__class__.__name__ + \": \" + \"TSNE finished\")\n\n # fitting nearest neighbors model for all training data\n n_neighbors = min([len(X_min), self.k + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_tsne)\n distances, indices = nn.kneighbors(X_min)\n\n if isinstance(self.d_cut, float):\n d_cut = self.d_cut\n elif self.d_cut == 'auto':\n d_cut = np.max(distances[:, 1])\n\n # fitting nearest neighbors model to the minority data\n nn_min = NearestNeighbors(n_neighbors=len(X_min), n_jobs=self.n_jobs)\n nn_min.fit(X_min)\n distances_min, indices_min = nn_min.kneighbors(X_min)\n\n def n_rad_neighbors(x):\n x = x.reshape(1, -1)\n return len(nn.radius_neighbors(x, d_cut, return_distance=False)[0])\n\n # extracting the number of neighbors in a given radius\n rho = np.array([n_rad_neighbors(x) for x in X_min])\n closest_highest = []\n delta = 
[]\n\n # implementation of the density peak clustering algorithm\n # based on http://science.sciencemag.org/content/344/6191/1492.full\n for i in range(len(rho)):\n closest_neighbors = indices_min[i]\n closest_densities = rho[closest_neighbors]\n closest_highs = np.where(closest_densities > rho[i])[0]\n\n if len(closest_highs) > 0:\n closest_highest.append(closest_highs[0])\n delta.append(distances_min[i][closest_highs[0]])\n else:\n closest_highest.append(-1)\n delta.append(np.max(distances_min))\n\n to_sort = zip(rho, delta, np.arange(len(rho)))\n r, d, idx = zip(*sorted(to_sort, key=lambda x: x[0]))\n r, d, idx = np.array(r), np.array(d), np.array(idx)\n\n if len(d) < 3:\n return X.copy(), y.copy()\n\n widths = np.arange(1, int(len(r)/2))\n peak_indices = np.array(ssignal.find_peaks_cwt(d, widths=widths))\n\n if len(peak_indices) == 0:\n _logger.info(self.__class__.__name__ + \": \" + \"no peaks found\")\n return X.copy(), y.copy()\n\n cluster_center_indices = idx[peak_indices]\n cluster_centers = X_min[cluster_center_indices]\n\n # finding closest cluster center to minority points and deriving\n # cluster labels\n nn_cluster = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn_cluster.fit(cluster_centers)\n dist_cluster, ind_cluster = nn_cluster.kneighbors(X_min)\n cluster_labels = ind_cluster[:, 0]\n\n # computing local minority counts and determining noisy samples\n def n_min_y(i):\n return np.sum(y[indices[i][1:]] == self.min_label)\n\n local_minority_count = np.array(\n [n_min_y(i) for i in range(len(X_min))])\n\n noise = np.where(np.logical_or(rho == 1, local_minority_count == 0))[0]\n\n # determining importance scores\n importance = local_minority_count/rho\n prob = importance\n prob[noise] = 0.0\n prob = prob/np.sum(prob)\n\n # extracting cluster indices\n cluster_indices = [np.where(cluster_labels == i)[0]\n for i in range(np.max(cluster_labels) + 1)]\n # removing noise from clusters\n cluster_indices = [list(set(c).difference(set(noise)))\n for c in cluster_indices]\n\n # checking if clustering is empty\n empty_clustering = True\n for i in range(len(cluster_indices)):\n if len(cluster_indices[i]) > 0:\n empty_clustering = False\n\n if empty_clustering:\n _logger.info(self.__class__.__name__ + \": \" + \"Empty clustering\")\n return X.copy(), y.copy()\n\n cluster_sizes = np.array([len(c) for c in cluster_indices])\n cluster_indices_size_0 = np.where(cluster_sizes == 0)[0]\n for i in range(len(prob)):\n if cluster_labels[i] in cluster_indices_size_0:\n prob[i] = 0.0\n prob = prob/np.sum(prob)\n\n # carrying out the sampling\n X_min = X[y == self.min_label]\n samples = []\n while len(samples) < n_to_sample:\n # random sample according to the distribution computed\n random_idx = self.random_state.choice(np.arange(len(X_min)),\n p=prob)\n\n # cluster label of the random minority sample\n cluster_label = cluster_labels[random_idx]\n if cluster_label == -1:\n continue\n\n if len(cluster_indices[cluster_label]) == 0:\n continue\n elif len(cluster_indices[cluster_label]) == 1:\n # if the cluster has only 1 elements, it is repeated\n samples.append(X_min[random_idx])\n continue\n else:\n # otherwise a random cluster index is selected for sample\n # generation\n clus = cluster_indices[cluster_label]\n random_neigh_in_clus_idx = self.random_state.choice(clus)\n while random_idx == random_neigh_in_clus_idx:\n random_neigh_in_clus_idx = self.random_state.choice(clus)\n\n X_rand = X_min[random_idx]\n X_in_clus = X_min[random_neigh_in_clus_idx]\n 
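# NOTE (added for clarity): sample_between_points is assumed to\n                # interpolate linearly between the two minority points, i.e.\n                # roughly x_new = X_rand + u*(X_in_clus - X_rand) with\n                # u ~ U(0, 1), so the synthetic sample below lies on the\n                # segment connecting two members of the same cluster.\n                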
samples.append(self.sample_between_points(X_rand, X_in_clus))\n\n return (np.vstack([np.delete(X, noise, axis=0), np.vstack(samples)]),\n np.hstack([np.delete(y, noise),\n np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_components': self.n_components,\n 'k': self.k,\n 'd_cut': self.d_cut,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass V_SYNTH(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{v_synth,\n author = {Young,Ii, William A. and Nykl, Scott L. and\n Weckman, Gary R. and Chelberg, David M.},\n title = {Using Voronoi Diagrams to Improve\n Classification Performances when Modeling\n Imbalanced Datasets},\n journal = {Neural Comput. Appl.},\n issue_date = {July 2015},\n volume = {26},\n number = {5},\n month = jul,\n year = {2015},\n issn = {0941-0643},\n pages = {1041--1054},\n numpages = {14},\n url = {http://dx.doi.org/10.1007/s00521-014-1780-0},\n doi = {10.1007/s00521-014-1780-0},\n acmid = {2790665},\n publisher = {Springer-Verlag},\n address = {London, UK, UK},\n keywords = {Data engineering, Data mining, Imbalanced\n datasets, Knowledge extraction,\n Numerical algorithms, Synthetic\n over-sampling},\n }\n\n Notes:\n * The proposed encompassing bounding box generation is incorrect.\n * Voronoi diagram generation in high dimensional spaces is instable\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_components=3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_components (int): number of components for PCA\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_components, \"n_component\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_components = n_components\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_components': [3]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # creating the bounding box\n mins = np.min(X, axis=0)\n maxs = np.max(X, axis=0)\n mins = mins - 0.1*np.abs(mins)\n maxs = maxs + 0.1*np.abs(maxs)\n\n dim = len(X[0])\n\n def random_min_maxs():\n return np.where(self.random_state.randint(0, 1, size=dim) == 0,\n mins,\n maxs)\n\n n_bounding_box = min([100, len(X[0])])\n bounding_box = [random_min_maxs() for i in range(n_bounding_box)]\n X_bb = np.vstack([X, bounding_box])\n\n # applying PCA to reduce the dimensionality of the data\n n_components = min([len(X[0]), self.n_components])\n pca = PCA(n_components=n_components)\n X_pca = pca.fit_transform(X_bb)\n y_pca = np.hstack([y, np.repeat(-1, len(bounding_box))])\n\n dm = pairwise_distances(X_pca)\n to_remove = []\n for i in range(len(dm)):\n for j in range(i+1, len(dm)):\n if dm[i, j] < 0.001:\n to_remove.append(i)\n X_pca = np.delete(X_pca, to_remove, axis=0)\n y_pca = np.delete(y_pca, to_remove)\n\n # doing the Voronoi tessellation\n voronoi = sspatial.Voronoi(X_pca)\n\n # extracting those ridge point pairs which are candidates for\n # generating an edge between two cells of different class labels\n candidate_face_generators = []\n for i, r in enumerate(voronoi.ridge_points):\n if r[0] < len(y) and r[1] < len(y) and not y[r[0]] == y[r[1]]:\n candidate_face_generators.append(i)\n\n if len(candidate_face_generators) == 0:\n return X.copy(), y.copy()\n\n # generating samples\n samples = []\n for _ in range(n_to_sample):\n # randomly choosing a pair from the ridge point pairs of different\n # labels\n random_face = self.random_state.choice(candidate_face_generators)\n\n # extracting the vertices of the face between the points\n ridge_vertices = voronoi.ridge_vertices[random_face]\n face_vertices = voronoi.vertices[ridge_vertices]\n\n # creating a random vector for sampling the face (supposed to be\n # convex)\n w = 
self.random_state.random_sample(size=len(X_pca[0]))\n w = w/np.sum(w)\n\n # initiating a sample point on the face\n sample_point_on_face = np.zeros(len(X_pca[0]))\n for i in range(len(X_pca[0])):\n sample_point_on_face += w[i]*face_vertices[i]\n\n # finding the ridge point with the minority label\n if y[voronoi.ridge_points[random_face][0]] == self.min_label:\n h = voronoi.points[voronoi.ridge_points[random_face][0]]\n else:\n h = voronoi.points[voronoi.ridge_points[random_face][1]]\n\n # generating a point between the minority ridge point and the\n # random point on the face\n samples.append(self.sample_between_points(sample_point_on_face,\n h))\n\n return (np.vstack([X, pca.inverse_transform(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_components': self.n_components,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass OUPS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{oups,\n title = \"A priori synthetic over-sampling methods for\n increasing classification sensitivity in\n imbalanced data sets\",\n journal = \"Expert Systems with Applications\",\n volume = \"66\",\n pages = \"124 - 135\",\n year = \"2016\",\n issn = \"0957-4174\",\n doi = \"https://doi.org/10.1016/j.eswa.2016.09.010\",\n author = \"William A. Rivera and Petros Xanthopoulos\",\n keywords = \"SMOTE, OUPS, Class imbalance,\n Classification\"\n }\n\n Notes:\n * In the description of the algorithm a fractional number p (j) is\n used to index a vector.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if self.class_stats[self.min_label] < 2:\n message = (\"The number of minority samples (%d) is not enough for\"\n \" sampling\")\n message = message % self.class_stats[self.min_label]\n _logger.warning(self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # extracting propensity scores\n lr = LogisticRegression(solver='lbfgs',\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n lr.fit(X, y)\n propensity = lr.predict_proba(X)\n propensity = propensity[:, np.where(\n lr.classes_ == self.min_label)[0][0]]\n\n # sorting indices according to propensity scores\n prop_sorted = sorted(zip(propensity, np.arange(\n len(propensity))), key=lambda x: -x[0])\n\n p = np.sum(y == self.maj_label)/np.sum(y == self.min_label)\n n = 0\n samples = []\n # implementing Algorithm 1 in the cited paper with some minor changes\n # to enable the proper sampling of p numbers\n while n < len(propensity) and len(samples) < n_to_sample:\n if (y[prop_sorted[n][1]] == self.min_label\n and n < len(propensity) - 1):\n num = 1\n p_tmp = p\n while p_tmp > 0 and n + num < len(propensity):\n if self.random_state.random_sample() < p_tmp:\n samples.append(self.sample_between_points(\n X[prop_sorted[n][1]], X[prop_sorted[n+num][1]]))\n p_tmp = p_tmp - 1\n num = num + 1\n n = n + 1\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_D(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{smote_d,\n author=\"Torres, Fredy Rodr{\\'i}guez\n and Carrasco-Ochoa, Jes{\\'u}s A.\n and Mart{\\'i}nez-Trinidad, Jos{\\'e} Fco.\",\n editor=\"Mart{\\'i}nez-Trinidad, Jos{\\'e} Francisco\n and Carrasco-Ochoa, Jes{\\'u}s Ariel\n and Ayala Ramirez, Victor\n and Olvera-L{\\'o}pez, Jos{\\'e} Arturo\n and Jiang, Xiaoyi\",\n title=\"SMOTE-D a 
Deterministic Version of SMOTE\",\n                booktitle=\"Pattern Recognition\",\n                year=\"2016\",\n                publisher=\"Springer International Publishing\",\n                address=\"Cham\",\n                pages=\"177--188\",\n                isbn=\"978-3-319-39393-3\"\n                }\n\n    Notes:\n        * Copying happens if two points are each other's neighbors.\n    \"\"\"\n\n    categories = [OverSampling.cat_extensive]\n\n    def __init__(self, proportion=1.0, k=3, n_jobs=1, random_state=None):\n        \"\"\"\n        Constructor of the sampling object\n\n        Args:\n            proportion (float): proportion of the difference of n_maj and n_min\n                                to sample e.g. 1.0 means that after sampling\n                                the number of minority samples will be equal to\n                                the number of majority samples\n            k (int): number of neighbors in nearest neighbors component\n            n_jobs (int): number of parallel jobs\n            random_state (int/RandomState/None): initializer of random_state,\n                                                    like in sklearn\n        \"\"\"\n        super().__init__()\n\n        self.check_greater_or_equal(proportion, \"proportion\", 0)\n        self.check_greater_or_equal(k, \"k\", 1)\n        self.check_n_jobs(n_jobs, 'n_jobs')\n\n        self.proportion = proportion\n        self.k = k\n        self.n_jobs = n_jobs\n\n        self.set_random_state(random_state)\n\n    @ classmethod\n    def parameter_combinations(cls, raw=False):\n        \"\"\"\n        Generates reasonable parameter combinations.\n\n        Returns:\n            list(dict): a list of meaningful parameter combinations\n        \"\"\"\n        parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n                                                 1.0, 1.5, 2.0],\n                                  'k': [3, 5, 7]}\n        return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n    def sample(self, X, y):\n        \"\"\"\n        Does the sample generation according to the class parameters.\n\n        Args:\n            X (np.ndarray): training set\n            y (np.array): target labels\n\n        Returns:\n            (np.ndarray, np.array): the extended training set and target labels\n        \"\"\"\n        _logger.info(self.__class__.__name__ + \": \" +\n                     \"Running sampling via %s\" % self.descriptor())\n\n        self.class_label_statistics(X, y)\n\n        n_to_sample = self.det_n_to_sample(self.proportion,\n                                           self.class_stats[self.maj_label],\n                                           self.class_stats[self.min_label])\n\n        if n_to_sample == 0:\n            _logger.warning(self.__class__.__name__ +\n                            \": \" + \"Sampling is not needed\")\n            return X.copy(), y.copy()\n\n        X_min = X[y == self.min_label]\n\n        # fitting nearest neighbors model\n        n_neighbors = min([len(X_min), self.k+1])\n        nn = NearestNeighbors(n_neighbors=n_neighbors,\n                              n_jobs=self.n_jobs)\n        nn.fit(X_min)\n        dist, ind = nn.kneighbors(X_min)\n\n        # extracting standard deviations of distances\n        stds = np.std(dist[:, 1:], axis=1)\n\n        # estimating sampling density\n        if np.sum(stds) > 0:\n            p_i = stds/np.sum(stds)\n        else:\n            _logger.warning(self.__class__.__name__ +\n                            \": \" + \"zero distribution\")\n            return X.copy(), y.copy()\n\n        # the other component of sampling density\n        p_ij = dist[:, 1:]/np.sum(dist[:, 1:], axis=1)[:, None]\n\n        # number of samples to generate between minority points\n        counts_ij = n_to_sample*p_i[:, None]*p_ij\n\n        # do the sampling\n        samples = []\n        for i in range(len(p_i)):\n            for j in range(min([len(X_min)-1, self.k])):\n                while counts_ij[i][j] > 0:\n                    if self.random_state.random_sample() < counts_ij[i][j]:\n                        translation = X_min[ind[i][j+1]] - X_min[i]\n                        weight = counts_ij[i][j] + 1\n                        samples.append(X_min[i] + translation/weight)\n                    counts_ij[i][j] = counts_ij[i][j] - 1\n\n        if len(samples) > 0:\n            return (np.vstack([X, np.vstack(samples)]),\n                    np.hstack([y, np.repeat(self.min_label, len(samples))]))\n        else:\n            return X.copy(), y.copy()\n\n    def get_params(self, deep=False):\n        \"\"\"\n        Returns:\n            dict: the parameters of the 
current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'k': self.k,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_PSO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_pso,\n title = \"PSO-based method for SVM classification on\n skewed data sets\",\n journal = \"Neurocomputing\",\n volume = \"228\",\n pages = \"187 - 197\",\n year = \"2017\",\n note = \"Advanced Intelligent Computing: Theory and\n Applications\",\n issn = \"0925-2312\",\n doi = \"https://doi.org/10.1016/j.neucom.2016.10.041\",\n author = \"Jair Cervantes and Farid Garcia-Lamont and\n Lisbeth Rodriguez and Asdrúbal López and\n José Ruiz Castilla and Adrian Trueba\",\n keywords = \"Skew data sets, SVM, Hybrid algorithms\"\n }\n\n Notes:\n * I find the description of the technique a bit confusing, especially\n on the bounds of the search space of velocities and positions.\n Equations 15 and 16 specify the lower and upper bounds, the lower\n bound is in fact a vector while the upper bound is a distance.\n I tried to implement something meaningful.\n * I also find the setting of accelerating constant 2.0 strange, most\n of the time the velocity will be bounded due to this choice.\n * Also, training and predicting probabilities with a non-linear\n SVM as the evaluation function becomes fairly expensive when the\n number of training vectors reaches a couple of thousands. To\n reduce computational burden, minority and majority vectors far\n from the other class are removed to reduce the size of both\n classes to a maximum of 500 samples. Generally, this shouldn't\n really affect the results as the technique focuses on the samples\n near the class boundaries.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_memetic,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n k=3,\n eps=0.05,\n n_pop=10,\n w=1.0,\n c1=2.0,\n c2=2.0,\n num_it=10,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n k (int): number of neighbors in nearest neighbors component, this\n is also the multiplication factor of minority support\n vectors\n eps (float): use to specify the initially generated support\n vectors along minority-majority lines\n n_pop (int): size of population\n w (float): intertia constant\n c1 (float): acceleration constant of local optimum\n c2 (float): acceleration constant of population optimum\n num_it (int): number of iterations\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_greater(eps, \"eps\", 0)\n self.check_greater_or_equal(n_pop, \"n_pop\", 1)\n self.check_greater_or_equal(w, \"w\", 0)\n self.check_greater_or_equal(c1, \"c1\", 0)\n self.check_greater_or_equal(c2, \"c2\", 0)\n self.check_greater_or_equal(num_it, \"num_it\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.k = k\n self.eps = eps\n self.n_pop = n_pop\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.num_it = num_it\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return cls.generate_parameter_combinations({'k': [3, 5, 7],\n 'eps': [0.05],\n 'n_pop': [5],\n 'w': [0.5, 1.0],\n 'c1': [1.0, 2.0],\n 'c2': [1.0, 2.0],\n 'num_it': [5]}, raw)\n\n 
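# NOTE (added for clarity): sample() below runs a standard particle swarm\n    # optimization over sets of candidate synthetic minority points. Each\n    # particle is a full set of generated samples; velocities are updated as\n    #   v <- w*v + c1*r1*(local_best - p) + c2*r2*(global_best - p)\n    # and positions as p <- p + v, with r1, r2 ~ U(0, 1), both bounded by the\n    # per-sample search regions. The fitness of a particle is the ROC AUC of\n    # an RBF SVC trained on the data extended with its samples and evaluated\n    # on the original (scaled) training set.\n    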
def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n # saving original dataset\n X_orig = X\n y_orig = y\n\n # scaling the records\n mms = MinMaxScaler()\n X_scaled = mms.fit_transform(X)\n\n # removing majority and minority samples far from the training data if\n # needed to increase performance\n performance_threshold = 500\n\n n_maj_to_remove = np.sum(\n y == self.maj_label) - performance_threshold\n if n_maj_to_remove > 0:\n # if majority samples are to be removed\n nn = NearestNeighbors(n_neighbors=1,\n n_jobs=self.n_jobs)\n nn.fit(X_scaled[y == self.min_label])\n dist, ind = nn.kneighbors(X_scaled)\n di = sorted([(dist[i][0], i)\n for i in range(len(ind))], key=lambda x: x[0])\n to_remove = []\n # finding the proper number of samples farest from the minority\n # samples\n for i in reversed(range(len(di))):\n if y[di[i][1]] == self.maj_label:\n to_remove.append(di[i][1])\n if len(to_remove) >= n_maj_to_remove:\n break\n # removing the samples\n X_scaled = np.delete(X_scaled, to_remove, axis=0)\n y = np.delete(y, to_remove)\n\n n_min_to_remove = np.sum(\n y == self.min_label) - performance_threshold\n if n_min_to_remove > 0:\n # if majority samples are to be removed\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X_scaled[y == self.maj_label])\n dist, ind = nn.kneighbors(X_scaled)\n di = sorted([(dist[i][0], i)\n for i in range(len(ind))], key=lambda x: x[0])\n to_remove = []\n # finding the proper number of samples farest from the minority\n # samples\n for i in reversed(range(len(di))):\n if y[di[i][1]] == self.min_label:\n to_remove.append(di[i][1])\n if len(to_remove) >= n_min_to_remove:\n break\n # removing the samples\n X_scaled = np.delete(X_scaled, to_remove, axis=0)\n y = np.delete(y, to_remove)\n\n # fitting SVM to extract initial support vectors\n svc = SVC(kernel='rbf', probability=True,\n gamma='auto', random_state=self.random_state)\n svc.fit(X_scaled, y)\n\n # extracting the support vectors\n SV_min = np.array(\n [i for i in svc.support_ if y[i] == self.min_label])\n SV_maj = np.array(\n [i for i in svc.support_ if y[i] == self.maj_label])\n\n X_SV_min = X_scaled[SV_min]\n X_SV_maj = X_scaled[SV_maj]\n\n # finding nearest majority support vectors\n n_neighbors = min([len(X_SV_maj), self.k])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_SV_maj)\n dist, ind = nn.kneighbors(X_SV_min)\n\n # finding the initial particle and specifying the search space\n X_min_gen = []\n search_space = []\n init_velocity = []\n for i in range(len(SV_min)):\n for j in range(min([len(X_SV_maj), self.k])):\n min_vector = X_SV_min[i]\n maj_vector = X_SV_maj[ind[i][j]]\n # the upper bound of the search space if specified by the\n # closest majority support vector\n upper_bound = X_SV_maj[ind[i][0]]\n # the third element of the search space specification is\n # the distance of the vector and the closest\n # majority support vector, which specifies the radius of\n # the search\n norms = np.linalg.norm(min_vector - upper_bound)\n search_space.append([min_vector, maj_vector, norms])\n # initial particles\n X_min_gen.append(min_vector + self.eps *\n (maj_vector - min_vector))\n # initial velocities\n 
init_velocity.append(self.eps*(maj_vector - min_vector))\n\n X_min_gen = np.vstack(X_min_gen)\n init_velocity = np.vstack(init_velocity)\n\n # evaluates a specific particle\n def evaluate(X_train, y_train, X_test, y_test):\n \"\"\"\n Trains support vector classifier and evaluates it\n\n Args:\n X_train (np.matrix): training vectors\n y_train (np.array): target labels\n X_test (np.matrix): test vectors\n y_test (np.array): test labels\n \"\"\"\n svc.fit(X_train, y_train)\n y_pred = svc.predict_proba(X_test)[:, np.where(\n svc.classes_ == self.min_label)[0][0]]\n return roc_auc_score(y_test, y_pred)\n\n # initializing the particle swarm and the particle and population level\n # memory\n particle_swarm = [X_min_gen.copy() for _ in range(self.n_pop)]\n velocities = [init_velocity.copy() for _ in range(self.n_pop)]\n local_best = [X_min_gen.copy() for _ in range(self.n_pop)]\n local_best_scores = [0.0]*self.n_pop\n global_best = X_min_gen.copy()\n global_best_score = 0.0\n\n def evaluate_particle(X_scaled, p, y):\n X_extended = np.vstack([X_scaled, p])\n y_extended = np.hstack([y, np.repeat(self.min_label, len(p))])\n return evaluate(X_extended, y_extended, X_scaled, y)\n\n for i in range(self.num_it):\n _logger.info(self.__class__.__name__ + \": \" + \"Iteration %d\" % i)\n # evaluate population\n scores = [evaluate_particle(X_scaled, p, y)\n for p in particle_swarm]\n\n # update best scores\n for i, s in enumerate(scores):\n if s > local_best_scores[i]:\n local_best_scores[i] = s\n local_best[i] = particle_swarm[i]\n if s > global_best_score:\n global_best_score = s\n global_best = particle_swarm[i]\n\n # update velocities\n for i, p in enumerate(particle_swarm):\n term_0 = self.w*velocities[i]\n random_1 = self.random_state.random_sample()\n random_2 = self.random_state.random_sample()\n term_1 = self.c1*random_1*(local_best[i] - p)\n term_2 = self.c2*random_2*(global_best - p)\n\n velocities[i] = term_0 + term_1 + term_2\n\n # bound velocities according to search space constraints\n for v in velocities:\n for i in range(len(v)):\n v_i_norm = np.linalg.norm(v[i])\n if v_i_norm > search_space[i][2]/2.0:\n v[i] = v[i]/v_i_norm*search_space[i][2]/2.0\n\n # update positions\n for i, p in enumerate(particle_swarm):\n particle_swarm[i] = particle_swarm[i] + velocities[i]\n\n # bound positions according to search space constraints\n for p in particle_swarm:\n for i in range(len(p)):\n ss = search_space[i]\n\n trans_vector = p[i] - ss[0]\n trans_norm = np.linalg.norm(trans_vector)\n normed_trans = trans_vector/trans_norm\n\n if trans_norm > ss[2]:\n p[i] = ss[0] + normed_trans*ss[2]\n\n X_ret = np.vstack([X_orig, mms.inverse_transform(global_best)])\n y_ret = np.hstack(\n [y_orig, np.repeat(self.min_label, len(global_best))])\n\n return (X_ret, y_ret)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'k': self.k,\n 'eps': self.eps,\n 'n_pop': self.n_pop,\n 'w': self.w,\n 'c1': self.c1,\n 'c2': self.c2,\n 'num_it': self.num_it,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass CURE_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{cure_smote,\n author=\"Ma, Li\n and Fan, Suohai\",\n title=\"CURE-SMOTE algorithm and hybrid algorithm for\n feature selection and parameter optimization\n based on random forests\",\n journal=\"BMC Bioinformatics\",\n year=\"2017\",\n month=\"Mar\",\n day=\"14\",\n volume=\"18\",\n number=\"1\",\n pages=\"169\",\n issn=\"1471-2105\",\n 
doi=\"10.1186/s12859-017-1578-z\",\n url=\"https://doi.org/10.1186/s12859-017-1578-z\"\n }\n\n Notes:\n * It is not specified how to determine the cluster with the\n \"slowest growth rate\"\n * All clusters can be removed as noise.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_clusters=5,\n noise_th=2,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_clusters (int): number of clusters to generate\n noise_th (int): below this number of elements the cluster is\n considered as noise\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_clusters, \"n_clusters\", 1)\n self.check_greater_or_equal(noise_th, \"noise_th\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_clusters = n_clusters\n self.noise_th = noise_th\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_clusters': [5, 10, 15],\n 'noise_th': [1, 3]}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardizing the data\n mms = MinMaxScaler()\n X_scaled = mms.fit_transform(X)\n\n X_min = X_scaled[y == self.min_label]\n\n # initiating clustering\n clusters = [np.array([i]) for i in range(len(X_min))]\n dm = pairwise_distances(X_min)\n\n # setting the diagonal of the distance matrix to infinity\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # starting the clustering iteration\n iteration = 0\n while len(clusters) > self.n_clusters:\n iteration = iteration + 1\n\n # delete a cluster with slowest growth rate, determined by\n # the cluster size\n if iteration % self.n_clusters == 0:\n # extracting cluster sizes\n cluster_sizes = np.array([len(c) for c in clusters])\n # removing one of the clusters with the smallest size\n to_remove = np.where(cluster_sizes == np.min(cluster_sizes))[0]\n to_remove = self.random_state.choice(to_remove)\n del clusters[to_remove]\n # adjusting the distance matrix accordingly\n dm = np.delete(dm, to_remove, axis=0)\n dm = np.delete(dm, to_remove, axis=1)\n\n # finding the cluster pair with the smallest distance\n min_coord = np.where(dm == np.min(dm))\n merge_a = 
min_coord[0][0]\n merge_b = min_coord[1][0]\n\n # merging the clusters\n clusters[merge_a] = np.hstack(\n [clusters[merge_a], clusters[merge_b]])\n # removing one of them\n del clusters[merge_b]\n # adjusting the distances in the distance matrix\n dm[merge_a] = np.min(np.vstack([dm[merge_a], dm[merge_b]]), axis=0)\n dm[:, merge_a] = dm[merge_a]\n # removing the row and column corresponding to one of\n # the merged clusters\n dm = np.delete(dm, merge_b, axis=0)\n dm = np.delete(dm, merge_b, axis=1)\n # updating the diagonal\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # removing clusters declared as noise\n to_remove = []\n for i in range(len(clusters)):\n if len(clusters[i]) < self.noise_th:\n to_remove.append(i)\n clusters = [clusters[i]\n for i in range(len(clusters)) if i not in to_remove]\n\n # all clusters can be noise\n if len(clusters) == 0:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"all clusters removed as noise\")\n return X.copy(), y.copy()\n\n # generating samples\n samples = []\n for _ in range(n_to_sample):\n cluster_idx = self.random_state.randint(len(clusters))\n center = np.mean(X_min[clusters[cluster_idx]], axis=0)\n representative = X_min[self.random_state.choice(\n clusters[cluster_idx])]\n samples.append(self.sample_between_points(center, representative))\n\n return (np.vstack([X, mms.inverse_transform(np.vstack(samples))]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_clusters': self.n_clusters,\n 'noise_th': self.noise_th,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SOMO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{somo,\n title = \"Self-Organizing Map Oversampling (SOMO) for\n imbalanced data set learning\",\n journal = \"Expert Systems with Applications\",\n volume = \"82\",\n pages = \"40 - 52\",\n year = \"2017\",\n issn = \"0957-4174\",\n doi = \"https://doi.org/10.1016/j.eswa.2017.03.073\",\n author = \"Georgios Douzas and Fernando Bacao\"\n }\n\n Notes:\n * It is not specified how to handle those cases when a cluster contains\n 1 minority samples, the mean of within-cluster distances is set to\n 100 in these cases.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_grid=10,\n sigma=0.2,\n learning_rate=0.5,\n n_iter=100,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n                                the number of minority samples will be equal to\n                                the number of majority samples\n            n_grid (int): size of grid\n            sigma (float): sigma of SOM\n            learning_rate (float): learning rate of SOM\n            n_iter (int): number of iterations\n            n_jobs (int): number of parallel jobs\n            random_state (int/RandomState/None): initializer of random_state,\n                                                    like in sklearn\n        \"\"\"\n        super().__init__()\n        self.check_greater_or_equal(proportion, 'proportion', 0)\n        self.check_greater_or_equal(n_grid, 'n_grid', 2)\n        self.check_greater(sigma, 'sigma', 0)\n        self.check_greater(learning_rate, 'learning_rate', 0)\n        self.check_greater_or_equal(n_iter, 'n_iter', 1)\n        self.check_n_jobs(n_jobs, 'n_jobs')\n\n        self.proportion = proportion\n        self.n_grid = n_grid\n        self.sigma = sigma\n        self.learning_rate = learning_rate\n        self.n_iter = n_iter\n        self.n_jobs = n_jobs\n\n        self.set_random_state(random_state)\n\n    @ classmethod\n    def parameter_combinations(cls, raw=False):\n        \"\"\"\n        Generates reasonable parameter combinations.\n\n        Returns:\n            list(dict): a list of meaningful parameter combinations\n        \"\"\"\n        parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n                                                 1.0, 1.5, 2.0],\n                                  'n_grid': [5, 9, 13],\n                                  'sigma': [0.4],\n                                  'learning_rate': [0.3, 0.5],\n                                  'n_iter': [100]}\n        return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n    def sample(self, X, y):\n        \"\"\"\n        Does the sample generation according to the class parameters.\n\n        Args:\n            X (np.ndarray): training set\n            y (np.array): target labels\n\n        Returns:\n            (np.ndarray, np.array): the extended training set and target labels\n        \"\"\"\n        _logger.info(self.__class__.__name__ + \": \" +\n                     \"Running sampling via %s\" % self.descriptor())\n\n        self.class_label_statistics(X, y)\n\n        n_to_sample = self.det_n_to_sample(self.proportion,\n                                           self.class_stats[self.maj_label],\n                                           self.class_stats[self.min_label])\n\n        if n_to_sample == 0:\n            _logger.warning(self.__class__.__name__ +\n                            \": \" + \"Sampling is not needed\")\n            return X.copy(), y.copy()\n\n        N_inter = n_to_sample/2\n        N_intra = n_to_sample/2\n\n        import minisom\n\n        # training SOM\n        som = minisom.MiniSom(self.n_grid,\n                              self.n_grid,\n                              len(X[0]),\n                              sigma=self.sigma,\n                              learning_rate=self.learning_rate,\n                              random_seed=3)\n        som.train_random(X, self.n_iter)\n\n        # constructing the grid\n        grid_min = {}\n        grid_maj = {}\n        for i in range(len(y)):\n            tmp = som.winner(X[i])\n            idx = (tmp[0], tmp[1])\n            if idx not in grid_min:\n                grid_min[idx] = []\n            if idx not in grid_maj:\n                grid_maj[idx] = []\n            if y[i] == self.min_label:\n                grid_min[idx].append(i)\n            else:\n                grid_maj[idx].append(i)\n\n        # converting the grid to arrays\n        for i in grid_min:\n            grid_min[i] = np.array(grid_min[i])\n        for i in grid_maj:\n            grid_maj[i] = np.array(grid_maj[i])\n\n        # filtering\n        filtered = {}\n        for i in grid_min:\n            if i not in grid_maj:\n                filtered[i] = True\n            else:\n                filtered[i] = (len(grid_maj[i]) + 1)/(len(grid_min[i])+1) < 1.0\n\n        # computing densities\n        densities = {}\n        for i in filtered:\n            if filtered[i]:\n                if len(grid_min[i]) > 1:\n                    paird = pairwise_distances(X[grid_min[i]])\n                    densities[i] = len(grid_min[i])/np.mean(paird)**2\n                else:\n                    densities[i] = 10\n\n        # all clusters can be filtered\n        if len(densities) == 0:\n            _logger.warning(self.__class__.__name__ +\n                            \": \" + \"all clusters filtered\")\n            return X.copy(), y.copy()\n\n        # computing neighbour densities, using the 4-neighborhood\n        neighbors = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n        pair_densities = {}\n        for i in densities:\n            for n in neighbors:\n                j = (i[0] + n[0], i[1] + n[1])\n                if j in 
densities:\n pair_densities[(i, j)] = densities[i] + densities[j]\n\n # computing weights\n density_keys = list(densities.keys())\n density_vals = np.array(list(densities.values()))\n\n # determining pair keys and density values\n pair_keys = list(pair_densities.keys())\n pair_vals = np.array(list(pair_densities.values()))\n\n # determining densities\n density_vals = (1.0/density_vals)/np.sum(1.0/density_vals)\n pair_dens_vals = (1.0/pair_vals)/np.sum(1.0/pair_vals)\n\n # computing num of samples to generate\n if len(pair_vals) > 0:\n dens_num = N_intra\n pair_num = N_inter\n else:\n dens_num = N_inter + N_intra\n pair_num = 0\n\n # generating the samples according to the extracted distributions\n samples = []\n while len(samples) < dens_num:\n cluster_idx = density_keys[self.random_state.choice(\n np.arange(len(density_keys)), p=density_vals)]\n cluster = grid_min[cluster_idx]\n sample_a, sample_b = self.random_state.choice(cluster, 2)\n samples.append(self.sample_between_points(\n X[sample_a], X[sample_b]))\n\n while len(samples) < pair_num:\n idx = pair_keys[self.random_state.choice(\n np.arange(len(pair_keys)), p=pair_dens_vals)]\n cluster_a = grid_min[idx[0]]\n cluster_b = grid_min[idx[1]]\n X_a = X[self.random_state.choice(cluster_a)]\n X_b = X[self.random_state.choice(cluster_b)]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_grid': self.n_grid,\n 'sigma': self.sigma,\n 'learning_rate': self.learning_rate,\n 'n_iter': self.n_iter,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ISOMAP_Hybrid(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{isomap_hybrid,\n author = {Gu, Qiong and Cai, Zhihua and Zhu, Li},\n title = {Classification of Imbalanced Data Sets by\n Using the Hybrid Re-sampling Algorithm\n Based on Isomap},\n booktitle = {Proceedings of the 4th International\n Symposium on Advances in\n Computation and Intelligence},\n series = {ISICA '09},\n year = {2009},\n isbn = {978-3-642-04842-5},\n location = {Huangshi, China},\n pages = {287--296},\n numpages = {10},\n doi = {10.1007/978-3-642-04843-2_31},\n acmid = {1691478},\n publisher = {Springer-Verlag},\n address = {Berlin, Heidelberg},\n keywords = {Imbalanced data set, Isomap, NCR,\n Smote, re-sampling},\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_noise_removal,\n OverSampling.cat_dim_reduction,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_components=3,\n smote_n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n n_components (int): number of components\n smote_n_neighbors (int): number of neighbors in SMOTE sampling\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_components, \"n_components\", 1)\n self.check_greater_or_equal(smote_n_neighbors, \"smote_n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_components = n_components\n self.smote_n_neighbors = smote_n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_components': [2, 3, 4],\n 'smote_n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n self.isomap = Isomap(n_neighbors=self.n_neighbors,\n n_components=self.n_components,\n n_jobs=self.n_jobs)\n\n X_trans = self.isomap.fit_transform(X, y)\n\n X_sm, y_sm = SMOTE(proportion=self.proportion,\n n_neighbors=self.smote_n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X_trans, y)\n\n nc = NeighborhoodCleaningRule(n_jobs=self.n_jobs)\n return nc.remove_noise(X_sm, y_sm)\n\n def preprocessing_transform(self, X):\n \"\"\"\n Transforms new data by the trained isomap\n\n Args:\n X (np.matrix): new data\n\n Returns:\n np.matrix: the transformed data\n \"\"\"\n return self.isomap.transform(X)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_components': self.n_components,\n 'smote_n_neighbors': self.smote_n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass CE_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{ce_smote,\n author={Chen, S. and Guo, G. 
and Chen, L.},\n booktitle={2010 IEEE 24th International\n Conference on Advanced Information\n Networking and Applications\n Workshops},\n title={A New Over-Sampling Method Based on\n Cluster Ensembles},\n year={2010},\n volume={},\n number={},\n pages={599-604},\n keywords={data mining;Internet;pattern\n classification;pattern clustering;\n over sampling method;cluster\n ensembles;classification method;\n imbalanced data handling;CE-SMOTE;\n clustering consistency index;\n cluster boundary minority samples;\n imbalanced public data set;\n Mathematics;Computer science;\n Electronic mail;Accuracy;Nearest\n neighbor searches;Application\n software;Data mining;Conferences;\n Web sites;Information retrieval;\n classification;imbalanced data\n sets;cluster ensembles;\n over-sampling},\n doi={10.1109/WAINA.2010.40},\n ISSN={},\n month={April}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_borderline,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n h=10,\n k=5,\n alpha=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n h (int): size of ensemble\n k (int): number of clusters/neighbors\n alpha (float): [0,1] threshold to select boundary samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(h, \"h\", 1)\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_in_range(alpha, \"alpha\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.h = h\n self.k = k\n self.alpha = alpha\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'h': [5, 10, 15],\n 'k': [3, 5, 7],\n 'alpha': [0.2, 0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # do the clustering and labelling\n d = len(X[0])\n labels = []\n for _ in range(self.h):\n f = self.random_state.randint(int(d/2), d)\n features = self.random_state.choice(np.arange(d), f)\n n_clusters = min([len(X), self.k])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X[:, features])\n labels.append(kmeans.labels_)\n\n # do the cluster matching, clustering 0 will be considered the one 
to\n # match the others to the problem of finding cluster matching is\n # basically the \"assignment problem\"\n base_label = 0\n for i in range(len(labels)):\n if not i == base_label:\n cost_matrix = np.zeros(shape=(self.k, self.k))\n for j in range(self.k):\n mask_j = labels[base_label] == j\n for k in range(self.k):\n mask_k = labels[i] == k\n mask_jk = np.logical_and(mask_j, mask_k)\n cost_matrix[j, k] = np.sum(mask_jk)\n # solving the assignment problem\n row_ind, _ = soptimize.linear_sum_assignment(-cost_matrix)\n # doing the relabeling\n relabeling = labels[i].copy()\n for j in range(len(row_ind)):\n relabeling[labels[i] == k] = j\n labels[i] = relabeling\n\n # compute clustering consistency index\n labels = np.vstack(labels)\n cci = np.apply_along_axis(lambda x: max(\n set(x.tolist()), key=x.tolist().count), 0, labels)\n cci = np.sum(labels == cci, axis=0)\n cci = cci/self.h\n\n # determining minority boundary samples\n P_boundary = X[np.logical_and(\n y == self.min_label, cci < self.alpha)]\n\n # there might be no boundary samples\n if len(P_boundary) <= 1:\n _logger.warning(self.__class__.__name__ + \": \" + \"empty boundary\")\n return X.copy(), y.copy()\n\n # finding nearest neighbors of boundary samples\n n_neighbors = min([len(P_boundary), self.k])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn.fit(P_boundary)\n dist, ind = nn.kneighbors(P_boundary)\n\n # do the sampling\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.randint(len(ind))\n point_a = P_boundary[idx]\n point_b = P_boundary[self.random_state.choice(ind[idx][1:])]\n samples.append(self.sample_between_points(point_a, point_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'h': self.h,\n 'k': self.k,\n 'alpha': self.alpha,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Edge_Det_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{Edge_Det_SMOTE,\n author={Kang, Y. and Won, S.},\n booktitle={ICCAS 2010},\n title={Weight decision algorithm for oversampling\n technique on class-imbalanced learning},\n year={2010},\n volume={},\n number={},\n pages={182-186},\n keywords={edge detection;learning (artificial\n intelligence);weight decision\n algorithm;oversampling technique;\n class-imbalanced learning;class\n imbalanced data problem;edge\n detection algorithm;spatial space\n representation;Classification\n algorithms;Image edge detection;\n Training;Noise measurement;Glass;\n Training data;Machine learning;\n Imbalanced learning;Classification;\n Weight decision;Oversampling;\n Edge detection},\n doi={10.1109/ICCAS.2010.5669889},\n ISSN={},\n month={Oct}}\n\n Notes:\n * This technique is very loosely specified.\n \"\"\"\n\n categories = [OverSampling.cat_density_based,\n OverSampling.cat_borderline,\n OverSampling.cat_extensive]\n\n def __init__(self, proportion=1.0, k=5, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n k (int): number of neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(k, \"k\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.k = k\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'k': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n d = len(X[0])\n X_min = X[y == self.min_label]\n\n # organizing class labels according to feature ranking\n magnitudes = np.zeros(len(X))\n for i in range(d):\n to_sort = zip(X[:, i], np.arange(len(X)), y)\n _, idx, label = zip(*sorted(to_sort, key=lambda x: x[0]))\n # extracting edge magnitudes in this dimension\n for j in range(1, len(idx)-1):\n magnitudes[idx[j]] = magnitudes[idx[j]] + \\\n (label[j-1] - label[j+1])**2\n\n # density estimation\n magnitudes = magnitudes[y == self.min_label]\n magnitudes = np.sqrt(magnitudes)\n magnitudes = magnitudes/np.sum(magnitudes)\n\n # fitting nearest neighbors models to minority samples\n n_neighbors = min([len(X_min), self.k+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # do the sampling\n samples = []\n for _ in range(n_to_sample):\n idx = self.random_state.choice(np.arange(len(X_min)), p=magnitudes)\n X_a = X_min[idx]\n X_b = X_min[self.random_state.choice(ind[idx][1:])]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'k': self.k,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass CBSO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{cbso,\n author=\"Barua, Sukarna\n and Islam, Md. 
Monirul\n and Murase, Kazuyuki\",\n editor=\"Lu, Bao-Liang\n and Zhang, Liqing\n and Kwok, James\",\n title=\"A Novel Synthetic Minority Oversampling\n Technique for Imbalanced Data Set\n Learning\",\n booktitle=\"Neural Information Processing\",\n year=\"2011\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"735--744\",\n isbn=\"978-3-642-24958-7\"\n }\n\n Notes:\n * Clusters containing 1 element induce cloning of samples.\n \"\"\"\n\n categories = [OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based,\n OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n C_p=1.3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n C_p (float): used to set the threshold of clustering\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater(C_p, \"C_p\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.C_p = C_p\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'C_p': [0.8, 1.0, 1.3, 1.6]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model to find neighbors of minority points\n nn = NearestNeighbors(n_neighbors=self.n_neighbors + 1,\n n_jobs=self.n_jobs).fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting the number of majority neighbors\n weights = [np.sum(y[ind[i][1:]] == self.maj_label)\n for i in range(len(X_min))]\n # determine distribution of generating data\n weights = weights/np.sum(weights)\n\n # do the clustering\n nn = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs).fit(X_min)\n d_avg = np.mean(nn.kneighbors(X_min)[0][:, 1])\n T_h = d_avg*self.C_p\n\n # initiating clustering\n clusters = [np.array([i]) for i in range(len(X_min))]\n dm = pairwise_distances(X_min)\n\n # setting the diagonal of the distance matrix to infinity\n for i in 
range(len(dm)):\n dm[i, i] = np.inf\n\n # starting the clustering iteration\n while True:\n # finding the cluster pair with the smallest distance\n min_coord = np.where(dm == np.min(dm))\n merge_a = min_coord[0][0]\n merge_b = min_coord[1][0]\n\n # check termination conditions\n if dm[merge_a, merge_b] > T_h or len(dm) == 1:\n break\n\n # merging the clusters\n clusters[merge_a] = np.hstack(\n [clusters[merge_a], clusters[merge_b]])\n # removing one of them\n del clusters[merge_b]\n # adjusting the distances in the distance matrix\n dm[merge_a] = np.min(np.vstack([dm[merge_a], dm[merge_b]]), axis=0)\n dm[:, merge_a] = dm[merge_a]\n # removing the row and column corresponding to one of the\n # merged clusters\n dm = np.delete(dm, merge_b, axis=0)\n dm = np.delete(dm, merge_b, axis=1)\n # updating the diagonal\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # extracting cluster labels\n labels = np.zeros(len(X_min)).astype(int)\n for i in range(len(clusters)):\n for j in clusters[i]:\n labels[j] = i\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min)), p=weights)\n if len(clusters[labels[idx]]) <= 1:\n samples.append(X_min[idx])\n continue\n else:\n random_idx = self.random_state.choice(clusters[labels[idx]])\n while random_idx == idx:\n random_idx = self.random_state.choice(\n clusters[labels[idx]])\n samples.append(self.sample_between_points(\n X_min[idx], X_min[random_idx]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'C_p': self.C_p,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass E_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{e_smote,\n author={Deepa, T. and Punithavalli, M.},\n booktitle={2011 3rd International Conference on\n Electronics Computer Technology},\n title={An E-SMOTE technique for feature selection\n in High-Dimensional Imbalanced Dataset},\n year={2011},\n volume={2},\n number={},\n pages={322-324},\n keywords={bioinformatics;data mining;pattern\n classification;support vector machines;\n E-SMOTE technique;feature selection;\n high-dimensional imbalanced dataset;\n data mining;bio-informatics;dataset\n balancing;SVM classification;micro\n array dataset;Feature extraction;\n Genetic algorithms;Support vector\n machines;Data mining;Machine learning;\n Bioinformatics;Cancer;Imbalanced\n dataset;Featue Selection;E-SMOTE;\n Support Vector Machine[SVM]},\n doi={10.1109/ICECTECH.2011.5941710},\n ISSN={},\n month={April}}\n\n Notes:\n * This technique is basically unreproducible. I try to implement\n something following the idea of applying some simple genetic\n algorithm for optimization.\n * In my best understanding, the technique uses evolutionary algorithms\n for feature selection and then applies vanilla SMOTE on the\n selected features only.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_dim_reduction,\n OverSampling.cat_memetic,\n OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n min_features=2,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
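# The merge loop in CBSO.sample above is effectively single-linkage
# agglomeration stopped at the threshold T_h = C_p * (mean nearest-neighbour
# distance). A rough, hedged analogue using scipy's hierarchical clustering;
# this illustrates the idea rather than reproducing the exact implementation,
# and the data below are synthetic.
import numpy as np
from scipy.cluster.hierarchy import linkage, fcluster
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(0)
X_min = rng.rand(20, 2)

C_p = 1.3
d_avg = np.mean(NearestNeighbors(n_neighbors=2).fit(X_min)
                .kneighbors(X_min)[0][:, 1])
T_h = d_avg * C_p

labels = fcluster(linkage(X_min, method='single'), t=T_h, criterion='distance')
print(labels)   # flat cluster labels of the minority points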
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in the nearest neighbors\n component\n min_features (int): minimum number of features\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(min_features, \"min_features\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.min_features = min_features\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'min_features': [1, 2, 3]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n min_features = min(self.min_features, len(X[0]))\n\n if len(X) < 800:\n classifier = SVC(gamma='auto', random_state=self.random_state)\n else:\n classifier = DecisionTreeClassifier(\n max_depth=4, random_state=self.random_state)\n\n # parameters of the evolutionary algorithm\n n_generations = 50\n n_population = 5\n\n # creating initial mask\n mask = self.random_state.choice([True, False], len(X[0]), replace=True)\n # fixing if the mask doesn't contain any features\n if np.sum(mask) == 0:\n mask[self.random_state.randint(len(mask))] = True\n\n def crossover(mask_a, mask_b):\n \"\"\"\n Crossover operation for two masks\n\n Args:\n mask_a (np.array): binary mask 1\n mask_b (np.array): binary mask 2\n\n Returns:\n np.array: the result of crossover\n \"\"\"\n mask = mask_a.copy()\n for i in range(len(mask_b)):\n if self.random_state.randint(0, 2) == 0:\n mask[i] = mask_b[i]\n\n while np.sum(mask) < min_features:\n mask[self.random_state.randint(len(mask))] = True\n\n return mask\n\n def mutate(mask_old):\n \"\"\"\n Mutation operation for a mask\n\n Args:\n mask_old (np.array): binary mask\n\n Returns:\n np.array: the result of mutation\n \"\"\"\n mask = mask_old.copy()\n for i in range(len(mask)):\n if self.random_state.randint(0, 2) == 0:\n mask[i] = not mask[i]\n\n while np.sum(mask) < min_features:\n mask[self.random_state.randint(len(mask))] = True\n\n return mask\n\n # generating initial population\n population = [[0, mask.copy()] for _ in range(n_population)]\n for _ in range(n_generations):\n # in each generation\n for _ in range(n_population):\n # for each element of a population\n if self.random_state.randint(0, 2) == 0:\n # crossover\n i_0 = self.random_state.randint(n_population)\n i_1 = self.random_state.randint(n_population)\n mask = crossover(population[i_0][1], population[i_1][1])\n else:\n # mutation\n idx = self.random_state.randint(n_population)\n mask = mutate(population[idx][1])\n # evaluation\n 
message = \"evaluating mask selection with features %d/%d\"\n message = message % (np.sum(mask), len(mask))\n _logger.info(self.__class__.__name__ + \": \" + message)\n classifier.fit(X[:, mask], y)\n score = np.sum(y == classifier.predict(X[:, mask]))/len(y)\n # appending the result to the population\n population.append([score, mask])\n # sorting the population in a reversed order and keeping the\n # elements with the highest scores\n population = sorted(population, key=lambda x: -x[0])[:n_population]\n\n self.mask = population[0][1]\n # resampling the population in the given dimensions\n\n smote = SMOTE(self.proportion,\n self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n\n return smote.sample(X[:, self.mask], y)\n\n def preprocessing_transform(self, X):\n \"\"\"\n Transform new data by the learnt transformation\n\n Args:\n X (np.matrix): new data\n\n Returns:\n np.matrix: transformed data\n \"\"\"\n return X[:, self.mask]\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'min_features': self.min_features,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass DBSMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{dbsmote,\n author=\"Bunkhumpornpat, Chumphol\n and Sinapiromsaran, Krung\n and Lursinsap, Chidchanok\",\n title=\"DBSMOTE: Density-Based Synthetic Minority\n Over-sampling TEchnique\",\n journal=\"Applied Intelligence\",\n year=\"2012\",\n month=\"Apr\",\n day=\"01\",\n volume=\"36\",\n number=\"3\",\n pages=\"664--684\",\n issn=\"1573-7497\",\n doi=\"10.1007/s10489-011-0287-y\",\n url=\"https://doi.org/10.1007/s10489-011-0287-y\"\n }\n\n Notes:\n * Standardization is needed to use absolute eps values.\n * The clustering is likely to identify all instances as noise, fixed\n by recursive call with increaseing eps.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_noise_removal,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based]\n\n def __init__(self,\n proportion=1.0,\n eps=0.8,\n min_samples=3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
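# Usage sketch for the E_SMOTE class above. Note that the returned X lives in
# the reduced feature space chosen by the evolutionary search, and that new
# data must be passed through preprocessing_transform to apply the same mask.
# The dataset and parameters are illustrative; importability of the class is
# assumed as before.
from sklearn.datasets import make_classification

X, y = make_classification(n_samples=200, n_features=6, n_informative=4,
                           n_redundant=0, weights=[0.9, 0.1], random_state=3)
es = E_SMOTE(proportion=1.0, n_neighbors=5, min_features=2, random_state=3)
X_res, y_res = es.sample(X, y)              # X_res keeps only selected columns
X_new = es.preprocessing_transform(X[:10])  # same mask applied to unseen data
print(X.shape, X_res.shape, X_new.shape)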
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n eps (float): eps paramter of DBSCAN\n min_samples (int): min_samples paramter of DBSCAN\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater(eps, \"eps\", 0)\n self.check_greater_or_equal(min_samples, \"min_samples\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.eps = eps\n self.min_samples = min_samples\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'eps': [0.5, 0.8, 1.2],\n 'min_samples': [1, 3, 5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n ss = StandardScaler().fit(X)\n X_ss = ss.transform(X)\n\n # doing the clustering using DBSCAN\n X_min = X_ss[y == self.min_label]\n db = DBSCAN(self.eps, self.min_samples, n_jobs=self.n_jobs).fit(X_min)\n labels = db.labels_\n num_labels = np.max(labels)+1\n\n if num_labels == 0:\n # adjusting the parameters if no clusters were identified\n message = (\"Number of clusters is 0, trying to increase eps and \"\n \"decrease min_samples\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n if self.eps >= 2 or self.min_samples <= 2:\n message = (\"Number of clusters is 0, can't adjust parameters \"\n \"further\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n else:\n return DBSMOTE(proportion=self.proportion,\n eps=self.eps*1.5,\n min_samples=self.min_samples-1,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n # determining cluster size distribution\n clusters = [np.where(labels == i)[0] for i in range(num_labels)]\n cluster_sizes = np.array([np.sum(labels == i)\n for i in range(num_labels)])\n cluster_dist = cluster_sizes/np.sum(cluster_sizes)\n\n # Bellman-Ford algorithm, inspired by\n # https://gist.github.com/joninvski/701720\n def initialize(graph, source):\n \"\"\"\n Initializes shortest path algorithm.\n\n Args:\n graph (dict): graph in dictionary representation\n source (key): source node\n\n Returns:\n dict, dict: initialized distance and path dictionaries\n \"\"\"\n d = {}\n p = {}\n for node in graph:\n d[node] = float('Inf')\n p[node] = None\n d[source] = 0\n return d, p\n\n def relax(u, v, graph, d, p):\n \"\"\"\n Checks if shorter path exists.\n\n Args:\n u (key): key of a node\n v (key): key of another node\n graph (dict): the graph object\n d (dict): the 
distances dictionary\n p (dict): the paths dictionary\n \"\"\"\n if d[v] > d[u] + graph[u][v]:\n d[v] = d[u] + graph[u][v]\n p[v] = u\n\n def bellman_ford(graph, source):\n \"\"\"\n Main entry point of the Bellman-Ford algorithm\n\n Args:\n graph (dict): a graph in dictionary representation\n source (key): the key of the source node\n \"\"\"\n d, p = initialize(graph, source)\n for i in range(len(graph)-1):\n for u in graph:\n for v in graph[u]:\n relax(u, v, graph, d, p)\n for u in graph:\n for v in graph[u]:\n assert d[v] <= d[u] + graph[u][v]\n return d, p\n\n # extract graphs and center-like objects\n graphs = []\n centroid_indices = []\n shortest_paths = []\n for c in range(num_labels):\n # extracting the cluster elements\n cluster = X_min[clusters[c]]\n # initializing the graph object\n graph = {}\n for i in range(len(cluster)):\n graph[i] = {}\n\n # fitting nearest neighbors model to the cluster elements\n nn = NearestNeighbors(n_neighbors=len(cluster), n_jobs=self.n_jobs)\n nn.fit(cluster)\n dist, ind = nn.kneighbors(cluster)\n\n # extracting graph edges according to directly density reachabality\n # definition\n for i in range(len(cluster)):\n n = min([len(cluster), (self.min_samples + 1)])\n index_set = ind[i][1:n]\n for j in range(len(cluster)):\n if j in index_set and dist[i][ind[i] == j][0] < self.eps:\n graph[i][j] = dist[i][ind[i] == j][0]\n graphs.append(graph)\n # finding the index of the center like object\n centroid_ind = nn.kneighbors(\n np.mean(cluster, axis=0).reshape(1, -1))[1][0][0]\n centroid_indices.append(centroid_ind)\n # extracting shortest paths from centroid object\n shortest_paths.append(bellman_ford(graph, centroid_ind))\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.choice(\n np.arange(len(clusters)), p=cluster_dist)\n cluster = X_min[clusters[cluster_idx]]\n idx = self.random_state.choice(range(len(clusters[cluster_idx])))\n\n # executing shortest path algorithm\n distances, parents = shortest_paths[cluster_idx]\n\n # extracting path\n path = [idx]\n while not parents[path[-1]] is None:\n path.append(parents[path[-1]])\n\n if len(path) == 1:\n # if the center like object is selected\n samples.append(cluster[path[0]])\n elif len(path) == 2:\n # if the path consists of 1 edge\n X_a = cluster[path[0]]\n X_b = cluster[path[1]]\n sample = self.sample_between_points_componentwise(X_a, X_b)\n samples.append(sample)\n else:\n # if the path consists of at least two edges\n random_vertex = self.random_state.randint(len(path)-1)\n X_a = cluster[path[random_vertex]]\n X_b = cluster[path[random_vertex + 1]]\n sample = self.sample_between_points_componentwise(X_a, X_b)\n samples.append(sample)\n\n return (np.vstack([X, ss.inverse_transform(np.vstack(samples))]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'eps': self.eps,\n 'min_samples': self.min_samples,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ASMOBD(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{asmobd,\n author={Senzhang Wang and Zhoujun Li and Wenhan\n Chao and Qinghua Cao},\n booktitle={The 2012 International Joint Conference\n on Neural Networks (IJCNN)},\n title={Applying adaptive over-sampling technique\n based on data density and cost-sensitive\n SVM to imbalanced learning},\n year={2012},\n volume={},\n 
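# A tiny standalone illustration of the graph representation and shortest-path
# backtracking used inside DBSMOTE.sample above: nodes are integer indices,
# graph[u][v] holds an edge weight, and the parents dictionary is walked back
# from a node towards the centroid-like source. The weights are made up.
graph = {0: {1: 1.0, 2: 4.0},
         1: {0: 1.0, 2: 1.5},
         2: {0: 4.0, 1: 1.5}}

def shortest_paths(graph, source):
    dist = {u: float('inf') for u in graph}
    parent = {u: None for u in graph}
    dist[source] = 0.0
    for _ in range(len(graph) - 1):          # Bellman-Ford relaxation rounds
        for u in graph:
            for v, w in graph[u].items():
                if dist[v] > dist[u] + w:
                    dist[v] = dist[u] + w
                    parent[v] = u
    return dist, parent

dist, parent = shortest_paths(graph, source=0)
path = [2]
while parent[path[-1]] is not None:          # backtrack 2 -> 1 -> 0
    path.append(parent[path[-1]])
print(dist[2], path)                         # 2.5 [2, 1, 0]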
number={},\n pages={1-8},\n doi={10.1109/IJCNN.2012.6252696},\n ISSN={2161-4407},\n month={June}}\n\n Notes:\n * In order to use absolute thresholds, the data is standardized.\n * The technique has many parameters, not easy to find the right\n combination.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_noise_removal,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n min_samples=3,\n eps=0.8,\n eta=0.5,\n T_1=1.0,\n T_2=1.0,\n t_1=4.0,\n t_2=4.0,\n a=0.05,\n smoothing='linear',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n min_samples (int): parameter of OPTICS\n eps (float): parameter of OPTICS\n eta (float): tradeoff paramter\n T_1 (float): noise threshold (see paper)\n T_2 (float): noise threshold (see paper)\n t_1 (float): noise threshold (see paper)\n t_2 (float): noise threshold (see paper)\n a (float): smoothing factor (see paper)\n smoothing (str): 'sigmoid'/'linear'\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(min_samples, \"min_samples\", 1)\n self.check_greater(eps, \"eps\", 0)\n self.check_in_range(eta, \"eta\", [0, 1])\n self.check_greater(T_1, \"T_1\", 0)\n self.check_greater(T_2, \"T_2\", 0)\n self.check_greater(t_1, \"t_1\", 0)\n self.check_greater(t_2, \"t_2\", 0)\n self.check_greater(a, \"a\", 0)\n self.check_isin(smoothing, \"smoothing\", ['sigmoid', 'linear'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.min_samples = min_samples\n self.eps = eps\n self.eta = eta\n self.T_1 = T_1\n self.T_2 = T_2\n self.t_1 = t_1\n self.t_2 = t_2\n self.a = a\n self.smoothing = smoothing\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'min_samples': [3],\n 'eps': [0.3],\n 'eta': [0.5],\n 'T_1': [0.7, 1.0, 1.4],\n 'T_2': [0.7, 1.0, 1.4],\n 't_1': [4.0],\n 't_2': [4.0],\n 'a': [0.05, 0.1],\n 'smoothing': ['sigmoid', 'linear']}\n\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardizing the data to enable using absolute thresholds\n ss = StandardScaler().fit(X)\n X_ss = ss.transform(X)\n\n X_min = X_ss[y == self.min_label]\n\n # executing the optics algorithm\n min_samples = 
min([len(X_min)-1, self.min_samples])\n o = OPTICS(min_samples=min_samples,\n max_eps=self.eps,\n n_jobs=self.n_jobs)\n o.fit(X_min)\n cd = o.core_distances_\n r = o.reachability_\n\n # identifying noise\n noise = np.logical_and(cd > self.T_1, r > self.T_2)\n\n # fitting nearest neighbors models to identify the number of majority\n # samples in local environments\n nn = NearestNeighbors(n_neighbors=self.min_samples, n_jobs=self.n_jobs)\n nn.fit(X_ss)\n n_majs = []\n ratio = []\n for i in range(len(X_min)):\n ind = nn.radius_neighbors(X_min[i].reshape(\n 1, -1), radius=cd[i], return_distance=False)[0]\n n_maj = np.sum(y[ind] == self.maj_label)/len(ind)\n n_majs.append(n_maj)\n n_min = len(ind) - n_maj - 1\n if n_min == 0:\n ratio.append(np.inf)\n else:\n ratio.append(n_maj/n_min)\n\n n_maj = np.array(n_maj)\n ratio = np.array(ratio)\n\n # second constraint on noise\n noise_2 = np.logical_and(cd > np.mean(\n cd)*self.t_1, r > np.mean(r)*self.t_2)\n\n # calculating density according to the smoothing function specified\n if self.smoothing == 'sigmoid':\n balance_ratio = np.abs(2.0/(1.0 + np.exp(-self.a*ratio[i])) - 1.0)\n df = self.eta*cd + (1.0 - self.eta)*n_maj - balance_ratio\n else:\n df = self.eta*(self.eta*cd + (1.0 - self.eta)*n_maj) + \\\n (1 - self.eta)*len(X_min)/n_to_sample\n\n # unifying the conditions on noise\n not_noise = np.logical_not(np.logical_or(noise, noise_2))\n\n # checking if there are not noise samples remaining\n if np.sum(not_noise) == 0:\n message = (\"All minority samples found to be noise, increasing\"\n \"noise thresholds\")\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n return ASMOBD(proportion=self.proportion,\n min_samples=self.min_samples,\n eps=self.eps,\n eta=self.eta,\n T_1=self.T_1*1.5,\n T_2=self.T_2*1.5,\n t_1=self.t_1*1.5,\n t_2=self.t_2*1.5,\n a=self.a,\n smoothing=self.smoothing,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n # removing noise and adjusting the density factors accordingly\n X_min_not_noise = X_min[not_noise]\n\n # checking if there are not-noisy samples\n if len(X_min_not_noise) <= 2:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"no not-noise minority sample remained\")\n return X.copy(), y.copy()\n\n df = np.delete(df, np.where(np.logical_not(not_noise))[0])\n density = df/np.sum(df)\n\n # fitting nearest neighbors model to non-noise minority samples\n n_neighbors = min([len(X_min_not_noise), self.min_samples + 1])\n nn_not_noise = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn_not_noise.fit(X_min_not_noise)\n dist, ind = nn_not_noise.kneighbors(X_min_not_noise)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min_not_noise)),\n p=density)\n random_neighbor_idx = self.random_state.choice(ind[idx][1:])\n X_a = X_min_not_noise[idx]\n X_b = X_min_not_noise[random_neighbor_idx]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, ss.inverse_transform(np.vstack(samples))]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'min_samples': self.min_samples,\n 'eps': self.eps,\n 'eta': self.eta,\n 'T_1': self.T_1,\n 'T_2': self.T_2,\n 't_1': self.t_1,\n 't_2': self.t_2,\n 'a': self.a,\n 'smoothing': self.smoothing,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass 
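# Sketch of the OPTICS-based noise criterion used in ASMOBD above: minority
# points whose core distance and reachability both exceed the absolute
# thresholds T_1 and T_2 are treated as noise. Standardization mirrors the
# implementation; the data and threshold values here are illustrative.
import numpy as np
from sklearn.cluster import OPTICS
from sklearn.preprocessing import StandardScaler

rng = np.random.RandomState(1)
X_min = StandardScaler().fit_transform(rng.rand(40, 2))

o = OPTICS(min_samples=3, max_eps=0.8).fit(X_min)
T_1, T_2 = 1.0, 1.0
noise = np.logical_and(o.core_distances_ > T_1, o.reachability_ > T_2)
print(int(noise.sum()), "of", len(X_min), "minority points flagged as noise")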
Assembled_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{assembled_smote,\n author={Zhou, B. and Yang, C. and Guo, H. and\n Hu, J.},\n booktitle={The 2013 International Joint Conference\n on Neural Networks (IJCNN)},\n title={A quasi-linear SVM combined with assembled\n SMOTE for imbalanced data classification},\n year={2013},\n volume={},\n number={},\n pages={1-7},\n keywords={approximation theory;interpolation;\n pattern classification;sampling\n methods;support vector machines;trees\n (mathematics);quasilinear SVM;\n assembled SMOTE;imbalanced dataset\n classification problem;oversampling\n method;quasilinear kernel function;\n approximate nonlinear separation\n boundary;mulitlocal linear boundaries;\n interpolation;data distribution\n information;minimal spanning tree;\n local linear partitioning method;\n linear separation boundary;synthetic\n minority class samples;oversampled\n dataset classification;standard SVM;\n composite quasilinear kernel function;\n artificial data datasets;benchmark\n datasets;classification performance\n improvement;synthetic minority\n over-sampling technique;Support vector\n machines;Kernel;Merging;Standards;\n Sociology;Statistics;Interpolation},\n doi={10.1109/IJCNN.2013.6707035},\n ISSN={2161-4407},\n month={Aug}}\n\n Notes:\n * Absolute value of the angles extracted should be taken.\n (implemented this way)\n * It is not specified how many samples are generated in the various\n clusters.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_borderline,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n pop=2,\n thres=0.3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n pop (int): lower threshold on cluster sizes\n thres (float): threshold on angles\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(pop, \"pop\", 1)\n self.check_in_range(thres, \"thres\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.pop = pop\n self.thres = thres\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'pop': [2, 4, 5],\n 'thres': [0.1, 0.3, 0.5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # finding the set of border and non-border minority elements\n n_min_neighbors = [np.sum(y[ind[i]] == self.min_label)\n for i in range(len(ind))]\n border_mask = np.logical_not(np.array(n_min_neighbors) == n_neighbors)\n X_border = X_min[border_mask]\n X_non_border = X_min[np.logical_not(border_mask)]\n\n if len(X_border) == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"X_border is empty\")\n return X.copy(), y.copy()\n\n # initializing clustering\n clusters = [np.array([i]) for i in range(len(X_border))]\n dm = pairwise_distances(X_border)\n for i in range(len(dm)):\n dm[i, i] = np.inf\n\n # do the clustering\n while len(dm) > 1 and np.min(dm) < np.inf:\n # extracting coordinates of clusters with the minimum distance\n min_coord = np.where(dm == np.min(dm))\n merge_a = min_coord[0][0]\n merge_b = min_coord[1][0]\n\n # checking the size of clusters to see if they should be merged\n if (len(clusters[merge_a]) < self.pop\n or len(clusters[merge_b]) < self.pop):\n # if both clusters are small, do the merge\n clusters[merge_a] = np.hstack([clusters[merge_a],\n clusters[merge_b]])\n del clusters[merge_b]\n # update the distance matrix accordingly\n dm[merge_a] = np.min(np.vstack([dm[merge_a], dm[merge_b]]),\n axis=0)\n dm[:, merge_a] = dm[merge_a]\n # remove columns\n dm = np.delete(dm, merge_b, axis=0)\n dm = np.delete(dm, merge_b, 
axis=1)\n # fix the diagonal entries\n for i in range(len(dm)):\n dm[i, i] = np.inf\n else:\n # otherwise find principal directions\n pca_a = PCA(n_components=1).fit(X_border[clusters[merge_a]])\n pca_b = PCA(n_components=1).fit(X_border[clusters[merge_b]])\n # extract the angle of principal directions\n numerator = np.dot(pca_a.components_[0], pca_b.components_[0])\n denominator = np.linalg.norm(pca_a.components_[0])\n denominator *= np.linalg.norm(pca_b.components_[0])\n angle = abs(numerator/denominator)\n # check if angle if angle is above a specific threshold\n if angle > self.thres:\n # do the merge\n clusters[merge_a] = np.hstack([clusters[merge_a],\n clusters[merge_b]])\n del clusters[merge_b]\n # update the distance matrix acoordingly\n dm[merge_a] = np.min(np.vstack([dm[merge_a], dm[merge_b]]),\n axis=0)\n dm[:, merge_a] = dm[merge_a]\n # remove columns\n dm = np.delete(dm, merge_b, axis=0)\n dm = np.delete(dm, merge_b, axis=1)\n # fixing the digaonal entries\n for i in range(len(dm)):\n dm[i, i] = np.inf\n else:\n # changing the distance of clusters to fininte\n dm[merge_a, merge_b] = np.inf\n dm[merge_b, merge_a] = np.inf\n\n # extract vectors belonging to the various clusters\n vectors = [X_border[c] for c in clusters if len(c) > 0]\n # adding non-border samples\n if len(X_non_border) > 0:\n vectors.append(X_non_border)\n\n # extract cluster sizes and calculating point distribution in clusters\n # the last element of the clusters is the set of non-border xamples\n cluster_sizes = np.array([len(v) for v in vectors])\n densities = cluster_sizes/np.sum(cluster_sizes)\n\n # extracting nearest neighbors in clusters\n def fit_knn(vectors):\n n_neighbors = min([self.n_neighbors + 1, len(vectors)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n return nn.fit(vectors).kneighbors(vectors)\n\n nns = [fit_knn(v) for v in vectors]\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.choice(len(vectors), p=densities)\n len_cluster = len(vectors[cluster_idx])\n sample_idx = self.random_state.choice(np.arange(len_cluster))\n\n if len_cluster > 1:\n choose_from = nns[cluster_idx][1][sample_idx][1:]\n random_neighbor_idx = self.random_state.choice(choose_from)\n else:\n random_neighbor_idx = sample_idx\n\n X_a = vectors[cluster_idx][sample_idx]\n X_b = vectors[cluster_idx][random_neighbor_idx]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'pop': self.pop,\n 'thres': self.thres,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SDSMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{sdsmote,\n author={Li, K. and Zhang, W. and Lu, Q. 
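# Sketch of the principal-direction test used when merging border clusters in
# Assembled_SMOTE above: fit one-component PCA to each cluster and compare the
# absolute cosine between the first components against the threshold `thres`.
# The two clusters below are synthetic and deliberately share an orientation.
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(2)
cluster_a = rng.randn(30, 2) * np.array([3.0, 0.3])        # elongated along x
cluster_b = rng.randn(30, 2) * np.array([3.0, 0.3]) + 5.0  # similar orientation

dir_a = PCA(n_components=1).fit(cluster_a).components_[0]
dir_b = PCA(n_components=1).fit(cluster_b).components_[0]
cos_angle = abs(np.dot(dir_a, dir_b)) / (np.linalg.norm(dir_a)
                                         * np.linalg.norm(dir_b))

thres = 0.3
print(cos_angle, cos_angle > thres)   # similar orientations -> merge candidates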
and\n Fang, X.},\n booktitle={2014 International Conference on\n Identification, Information and\n Knowledge in the Internet of\n Things},\n title={An Improved SMOTE Imbalanced Data\n Classification Method Based on Support\n Degree},\n year={2014},\n volume={},\n number={},\n pages={34-38},\n keywords={data mining;pattern classification;\n sampling methods;improved SMOTE\n imbalanced data classification\n method;support degree;data mining;\n class distribution;imbalanced\n data-set classification;over sampling\n method;minority class sample\n generation;minority class sample\n selection;minority class boundary\n sample identification;Classification\n algorithms;Training;Bagging;Computers;\n Testing;Algorithm design and analysis;\n Data mining;Imbalanced data-sets;\n Classification;Boundary sample;Support\n degree;SMOTE},\n doi={10.1109/IIKI.2014.14},\n ISSN={},\n month={Oct}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_borderline]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # fitting nearest neighbors model to find closest majority points to\n # minority samples\n nn = NearestNeighbors(n_neighbors=len(X_maj), n_jobs=self.n_jobs)\n nn.fit(X_maj)\n dist, ind = nn.kneighbors(X_min)\n\n # calculating the sum according to S3 in the paper\n S_i = np.sum(dist, axis=1)\n # calculating average distance according to S5\n S = np.sum(S_i)\n S_ave = S/(len(X_min)*len(X_maj))\n\n # calculate support degree\n def support_degree(x):\n return 
len(nn.radius_neighbors(x.reshape(1, -1),\n S_ave,\n return_distance=False))\n\n k = np.array([support_degree(X_min[i]) for i in range(len(X_min))])\n density = k/np.sum(k)\n\n # fitting nearest neighbors model to minority samples to run\n # SMOTE-like sampling\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(density)), p=density)\n random_neighbor_idx = self.random_state.choice(ind[idx][1:])\n X_a = X_min[idx]\n X_b = X_min[random_neighbor_idx]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass DSMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{dsmote,\n author={Mahmoudi, S. and Moradi, P. and Akhlaghian,\n F. and Moradi, R.},\n booktitle={2014 4th International Conference on\n Computer and Knowledge Engineering\n (ICCKE)},\n title={Diversity and separable metrics in\n over-sampling technique for imbalanced\n data classification},\n year={2014},\n volume={},\n number={},\n pages={152-158},\n keywords={learning (artificial intelligence);\n pattern classification;sampling\n methods;diversity metric;separable\n metric;over-sampling technique;\n imbalanced data classification;\n class distribution techniques;\n under-sampling technique;DSMOTE method;\n imbalanced learning problem;diversity\n measure;separable measure;Iran\n University of Medical Science;UCI\n dataset;Accuracy;Classification\n algorithms;Vectors;Educational\n institutions;Euclidean distance;\n Data mining;Diversity measure;\n Separable Measure;Over-Sampling;\n Imbalanced Data;Classification\n problems},\n doi={10.1109/ICCKE.2014.6993409},\n ISSN={},\n month={Oct}}\n\n Notes:\n * The method is highly inefficient when the number of minority samples\n is high, time complexity is O(n^3), with 1000 minority samples it\n takes about 1e9 objective function evaluations to find 1 new sample\n points. Adding 1000 samples would take about 1e12 evaluations of\n the objective function, which is unfeasible. 
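# Sketch of the support-degree idea behind SDSMOTE above: for every minority
# point, count the majority points within the average minority-to-majority
# distance S_ave and turn the counts into a sampling density. The [0] indexing
# picks the neighbour list of the single query point; the data are synthetic
# and this is a sketch of the idea rather than the exact class code.
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(4)
X_maj, X_min = rng.rand(80, 2), rng.rand(10, 2)

nn = NearestNeighbors(n_neighbors=len(X_maj)).fit(X_maj)
dist, _ = nn.kneighbors(X_min)
S_ave = dist.sum() / (len(X_min) * len(X_maj))

support = np.array([len(nn.radius_neighbors(x.reshape(1, -1), radius=S_ave,
                                            return_distance=False)[0])
                    for x in X_min])
density = support / support.sum()
print(density)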
We introduce a new\n parameter, n_step, and during the search for the new sample at\n most n_step combinations of minority samples are tried.\n * Abnormality of minority points is defined in the paper as\n D_maj/D_min, high abnormality means that the minority point is\n close to other minority points and very far from majority points.\n This is definitely not abnormality,\n I have implemented the opposite.\n * Nothing ensures that the fisher statistics and the variance from\n the geometric mean remain comparable, which might skew the\n optimization towards one of the sub-objectives.\n * MinMax normalization doesn't work, each attribute will have a 0\n value, which will make the geometric mean of all attribute 0.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n rate=0.1,\n n_step=50,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n rate (float): [0,1] rate of minority samples to turn into majority\n n_step (int): number of random configurations to check for new\n samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_in_range(rate, \"rate\", [0, 1])\n self.check_greater_or_equal(n_step, \"n_step\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.rate = rate\n self.n_step = n_step\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'rate': [0.1, 0.2],\n 'n_step': [50]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n mms = MinMaxScaler(feature_range=(1e-6, 1.0 - 1e-6))\n X = mms.fit_transform(X)\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # fitting nearest neighbors model\n nn = NearestNeighbors(n_neighbors=len(X_maj))\n nn.fit(X_maj)\n dist, ind = nn.kneighbors(X_min)\n\n # compute mean distances, the D_min is compenstaed for taking into\n # consideration self-distances in the mean\n D_maj = 
np.mean(dist, axis=1)\n D_min = np.mean(pairwise_distances(X_min), axis=1) * \\\n len(X_min)/(len(X_min)-1)\n\n # computing degree of abnormality\n abnormality = D_min/D_maj\n\n # sorting minority indices in decreasing order by abnormality\n to_sort = zip(abnormality, np.arange(len(abnormality)))\n abnormality, indices = zip(*sorted(to_sort, key=lambda x: -x[0]))\n rate = int(self.rate*len(abnormality))\n\n if rate > 0:\n # moving the most abnormal points to the majority class\n X_maj = np.vstack([X_maj, X_min[np.array(indices[:rate])]])\n # removing the most abnormal points form the minority class\n X_min = np.delete(X_min, indices[:rate], axis=0)\n\n # computing the mean and variance of points in the majority class\n var_maj = np.mean(np.var(X_maj, axis=0))\n mean_maj = np.mean(X_maj)\n\n # this is the original objective function, however, using this\n # is very inefficient if the number of records increases above\n # approximately 1000\n # def objective(X):\n # \"\"\"\n # The objective function to be maximized\n #\n # Args:\n # X (np.matrix): dataset\n #\n # Returns:\n # float: the value of the objective function\n # \"\"\"\n # gm= gmean(X, axis= 0)\n # gdiv= np.mean(np.linalg.norm(X - gm, axis= 1))\n # fisher= (np.mean(X) - mean_maj)**2/(np.mean(np.var(X, axis= 0)) \\\n # + var_maj)\n # return gdiv + fisher\n\n # in order to make the code more efficient, we do maintain some\n # variables containing the main componentes of the objective function\n # and apply only small corrections based on the new values being added\n # the effect should be identical\n\n # records the sum of logarithms in X_min, used to compute the geometric\n # mean\n min_log_sum = np.sum(np.log(X_min), axis=0)\n # contains the sum of values in X_min, coordinatewise\n min_sum = np.sum(X_min, axis=0)\n # contains the squares of sums of values in X_min, coordinatewise\n min_sum2 = np.sum(X_min**2, axis=0)\n # contains the sum of all numbers in X_min\n min_all_sum = np.sum(X_min)\n\n min_norm = np.linalg.norm(X_min)**2\n\n # do the sampling\n n_added = 0\n while n_added < n_to_sample:\n best_candidate = None\n highest_score = 0.0\n # we try n_step combinations of minority samples\n len_X = len(X_min)\n n_steps = min([len_X*(len_X-1)*(len_X-2), self.n_step])\n for _ in range(n_steps):\n i, j, k = self.random_state.choice(np.arange(len_X),\n 3,\n replace=False)\n gm = gmean(X_min[np.array([i, j, k])], axis=0)\n\n # computing the new objective function for the new point (gm)\n # added\n new_X_min = np.vstack([X_min, gm])\n\n # updating the components of the objective function\n new_min_log_sum = min_log_sum + np.log(gm)\n new_min_sum = min_sum + gm\n new_min_sum2 = min_sum2 + gm**2\n new_min_all_sum = min_all_sum + np.sum(gm)\n\n # computing mean, var, gmean and mean of all elements with\n # the new sample (gm)\n new_min_mean = new_min_sum/(len(new_X_min))\n new_min_var = new_min_sum2/(len(new_X_min)) - new_min_mean**2\n new_min_gmean = np.exp(new_min_log_sum/(len(new_X_min)))\n new_min_all_n = (len(new_X_min))*len(X_min[0])\n new_min_all_mean = new_min_all_sum / new_min_all_n\n\n new_min_norm = min_norm + np.linalg.norm(gm)\n\n # computing the new objective function value\n inner_prod = np.dot(new_X_min, new_min_gmean)\n gmean_norm = np.linalg.norm(new_min_gmean)**2\n term_sum = new_min_norm - 2*inner_prod + gmean_norm\n new_gdiv = np.mean(np.sqrt(term_sum))\n\n fisher_numerator = (new_min_all_mean - mean_maj)**2\n fisher_denominator = np.mean(new_min_var) + var_maj\n new_fisher = fisher_numerator / fisher_denominator\n\n 
score = new_gdiv + new_fisher\n\n # evaluate the objective function\n # score= objective(np.vstack([X_min, gm]))\n # check if the score is better than the best so far\n if score > highest_score:\n highest_score = score\n best_candidate = gm\n cand_min_log_sum = new_min_log_sum\n cand_min_sum = new_min_sum\n cand_min_sum2 = new_min_sum2\n cand_min_all_sum = new_min_all_sum\n cand_min_norm = new_min_norm\n\n # add the best candidate to the minority samples\n X_min = np.vstack([X_min, best_candidate])\n n_added = n_added + 1\n\n min_log_sum = cand_min_log_sum\n min_sum = cand_min_sum\n min_sum2 = cand_min_sum2\n min_all_sum = cand_min_all_sum\n min_norm = cand_min_norm\n\n return (mms.inverse_transform(np.vstack([X_maj, X_min])),\n np.hstack([np.repeat(self.maj_label, len(X_maj)),\n np.repeat(self.min_label, len(X_min))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'rate': self.rate,\n 'n_step': self.n_step,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass G_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{g_smote,\n author={Sandhan, T. and Choi, J. Y.},\n booktitle={2014 22nd International Conference on\n Pattern Recognition},\n title={Handling Imbalanced Datasets by Partially\n Guided Hybrid Sampling for Pattern\n Recognition},\n year={2014},\n volume={},\n number={},\n pages={1449-1453},\n keywords={Gaussian processes;learning (artificial\n intelligence);pattern classification;\n regression analysis;sampling methods;\n support vector machines;imbalanced\n datasets;partially guided hybrid\n sampling;pattern recognition;real-world\n domains;skewed datasets;dataset\n rebalancing;learning algorithm;\n extremely low minority class samples;\n classification tasks;extracted hidden\n patterns;support vector machine;\n logistic regression;nearest neighbor;\n Gaussian process classifier;Support\n vector machines;Proteins;Pattern\n recognition;Kernel;Databases;Gaussian\n processes;Vectors;Imbalanced dataset;\n protein classification;ensemble\n classifier;bootstrapping;Sat-image\n classification;medical diagnoses},\n doi={10.1109/ICPR.2014.258},\n ISSN={1051-4651},\n month={Aug}}\n\n Notes:\n * the non-linear approach is inefficient\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n method='linear',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
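# Direct (unoptimized) form of the DSMOTE objective, mirroring the commented
# out version above: geometric diversity around the geometric mean plus a
# Fisher criterion against the majority statistics. The class itself keeps
# incrementally updated sums for efficiency; the data here are illustrative.
import numpy as np
from scipy.stats import gmean

rng = np.random.RandomState(6)
X_min = rng.rand(15, 3) + 1e-6
X_maj = rng.rand(60, 3) + 1e-6

mean_maj = np.mean(X_maj)
var_maj = np.mean(np.var(X_maj, axis=0))

def objective(X):
    gm = gmean(X, axis=0)
    gdiv = np.mean(np.linalg.norm(X - gm, axis=1))
    fisher = (np.mean(X) - mean_maj) ** 2 / (np.mean(np.var(X, axis=0))
                                             + var_maj)
    return gdiv + fisher

i, j, k = rng.choice(len(X_min), 3, replace=False)
candidate = gmean(X_min[[i, j, k]], axis=0)     # candidate synthetic point
print(objective(np.vstack([X_min, candidate])))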
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbors\n component\n method (str): 'linear'/'non-linear_2.0' - the float can be any\n number: standard deviation in the\n Gaussian-kernel\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n if not method == 'linear' and not method.startswith('non-linear'):\n raise ValueError(self.__class__.__name__ + \": \" +\n 'Method parameter %s is not supported' % method)\n elif method.startswith('non-linear'):\n parameter = float(method.split('_')[-1])\n if parameter <= 0:\n message = (\"Non-positive non-linear parameter %f is \"\n \"not supported\") % parameter\n raise ValueError(self.__class__.__name__ + \": \" + message)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.method = method\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'method': ['linear', 'non-linear_0.1',\n 'non-linear_1.0',\n 'non-linear_2.0']}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n if self.method == 'linear':\n # finding H_l by linear decomposition\n cov = np.cov(X_min, rowvar=False)\n w, v = np.linalg.eig(cov)\n H_l = v[np.argmax(w)]\n else:\n # building a non-linear kernel matrix and finding H_n by its\n # decomposition\n self.sigma = float(self.method.split('_')[-1])\n kernel_matrix = pairwise_distances(X_min)\n kernel_matrix = kernel_matrix/(2.0*self.sigma**2)\n kernel_matrix = np.exp(kernel_matrix)\n try:\n w_k, v_k = np.linalg.eig(kernel_matrix)\n except Exception as e:\n return X.copy(), y.copy()\n H_n = v_k[np.argmax(w_k)]\n\n def kernel(x, y):\n return np.linalg.norm(x - y)/(2.0*self.sigma**2)\n\n # generating samples\n samples = []\n\n def angle(P, n, H_l):\n numerator = np.abs(np.dot(P[n], H_l))\n denominator = np.linalg.norm(P[n])*np.linalg.norm(H_l)\n return np.arccos(numerator/denominator)\n\n while len(samples) < n_to_sample:\n 
idx = self.random_state.randint(len(X_min))\n # calculating difference vectors from all neighbors\n P = X_min[ind[idx][1:]] - X_min[idx]\n if self.method == 'linear':\n # calculating angles with the principal direction\n thetas = np.array([angle(P, n, H_l) for n in range(len(P))])\n else:\n thetas = []\n # calculating angles of the difference vectors and the\n # principal direction in feature space\n for n in range(len(P)):\n # calculating representation in feature space\n feature_vector = np.array(\n [kernel(X_min[k], P[n]) for k in range(len(X_min))])\n dp = np.dot(H_n, feature_vector)\n denom = np.linalg.norm(feature_vector)*np.linalg.norm(H_n)\n thetas.append(np.arccos(np.abs(dp)/denom))\n thetas = np.array(thetas)\n\n # using the neighbor with the difference along the most similar\n # direction to the principal direction of the data\n n = np.argmin(thetas)\n X_a = X_min[idx]\n X_b = X_min[ind[idx][1:][n]]\n samples.append(self.sample_between_points_componentwise(X_a, X_b))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'method': self.method,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NT_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{nt_smote,\n author={Xu, Y. H. and Li, H. and Le, L. P. and\n Tian, X. Y.},\n booktitle={2014 Seventh International Joint\n Conference on Computational Sciences\n and Optimization},\n title={Neighborhood Triangular Synthetic Minority\n Over-sampling Technique for Imbalanced\n Prediction on Small Samples of Chinese\n Tourism and Hospitality Firms},\n year={2014},\n volume={},\n number={},\n pages={534-538},\n keywords={financial management;pattern\n classification;risk management;sampling\n methods;travel industry;Chinese\n tourism; hospitality firms;imbalanced\n risk prediction;minority class samples;\n up-sampling approach;neighborhood\n triangular synthetic minority\n over-sampling technique;NT-SMOTE;\n nearest neighbor idea;triangular area\n sampling idea;single classifiers;data\n excavation principles;hospitality\n industry;missing financial indicators;\n financial data filtering;financial risk\n prediction;MDA;DT;LSVM;logit;probit;\n firm risk prediction;Joints;\n Optimization;imbalanced datasets;\n NT-SMOTE;neighborhood triangular;\n random sampling},\n doi={10.1109/CSO.2014.104},\n ISSN={},\n month={July}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_application]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
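# Sketch of the guidance step of the linear G_SMOTE variant above: H_l is the
# eigenvector of the minority covariance with the largest eigenvalue (a column
# of the eigenvector matrix), and the neighbour whose difference vector forms
# the smallest angle with H_l is preferred. Data are illustrative.
import numpy as np

rng = np.random.RandomState(7)
X_min = rng.randn(25, 2) * np.array([2.0, 0.5])

cov = np.cov(X_min, rowvar=False)
w, v = np.linalg.eigh(cov)          # symmetric covariance: eigh is appropriate
H_l = v[:, np.argmax(w)]            # principal direction

P = X_min[1:6] - X_min[0]           # difference vectors to a few neighbours
cosines = np.abs(P @ H_l) / (np.linalg.norm(P, axis=1) * np.linalg.norm(H_l))
angles = np.arccos(np.clip(cosines, 0.0, 1.0))
print(np.argmin(angles))            # index of the most aligned neighbour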
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling(3):\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # find two nearest minority samples\n nn = NearestNeighbors(n_neighbors=3, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n samples = []\n while len(samples) < n_to_sample:\n # select point randomly\n idx = self.random_state.randint(len(X_min))\n P_1 = X_min[idx]\n # find two closest neighbors\n P_2 = X_min[ind[idx][1]]\n P_3 = X_min[ind[idx][2]]\n # generate random point by sampling the specified triangle\n r_1 = self.random_state.random_sample()\n r_2 = self.random_state.random_sample()\n samples.append((P_3 + r_1 * ((P_1 + r_2 * (P_2 - P_1)) - P_3)))\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Lee(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{lee,\n author = {Lee, Jaedong and Kim,\n Noo-ri and Lee, Jee-Hyong},\n title = {An Over-sampling Technique with Rejection\n for Imbalanced Class Learning},\n booktitle = {Proceedings of the 9th International\n Conference on Ubiquitous\n Information Management and\n Communication},\n series = {IMCOM '15},\n year = {2015},\n isbn = {978-1-4503-3377-1},\n location = {Bali, Indonesia},\n pages = {102:1--102:6},\n articleno = {102},\n numpages = {6},\n doi = {10.1145/2701126.2701181},\n acmid = {2701181},\n publisher = {ACM},\n address = {New York, NY, USA},\n keywords = {data distribution, data preprocessing,\n imbalanced problem, rejection rule,\n synthetic minority oversampling\n technique}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n rejection_level=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling 
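# --- Illustrative standalone sketch (not from the library above; toy points).
# --- NT_SMOTE generates a point inside the triangle spanned by a minority
# --- sample and its two nearest minority neighbors.  The parameterization used
# --- in the sampling loop above is reproduced here; since r_1, r_2 ~ U[0, 1],
# --- the result is a convex combination of P_1, P_2, P_3, i.e. it stays inside
# --- the triangle.
import numpy as np

def nt_smote_point(P_1, P_2, P_3, random_state):
    r_1 = random_state.random_sample()
    r_2 = random_state.random_sample()
    return P_3 + r_1 * ((P_1 + r_2 * (P_2 - P_1)) - P_3)

rng = np.random.RandomState(5)
P_1 = np.array([0.0, 0.0])
P_2 = np.array([1.0, 0.0])
P_3 = np.array([0.0, 1.0])
new_point = nt_smote_point(P_1, P_2, P_3, rng)        # lies inside the unit triangle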
object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in nearest neighbor\n component\n rejection_level (float): the rejection level of generated samples,\n if the fraction of majority labels in\n the local environment is higher than\n this number, the generated point is\n rejected\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_in_range(rejection_level, \"rejection_level\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.rejection_level = rejection_level\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'rejection_level': [0.3, 0.5, 0.7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors models to find neighbors of minority\n # samples in the total data and in the minority datasets\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn_min = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn_min.fit(X_min)\n dist_min, ind_min = nn_min.kneighbors(X_min)\n\n # do the sampling, we impleneted a continouos tweaking of rejection\n # levels in order to fix situations when no unrejectable data can\n # be can be generated\n samples = []\n passed = 0\n trial = 0\n rejection_level = self.rejection_level\n while len(samples) < n_to_sample:\n # checking if we managed to generate a single data in 1000 trials\n if passed == trial and passed > 1000:\n rejection_level = rejection_level + 0.1\n trial = 0\n passed = 0\n trial = trial + 1\n # generating random point\n idx = self.random_state.randint(len(X_min))\n random_neighbor_idx = self.random_state.choice(ind_min[idx][1:])\n X_a = X_min[idx]\n X_b = X_min[random_neighbor_idx]\n random_point = self.sample_between_points(X_a, X_b)\n # checking if the local environment is above the rejection level\n dist_new, 
ind_new = nn.kneighbors(random_point.reshape(1, -1))\n maj_frac = np.sum(y[ind_new][:-1] ==\n self.maj_label)/self.n_neighbors\n if maj_frac < rejection_level:\n samples.append(random_point)\n else:\n passed = passed + 1\n\n return (np.vstack([X, samples]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'rejection_level': self.rejection_level,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SPY(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{spy,\n author={Dang, X. T. and Tran, D. H. and Hirose, O.\n and Satou, K.},\n booktitle={2015 Seventh International Conference\n on Knowledge and Systems Engineering\n (KSE)},\n title={SPY: A Novel Resampling Method for\n Improving Classification Performance in\n Imbalanced Data},\n year={2015},\n volume={},\n number={},\n pages={280-285},\n keywords={decision making;learning (artificial\n intelligence);pattern classification;\n sampling methods;SPY;resampling\n method;decision-making process;\n biomedical data classification;\n class imbalance learning method;\n SMOTE;oversampling method;UCI\n machine learning repository;G-mean\n value;borderline-SMOTE;\n safe-level-SMOTE;Support vector\n machines;Training;Bioinformatics;\n Proteins;Protein engineering;Radio\n frequency;Sensitivity;Imbalanced\n dataset;Over-sampling;\n Under-sampling;SMOTE;\n borderline-SMOTE},\n doi={10.1109/KSE.2015.24},\n ISSN={},\n month={Oct}}\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority]\n\n def __init__(self,\n n_neighbors=5,\n threshold=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): number of neighbors in nearest neighbor\n component\n threshold (float): threshold*n_neighbors gives the threshold z\n described in the paper\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_in_range(threshold, \"threshold\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_neighbors = n_neighbors\n self.threshold = threshold\n self.n_jobs = n_jobs\n\n # random state takes no effect for this technique\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'n_neighbors': [3, 5, 7],\n 'threshold': [0.3, 0.5, 0.7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n y_new = y.copy()\n z = 
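# --- Illustrative standalone sketch (hypothetical data; not the library code).
# --- The acceptance test of the Lee sampler above: a synthetic candidate is
# --- kept only if the fraction of majority labels among its nearest neighbors
# --- in the full data set stays below the (possibly relaxed) rejection level.
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(7)
X_demo = rng.normal(size=(40, 2))
y_demo = np.array([0] * 30 + [1] * 10)                # 0 = majority, 1 = minority

nn_demo = NearestNeighbors(n_neighbors=5).fit(X_demo)

def accept_candidate(candidate, rejection_level=0.5):
    _, ind = nn_demo.kneighbors(candidate.reshape(1, -1))
    maj_frac = np.mean(y_demo[ind[0]] == 0)           # fraction of majority neighbors
    return maj_frac < rejection_level

X_min_demo = X_demo[y_demo == 1]
candidate = X_min_demo[0] + 0.5 * (X_min_demo[1] - X_min_demo[0])
keep = accept_candidate(candidate)                    # True -> append to samples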
self.threshold*n_neighbors\n\n # checking the neighbors of each minority sample\n for i in range(len(X_min)):\n majority_mask = y[ind[i][1:]] == self.maj_label\n x = np.sum(majority_mask)\n # if the number of majority samples in the neighborhood is\n # smaller than a threshold\n # their labels are changed to minority\n if x < z:\n y_new[ind[i][1:][majority_mask]] = self.min_label\n\n return X.copy(), y_new\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'threshold': self.threshold,\n 'n_jobs': self.n_jobs}\n\n\nclass SMOTE_PSOBAT(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{smote_psobat,\n author={Li, J. and Fong, S. and Zhuang, Y.},\n booktitle={2015 3rd International Symposium on\n Computational and Business\n Intelligence (ISCBI)},\n title={Optimizing SMOTE by Metaheuristics with\n Neural Network and Decision Tree},\n year={2015},\n volume={},\n number={},\n pages={26-32},\n keywords={data mining;particle swarm\n optimisation;pattern classification;\n data mining;classifier;metaherustics;\n SMOTE parameters;performance\n indicators;selection optimization;\n PSO;particle swarm optimization\n algorithm;BAT;bat-inspired algorithm;\n metaheuristic optimization algorithms;\n nearest neighbors;imbalanced dataset\n problem;synthetic minority\n over-sampling technique;decision tree;\n neural network;Classification\n algorithms;Neural networks;Decision\n trees;Training;Optimization;Particle\n swarm optimization;Data mining;SMOTE;\n Swarm Intelligence;parameter\n selection optimization},\n doi={10.1109/ISCBI.2015.12},\n ISSN={},\n month={Dec}}\n\n Notes:\n * The parameters of the memetic algorithms are not specified.\n * I have checked multiple paper describing the BAT algorithm, but the\n meaning of \"Generate a new solution by flying randomly\" is still\n unclear.\n * It is also unclear if best solutions are recorded for each bat, or\n the entire population.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_memetic]\n\n def __init__(self,\n maxit=50,\n c1=0.3,\n c2=0.1,\n c3=0.1,\n alpha=0.9,\n gamma=0.9,\n method='bat',\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n maxit (int): maximum number of iterations\n c1 (float): intertia weight of PSO\n c2 (float): attraction of local maximums in PSO\n c3 (float): attraction of global maximum in PSO\n alpha (float): alpha parameter of the method\n gamma (float): gamma parameter of the method\n method (str): optimization technique to be used\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(maxit, \"maxit\", 1)\n self.check_greater_or_equal(c1, \"c1\", 0)\n self.check_greater_or_equal(c2, \"c2\", 0)\n self.check_greater_or_equal(c3, \"c3\", 0)\n self.check_greater_or_equal(alpha, \"alpha\", 0)\n self.check_greater_or_equal(gamma, \"gamma\", 0)\n self.check_isin(method, \"method\", ['pso', 'bat'])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.maxit = maxit\n self.c1 = c1\n self.c2 = c2\n self.c3 = c3\n self.alpha = alpha\n self.gamma = gamma\n self.method = method\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter 
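# --- Illustrative standalone sketch (hypothetical data; not the library code).
# --- SPY above generates no synthetic points at all: it flips the labels of
# --- majority neighbors of minority points whenever the number of majority
# --- neighbors is below z = threshold * n_neighbors.  The rule in isolation:
import numpy as np
from sklearn.neighbors import NearestNeighbors

rng = np.random.RandomState(3)
X_demo = rng.normal(size=(30, 2))
y_demo = np.array([0] * 24 + [1] * 6)                 # 0 = majority, 1 = minority
k = 5
z = 0.5 * k                                           # threshold * n_neighbors

nn = NearestNeighbors(n_neighbors=k + 1).fit(X_demo)
_, ind = nn.kneighbors(X_demo[y_demo == 1])

y_new = y_demo.copy()
for neighbors in ind[:, 1:]:                          # skip the point itself
    maj_mask = y_demo[neighbors] == 0
    if np.sum(maj_mask) < z:                          # few majority neighbors ->
        y_new[neighbors[maj_mask]] = 1                # relabel them as minority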
combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n bat_pc = cls.generate_parameter_combinations({'maxit': [50],\n 'alpha': [0.7, 0.9],\n 'gamma': [0.7, 0.9],\n 'method': ['bat']}, raw)\n pso_pc = cls.generate_parameter_combinations({'maxit': [50],\n 'c1': [0.2, 0.5],\n 'c2': [0.1, 0.2],\n 'c3': [0.1, 0.2],\n 'method': ['pso']}, raw)\n if not raw:\n bat_pc.extend(pso_pc)\n else:\n bat_pc = {**bat_pc, **pso_pc}\n return bat_pc\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n def evaluate(K, proportion):\n \"\"\"\n Evaluate given configuration\n\n Args:\n K (int): number of neighbors in nearest neighbors component\n proportion (float): proportion of missing data to generate\n\n Returns:\n float, float: kappa and accuracy scores\n \"\"\"\n smote = SMOTE(proportion=proportion,\n n_neighbors=K,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X_samp, y_samp = smote.sample(X, y)\n\n # doing k-fold cross validation\n kfold = KFold(5)\n preds = []\n tests = []\n for train, test in kfold.split(X_samp):\n dt = DecisionTreeClassifier(random_state=self.random_state)\n dt.fit(X_samp[train], y_samp[train])\n preds.append(dt.predict(X_samp[test]))\n tests.append(y_samp[test])\n preds = np.hstack(preds)\n tests = np.hstack(tests)\n # computing the kappa score\n tp = np.sum(np.logical_and(preds == tests,\n tests == self.min_label))\n fn = np.sum(np.logical_and(preds != tests,\n tests == self.min_label))\n tn = np.sum(np.logical_and(preds == tests,\n tests == self.maj_label))\n fp = np.sum(np.logical_and(preds != tests,\n tests == self.maj_label))\n\n p_o = (tp + tn)/(tp + fn + tn + fp)\n p_e = (tp + fn)*(tp + fp)/(tp + fn + tn + fp)**2 + \\\n (fp + tn)*(fn + tn)/(tp + fn + tn + fp)**2\n\n kappa = (p_o - p_e)/(1.0 - p_e)\n\n return kappa, p_o\n\n def PSO():\n \"\"\"\n PSO optimization\n\n Returns:\n int, float: the best K and proportion values\n \"\"\"\n # a reasonable range of nearest neighbors to use with SMOTE\n k_range = [2, min([np.sum(y == self.min_label), 10])]\n # a reasonable range of proportions\n proportion_range = [0.1, 2.0]\n # population size\n n_pop = 10\n\n # initial particles\n def init_particle():\n k_rand = self.random_state.randint(k_range[0], k_range[1])\n r = self.random_state.random_sample()\n diff = proportion_range[1] - proportion_range[0]\n vect = r*diff + proportion_range[0]\n return np.array([k_rand, vect])\n ps = [init_particle() for _ in range(n_pop)]\n # initial velocities\n velocities = [np.array([0, 0]) for _ in range(n_pop)]\n # best configurations of particles\n local_best = [ps[i].copy() for i in range(n_pop)]\n # scores of best configurations of particles\n local_scores = [(0, 0) for _ in range(n_pop)]\n # global best configuration of particles\n global_best = ps[0].copy()\n # global best score\n global_scores = (0, 0)\n\n # executing the particle swarm optimization\n not_changed = 0\n for _ in range(self.maxit):\n # if the configurations didn't change for 10 iterations, stop\n if not_changed > len(ps)*10:\n break\n # evaluating each of the configurations\n for i in range(len(ps)):\n 
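# --- Illustrative standalone sketch (not part of the library code above).
# --- The evaluate() helper scores a (K, proportion) candidate by Cohen's kappa
# --- and accuracy of a cross-validated decision tree.  The kappa formula used
# --- there, isolated for clarity:
def cohen_kappa(tp, fn, tn, fp):
    n = tp + fn + tn + fp
    p_o = (tp + tn) / n                               # observed agreement (accuracy)
    p_e = ((tp + fn) * (tp + fp) + (fp + tn) * (fn + tn)) / n**2   # chance agreement
    return (p_o - p_e) / (1.0 - p_e)

# e.g. 40 true positives, 10 false negatives, 35 true negatives, 15 false positives
kappa_demo = cohen_kappa(40, 10, 35, 15)              # exactly 0.5 for these counts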
scores = evaluate(np.int(ps[i][0]), ps[i][1])\n # recording if the best scores didn't change\n not_changed = not_changed + 1\n # registering locally and globally best scores\n if (min([local_scores[i][0], scores[0]]) > 0.4\n and local_scores[i][1] > scores[1]):\n local_scores[i] = scores\n local_best[i] = ps[i].copy()\n not_changed = 0\n elif scores[0] > 0.4 and local_scores[i][0] <= 0.4:\n local_scores[i] = scores\n local_best[i] = ps[i].copy()\n not_changed = 0\n\n if (min([global_scores[0], scores[0]]) > 0.4\n and global_scores[1] > scores[1]):\n global_scores = scores\n global_best = ps[i].copy()\n not_changed = 0\n elif scores[0] > 0.4 and global_scores[0] <= 0.4:\n global_scores = scores\n global_best = ps[i].copy()\n not_changed = 0\n\n # update velocities\n for i in range(len(ps)):\n velocities[i] = self.c1*velocities[i] + \\\n (local_best[i] - ps[i])*self.c2 + \\\n (global_best - ps[i])*self.c3\n # clipping velocities if required\n while abs(velocities[i][0]) > k_range[1] - k_range[0]:\n velocities[i][0] = velocities[i][0]/2.0\n diff = proportion_range[1] - proportion_range[0]\n while abs(velocities[i][1]) > diff:\n velocities[i][1] = velocities[i][1]/2.0\n\n # update positions\n for i in range(len(ps)):\n ps[i] = ps[i] + velocities[i]\n # clipping positions according to the specified ranges\n ps[i][0] = np.clip(ps[i][0], k_range[0], k_range[1])\n ps[i][1] = np.clip(ps[i][1],\n proportion_range[0],\n proportion_range[1])\n\n return global_best\n\n def BAT():\n \"\"\"\n BAT optimization\n\n Returns:\n int, float: the best K and proportion values\n \"\"\"\n\n if sum(y == self.min_label) < 2:\n return X.copy(), y.copy()\n\n # a reasonable range of nearest neighbors to use with SMOTE\n k_range = [1, min([np.sum(y == self.min_label), 10])]\n # a reasonable range of proportions\n proportion_range = [0.1, 2.0]\n # population size\n n_pop = 10\n # maximum frequency\n f_max = 10\n\n def init_bat():\n k_rand = self.random_state.randint(k_range[0], k_range[1])\n r = self.random_state.random_sample()\n diff = proportion_range[1] - proportion_range[0]\n return np.array([k_rand, r*diff + proportion_range[0]])\n\n # initial bat positions\n bats = [init_bat() for _ in range(n_pop)]\n # initial velocities\n velocities = [np.array([0, 0]) for _ in range(10)]\n # best configurations of particles\n local_best = [[[[0.0, 0.0], bats[i].copy()]]\n for i in range(len(bats))]\n # scores of best configurations of particles\n global_best = [[0.0, 0.0], bats[0].copy()]\n # pulse frequencies\n f = self.random_state.random_sample(size=n_pop)*f_max\n # pulse rates\n r = self.random_state.random_sample(size=n_pop)\n # loudness\n A = self.random_state.random_sample(size=n_pop)\n\n # gamma parameter according to the BAT paper\n gamma = self.gamma\n # alpha parameter according to the BAT paper\n alpha = self.alpha\n\n # initial best solution\n bat_star = bats[0].copy()\n\n not_changed = 0\n for t in range(self.maxit):\n not_changed = not_changed + 1\n\n if not_changed > 10:\n break\n\n # update frequencies\n f = self.random_state.random_sample(size=n_pop)*f_max\n\n # update velocities\n for i in range(len(velocities)):\n velocities[i] = velocities[i] + (bats[i] - bat_star)*f[i]\n\n # update bats\n for i in range(len(bats)):\n bats[i] = bats[i] + velocities[i]\n bats[i][0] = np.clip(bats[i][0], k_range[0], k_range[1])\n bats[i][1] = np.clip(\n bats[i][1], proportion_range[0], proportion_range[1])\n\n for i in range(n_pop):\n # generate local solution\n if self.random_state.random_sample() > r[i]:\n n_rand = 
min([len(local_best[i]), 5])\n rand_int = self.random_state.randint(n_rand)\n random_best_sol = local_best[i][rand_int][1]\n rr = self.random_state.random_sample(\n size=len(bat_star))\n bats[i] = random_best_sol + rr*A[i]\n\n # evaluate and do local search\n for i in range(n_pop):\n scores = evaluate(int(bats[i][0]), bats[i][1])\n\n # checking if the scores are better than the global score\n # implementation of the multi-objective criterion in the\n # SMOTE-PSOBAT paper\n improved_global = False\n if (min([global_best[0][0], scores[0]]) > 0.4\n and global_best[0][1] > scores[1]):\n improved_global = True\n not_changed = 0\n elif scores[0] > 0.4 and global_best[0][0] <= 0.4:\n improved_global = True\n not_changed = 0\n\n # checking if the scores are better than the local scores\n # implementation of the multi-objective criterion in the\n # SMOTE-PSOBAT paper\n improved_local = False\n if (min([local_best[i][0][0][0], scores[0]]) > 0.4\n and local_best[i][0][0][1] > scores[1]):\n improved_local = True\n elif scores[0] > 0.4 and local_best[i][0][0][0] <= 0.4:\n improved_local = True\n\n # local search in the bet algorithm\n if (self.random_state.random_sample() < A[i]\n and improved_local):\n local_best[i].append([scores, bats[i].copy()])\n A[i] = A[i]*alpha\n r[i] = r[i]*(1 - np.exp(-gamma*t))\n if (self.random_state.random_sample() < A[i]\n and improved_global):\n global_best = [scores, bats[i].copy()]\n\n # ranking local solutions to keep track of the best 5\n local_best[i] = sorted(\n local_best[i], key=lambda x: -x[0][0])\n local_best[i] = local_best[i][:min(\n [len(local_best[i]), 5])]\n\n t = t + 1\n\n return global_best[1]\n\n if self.method == 'pso':\n best_combination = PSO()\n elif self.method == 'bat':\n best_combination = BAT()\n else:\n message = \"Search method %s not supported yet.\" % self.method\n raise ValueError(self.__class__.__name__ + \": \" + message)\n\n return SMOTE(proportion=best_combination[1],\n n_neighbors=int(best_combination[0]),\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'maxit': self.maxit,\n 'c1': self.c1,\n 'c2': self.c2,\n 'c3': self.c3,\n 'alpha': self.alpha,\n 'gamma': self.gamma,\n 'method': self.method,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MDO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @ARTICLE{mdo,\n author={Abdi, L. 
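# --- Illustrative standalone sketch (hypothetical values; not the library code).
# --- One BAT move as implemented above: each bat's velocity is pulled by the
# --- difference to the current best solution scaled by a random pulse
# --- frequency, and the resulting (K, proportion) position is clipped back
# --- into the valid search box.
import numpy as np

def bat_update(bat, velocity, best, frequency, k_range, proportion_range):
    velocity = velocity + (bat - best) * frequency
    bat = bat + velocity
    bat[0] = np.clip(bat[0], k_range[0], k_range[1])
    bat[1] = np.clip(bat[1], proportion_range[0], proportion_range[1])
    return bat, velocity

bat_demo = np.array([5.0, 1.2])                       # (K, proportion)
vel_demo = np.array([0.0, 0.0])
best_demo = np.array([7.0, 0.8])
bat_demo, vel_demo = bat_update(bat_demo, vel_demo, best_demo, frequency=0.5,
                                k_range=(1, 10), proportion_range=(0.1, 2.0))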
and Hashemi, S.},\n journal={IEEE Transactions on Knowledge and Data\n Engineering},\n title={To Combat Multi-Class Imbalanced Problems\n by Means of Over-Sampling Techniques},\n year={2016},\n volume={28},\n number={1},\n pages={238-251},\n keywords={covariance analysis;learning (artificial\n intelligence);modelling;pattern\n classification;sampling methods;\n statistical distributions;minority\n class instance modelling;probability\n contour;covariance structure;MDO;\n Mahalanobis distance-based oversampling\n technique;data-oriented technique;\n model-oriented solution;machine learning\n algorithm;data skewness;multiclass\n imbalanced problem;Mathematical model;\n Training;Accuracy;Eigenvalues and\n eigenfunctions;Machine learning\n algorithms;Algorithm design and analysis;\n Benchmark testing;Multi-class imbalance\n problems;over-sampling techniques;\n Mahalanobis distance;Multi-class imbalance\n problems;over-sampling techniques;\n Mahalanobis distance},\n doi={10.1109/TKDE.2015.2458858},\n ISSN={1041-4347},\n month={Jan}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_dim_reduction]\n\n def __init__(self,\n proportion=1.0,\n K2=5,\n K1_frac=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n K2 (int): number of neighbors\n K1_frac (float): the fraction of K2 to set K1\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(K2, \"K2\", 1)\n self.check_greater_or_equal(K1_frac, \"K1_frac\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.K2 = K2\n self.K1_frac = K1_frac\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'K2': [3, 5, 7],\n 'K1_frac': [0.3, 0.5, 0.7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # determining K1\n self.K1 = int(self.K2*self.K1_frac)\n K1 = min([self.K1, len(X)])\n K2 = min([self.K2 + 1, len(X)])\n\n # Algorithm 2 - chooseSamples\n nn = NearestNeighbors(n_neighbors=K2, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting the number of minority samples in local neighborhoods\n n_min = np.array([np.sum(y[ind[i][1:]] == 
self.min_label)\n for i in range(len(X_min))])\n\n # extracting selected samples from minority ones\n X_sel = X_min[n_min >= K1]\n\n # falling back to returning input data if all the input is considered\n # noise\n if len(X_sel) == 0:\n _logger.info(self.__class__.__name__ +\n \": \" + \"No samples selected\")\n return X.copy(), y.copy()\n\n # computing distribution\n weights = n_min[n_min >= K1]/K2\n weights = weights/np.sum(weights)\n\n # Algorithm 1 - MDO over-sampling\n mu = np.mean(X_sel, axis=0)\n Z = X_sel - mu\n # executing PCA\n pca = PCA(n_components=min([len(Z[0]), len(Z)])).fit(Z)\n T = pca.transform(Z)\n # computing variances (step 13)\n V = np.var(T, axis=0)\n\n V[V < 0.001] = 0.001\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n # selecting a sample randomly according to the distribution\n idx = self.random_state.choice(np.arange(len(X_sel)), p=weights)\n\n # finding vector in PCA space\n X_temp = T[idx]\n X_temp_square = X_temp**2\n\n # computing alphas\n alpha = np.sum(X_temp_square/V)\n alpha_V = alpha*V\n alpha_V[alpha_V < 0.001] = 0.001\n\n # initializing a new vector\n X_new = np.zeros(len(X_temp))\n\n # sampling components of the new vector\n s = 0\n for j in range(len(X_temp)-1):\n r = (2*self.random_state.random_sample()-1)*np.sqrt(alpha_V[j])\n X_new[j] = r\n s = s + (r**2/alpha_V[j])\n\n if s > 1:\n last_fea_val = 0\n else:\n tmp = (1 - s)*alpha*V[-1]\n if tmp < 0:\n tmp = 0\n last_fea_val = np.sqrt(tmp)\n # determine last component to fulfill the ellipse equation\n X_new[-1] = (2*self.random_state.random_sample()-1)*last_fea_val\n # append to new samples\n samples.append(X_new)\n\n return (np.vstack([X, pca.inverse_transform(samples) + mu]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'K2': self.K2,\n 'K1_frac': self.K1_frac,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Random_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{random_smote,\n author=\"Dong, Yanjie\n and Wang, Xuehua\",\n editor=\"Xiong, Hui\n and Lee, W. B.\",\n title=\"A New Over-Sampling Approach: Random-SMOTE\n for Learning from Imbalanced Data Sets\",\n booktitle=\"Knowledge Science, Engineering and\n Management\",\n year=\"2011\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"343--352\",\n isbn=\"978-3-642-25975-3\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_componentwise]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
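# --- Illustrative standalone sketch (hypothetical values; not the library code).
# --- The MDO generation step above works in PCA space: it samples the first
# --- d-1 components of a new point uniformly within the ellipse
# --- sum_j x_j**2 / (alpha * V_j) <= 1 defined by the seed point, and uses the
# --- remaining "budget" for the last component.  A compact version:
import numpy as np

def mdo_new_point(T_seed, V, random_state):
    alpha = np.sum(T_seed**2 / V)                     # ellipse size of the seed
    alpha_V = np.maximum(alpha * V, 0.001)
    X_new = np.zeros_like(T_seed)
    s = 0.0
    for j in range(len(T_seed) - 1):
        r = (2 * random_state.random_sample() - 1) * np.sqrt(alpha_V[j])
        X_new[j] = r
        s += r**2 / alpha_V[j]
    rest = max(1.0 - s, 0.0) * alpha * V[-1]          # what is left for the last axis
    X_new[-1] = (2 * random_state.random_sample() - 1) * np.sqrt(rest)
    return X_new

rng = np.random.RandomState(11)
T_seed_demo = np.array([1.0, -0.5, 0.2])              # a sample in PCA coordinates
V_demo = np.array([2.0, 1.0, 0.5])                    # per-component variances
point = mdo_new_point(T_seed_demo, V_demo, rng)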
1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (int): number of neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model to find closest neighbors of minority\n # points\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min)))\n y_1_idx, y_2_idx = self.random_state.choice(ind[idx][1:], 2)\n t = self.sample_between_points_componentwise(\n X_min[y_1_idx], X_min[y_2_idx])\n samples.append(\n self.sample_between_points_componentwise(X_min[idx], t))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ISMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{ismote,\n author=\"Li, Hu\n and Zou, Peng\n and Wang, Xiang\n and Xia, Rongze\",\n editor=\"Sun, Zengqi\n and Deng, Zhidong\",\n title=\"A New Combination Sampling Method for\n Imbalanced Data\",\n booktitle=\"Proceedings of 2013 Chinese Intelligent\n Automation Conference\",\n year=\"2013\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"547--554\",\n isbn=\"978-3-642-38466-0\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority]\n\n def __init__(self,\n n_neighbors=5,\n minority_weight=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): number of neighbors\n minority_weight 
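# --- Illustrative standalone sketch (toy points; the componentwise helper's
# --- exact behaviour is an assumption here: an independent random factor per
# --- coordinate).  Random_SMOTE above interpolates twice: first between two
# --- neighbors y_1, y_2 of a seed x (giving a temporary point t), then between
# --- x and t.
import numpy as np

def interpolate_componentwise(a, b, random_state):
    # assumed behaviour of sample_between_points_componentwise:
    # each coordinate gets its own interpolation factor in [0, 1]
    r = random_state.random_sample(size=len(a))
    return a + r * (b - a)

rng = np.random.RandomState(1)
x = np.array([0.0, 0.0])
y_1 = np.array([1.0, 0.2])
y_2 = np.array([0.3, 1.0])
t = interpolate_componentwise(y_1, y_2, rng)          # temporary point
new_sample = interpolate_componentwise(x, t, rng)     # final synthetic sample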
(float): weight parameter according to the paper\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(minority_weight, \"minority_weight\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_neighbors = n_neighbors\n self.minority_weight = minority_weight\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'n_neighbors': [3, 5, 7],\n 'minority_weight': [0.2, 0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n n_to_sample = int((len(X_maj) - len(X_min))/2 + 0.5)\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # computing distances of majority samples from minority ones\n nn = NearestNeighbors(n_neighbors=len(X_min), n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_maj)\n\n # sort majority instances in descending order by their mean distance\n # from minority samples\n to_sort = zip(np.arange(len(X_maj)), np.mean(dist, axis=1))\n ind_sorted, dist_sorted = zip(*sorted(to_sort, key=lambda x: -x[1]))\n\n # remove the ones being farthest from the minority samples\n X_maj = X_maj[list(ind_sorted[n_to_sample:])]\n\n # construct new dataset\n X_new = np.vstack([X_maj, X_min])\n y_new = np.hstack([np.repeat(self.maj_label, len(X_maj)),\n np.repeat(self.min_label, len(X_min))])\n\n X_min = X_new[y_new == self.min_label]\n\n # fitting nearest neighbors model\n n_neighbors = min([len(X_new), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_new)\n dist, ind = nn.kneighbors(X_min)\n\n # do the oversampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.choice(np.arange(len(X_min)))\n y_idx = self.random_state.choice(ind[idx][1:])\n\n # different generation scheme depending on the class label\n if y_new[y_idx] == self.min_label:\n diff = (X_new[y_idx] - X_min[idx])\n r = self.random_state.random_sample()\n samples.append(X_min[idx] + r * diff * self.minority_weight)\n else:\n diff = (X_new[y_idx] - X_min[idx])\n r = self.random_state.random_sample()\n sample = X_min[idx] + r * diff * (1.0 - self.minority_weight)\n samples.append(sample)\n\n return (np.vstack([X_new, np.vstack(samples)]),\n np.hstack([y_new, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'minority_weight': self.minority_weight,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass VIS_RST(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n 
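# --- Illustrative standalone sketch (not the library code above).  ISMOTE's
# --- generation rule moves a minority seed towards a random neighbor, scaling
# --- the step by minority_weight when the neighbor is minority and by
# --- (1 - minority_weight) when it is majority.
import numpy as np

def ismote_step(seed, neighbor, neighbor_is_minority, minority_weight, random_state):
    r = random_state.random_sample()
    weight = minority_weight if neighbor_is_minority else 1.0 - minority_weight
    return seed + r * (neighbor - seed) * weight

rng = np.random.RandomState(0)
seed_demo = np.array([0.0, 0.0])
maj_neighbor_demo = np.array([1.0, 1.0])
sample_demo = ismote_step(seed_demo, maj_neighbor_demo,
                          neighbor_is_minority=False,
                          minority_weight=0.5,
                          random_state=rng)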
@InProceedings{vis_rst,\n author=\"Borowska, Katarzyna\n and Stepaniuk, Jaroslaw\",\n editor=\"Saeed, Khalid\n and Homenda, Wladyslaw\",\n title=\"Imbalanced Data Classification: A Novel\n Re-sampling Approach Combining Versatile\n Improved SMOTE and Rough Sets\",\n booktitle=\"Computer Information Systems and\n Industrial Management\",\n year=\"2016\",\n publisher=\"Springer International Publishing\",\n address=\"Cham\",\n pages=\"31--42\",\n isbn=\"978-3-319-45378-1\"\n }\n\n Notes:\n * Replication of DANGER samples will be removed by the last step of\n noise filtering.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_noise_removal]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0.0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardizing the data\n ss = StandardScaler()\n ss.fit(X)\n X = ss.transform(X)\n y = y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # fitting nearest neighbors model to determine boundary region\n n_neighbors = min([len(X), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_maj)\n\n # determining boundary region of majority samples\n boundary = np.array([np.sum(y[ind[i]] == self.maj_label)\n != n_neighbors for i in range(len(X_maj))])\n y_maj = y[y == self.maj_label]\n y_maj[boundary] = self.min_label\n y[y == self.maj_label] = y_maj\n\n # extracting new minority and majority set\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # labeling minority samples\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting labels\n labels = []\n for i in range(len(ind)):\n 
min_class_neighbors = np.sum(y[ind[i][1:]] == self.maj_label)\n if min_class_neighbors == n_neighbors-1:\n labels.append('noise')\n elif min_class_neighbors < n_neighbors/2:\n labels.append('safe')\n else:\n labels.append('danger')\n\n # extracting the number of different labels (noise is not used)\n safe = np.sum([li == 'safe' for li in labels])\n danger = np.sum([li == 'danger' for li in labels])\n\n if safe == 0:\n mode = 'no_safe'\n elif danger > 0.3*len(X_min):\n mode = 'high_complexity'\n else:\n mode = 'low_complexity'\n\n # fitting nearest neighbors to find the neighbors of minority elements\n # among minority elements\n n_neighbors_min = min([len(X_min), self.n_neighbors + 1])\n nn_min = NearestNeighbors(n_neighbors=n_neighbors_min,\n n_jobs=self.n_jobs)\n nn_min.fit(X_min)\n dist_min, ind_min = nn_min.kneighbors(X_min)\n\n # do the sampling\n samples = []\n mask = np.repeat(False, len(X_min))\n while len(samples) < n_to_sample:\n # choosing a random minority sample\n idx = self.random_state.choice(np.arange(len(X_min)))\n\n # implementation of sampling rules depending on the mode\n if mode == 'high_complexity':\n if labels[idx] == 'noise':\n pass\n elif labels[idx] == 'danger' and not mask[idx]:\n samples.append(X_min[idx])\n mask[idx] = True\n else:\n X_b = X_min[self.random_state.choice(ind_min[idx][1:])]\n samples.append(self.sample_between_points(X_min[idx], X_b))\n elif mode == 'low_complexity':\n if labels[idx] == 'noise':\n pass\n elif labels[idx] == 'danger':\n X_b = X_min[self.random_state.choice(ind_min[idx][1:])]\n samples.append(self.sample_between_points(X_min[idx], X_b))\n elif not mask[idx]:\n samples.append(X_min[idx])\n mask[idx] = True\n else:\n X_b = X_min[self.random_state.choice(ind_min[idx][1:])]\n samples.add(self.sample_between_points(X_min[idx], X_b))\n\n X_samp = np.vstack(samples)\n\n # final noise removal by removing those minority samples generated\n # and not belonging to the lower approximation\n nn = NearestNeighbors(n_neighbors=n_neighbors,\n n_jobs=self.n_jobs).fit(X)\n dist_check, ind_check = nn.kneighbors(X_samp)\n\n def maj_zero(i):\n return np.sum(y[ind_check[i][1:]] == self.maj_label) == 0\n\n num_maj_mask = np.array([maj_zero(i) for i in range(len(samples))])\n X_samp = X_samp[num_maj_mask]\n\n return (ss.inverse_transform(np.vstack([X, X_samp])),\n np.hstack([y, np.repeat(self.min_label, len(X_samp))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass GASMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{gasmote,\n author=\"Jiang, Kun\n and Lu, Jing\n and Xia, Kuiliang\",\n title=\"A Novel Algorithm for Imbalance Data\n Classification Based on Genetic\n Algorithm Improved SMOTE\",\n journal=\"Arabian Journal for Science and\n Engineering\",\n year=\"2016\",\n month=\"Aug\",\n day=\"01\",\n volume=\"41\",\n number=\"8\",\n pages=\"3255--3266\",\n issn=\"2191-4281\",\n doi=\"10.1007/s13369-016-2179-2\",\n url=\"https://doi.org/10.1007/s13369-016-2179-2\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_memetic,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n n_neighbors=5,\n maxn=7,\n n_pop=10,\n popl3=5,\n pm=0.3,\n pr=0.2,\n Ge=10,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): number of 
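# --- Illustrative standalone sketch (not the library code above).  VIS_RST
# --- tags each minority point as 'noise', 'safe' or 'danger' from the number
# --- of majority points among its neighbors, then derives the sampling mode
# --- from the overall label counts.  The same rules, isolated:
def vis_rst_label(n_majority_neighbors, k):
    if n_majority_neighbors == k:
        return 'noise'                                # completely surrounded by majority
    if n_majority_neighbors < k / 2:
        return 'safe'
    return 'danger'

def vis_rst_mode(labels, n_minority):
    n_safe = sum(label == 'safe' for label in labels)
    n_danger = sum(label == 'danger' for label in labels)
    if n_safe == 0:
        return 'no_safe'
    if n_danger > 0.3 * n_minority:
        return 'high_complexity'
    return 'low_complexity'

labels_demo = [vis_rst_label(m, 5) for m in [5, 1, 3, 0, 4]]
mode_demo = vis_rst_mode(labels_demo, n_minority=5)   # 'high_complexity' here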
neighbors\n maxn (int): maximum number of samples to generate per minority\n instances\n n_pop (int): size of population\n popl3 (int): number of crossovers\n pm (float): mutation probability\n pr (float): selection probability\n Ge (int): number of generations\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(maxn, \"maxn\", 1)\n self.check_greater_or_equal(n_pop, \"n_pop\", 1)\n self.check_in_range(pm, \"pm\", [0, 1])\n self.check_in_range(pr, \"pr\", [0, 1])\n self.check_greater_or_equal(Ge, \"Ge\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_neighbors = n_neighbors\n self.maxn = maxn\n self.n_pop = n_pop\n self.popl3 = popl3\n self.pm = pm\n self.pr = pr\n self.Ge = Ge\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n return cls.generate_parameter_combinations({'n_neighbors': [7],\n 'maxn': [2, 3, 4],\n 'n_pop': [10],\n 'popl3': [4],\n 'pm': [0.3],\n 'pr': [0.2],\n 'Ge': [10]}, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model to find minority neighbors of\n # minority samples\n n_neighbors = min([self.n_neighbors + 1, len(X_min)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n kfold = KFold(min([len(X), 5]))\n\n def fitness(conf):\n \"\"\"\n Evluate fitness of configuration\n\n Args:\n conf (list(list)): configuration\n \"\"\"\n # generate new samples\n samples = []\n for i in range(len(conf)):\n for _ in range(conf[i]):\n X_b = X_min[self.random_state.choice(ind[i][1:])]\n samples.append(self.sample_between_points(X_min[i], X_b))\n\n if len(samples) == 0:\n # if no samples are generated\n X_new = X\n y_new = y\n else:\n # construct dataset\n X_new = np.vstack([X, np.vstack(samples)])\n y_new = np.hstack(\n [y, np.repeat(self.min_label, len(samples))])\n\n # execute kfold cross validation\n preds, tests = [], []\n for train, test in kfold.split(X_new):\n dt = DecisionTreeClassifier(random_state=self.random_state)\n dt.fit(X_new[train], y_new[train])\n preds.append(dt.predict(X_new[test]))\n tests.append(y_new[test])\n preds = np.hstack(preds)\n tests = np.hstack(tests)\n\n # compute fitness measure\n tp = np.sum(np.logical_and(\n tests == self.min_label, tests == preds))\n tn = np.sum(np.logical_and(\n tests == self.maj_label, tests == preds))\n fp = np.sum(np.logical_and(\n tests == self.maj_label, tests != preds))\n fn = np.sum(np.logical_and(\n tests == self.min_label, tests != preds))\n sens = tp/(tp + fn)\n spec = tn/(fp + tn)\n\n return np.sqrt(sens*spec)\n\n def crossover(conf_a, conf_b):\n \"\"\"\n Crossover\n\n Args:\n conf_a (list(list)): configuration to crossover\n conf_b 
(list(list)): configuration to crossover\n\n Returns:\n list(list), list(list): the configurations after crossover\n \"\"\"\n for _ in range(self.popl3):\n k = self.random_state.randint(len(conf_a))\n conf_a = np.hstack([conf_a[:k], conf_b[k:]])\n conf_b = np.hstack([conf_b[:k], conf_a[k:]])\n return conf_a, conf_b\n\n def mutation(conf, ge):\n \"\"\"\n Mutation\n\n Args:\n conf (list(list)): configuration to mutate\n ge (int): iteration number\n \"\"\"\n conf = conf.copy()\n if self.random_state.random_sample() < self.pm:\n pass\n else:\n for i in range(len(conf)):\n r = self.random_state.random_sample()\n r = r**((1 - ge/self.Ge)**3)\n if self.random_state.randint(2) == 0:\n conf[i] = int(conf[i] + (self.maxn - conf[i])*r)\n else:\n conf[i] = int(conf[i] - (conf[i] - 0)*r)\n return conf\n\n # generate initial population\n def init_pop():\n return self.random_state.randint(self.maxn, size=len(X_min))\n\n population = [[init_pop(), 0] for _ in range(self.n_pop)]\n\n # calculate fitness values\n for p in population:\n p[1] = fitness(p[0])\n\n # start iteration\n ge = 0\n while ge < self.Ge:\n # sorting population in descending order by fitness scores\n population = sorted(population, key=lambda x: -x[1])\n\n # selection operation (Step 2)\n pp = int(self.n_pop*self.pr)\n population_new = []\n for i in range(pp):\n population_new.append(population[i])\n population_new.extend(population[:(self.n_pop - pp)])\n population = population_new\n\n # crossover\n for _ in range(int(self.n_pop/2)):\n pop_0 = population[self.random_state.randint(self.n_pop)][0]\n pop_1 = population[self.random_state.randint(self.n_pop)][0]\n conf_a, conf_b = crossover(pop_0, pop_1)\n population.append([conf_a, fitness(conf_a)])\n population.append([conf_b, fitness(conf_b)])\n\n # mutation\n for _ in range(int(self.n_pop/2)):\n pop_0 = population[self.random_state.randint(self.n_pop)][0]\n conf = mutation(pop_0, ge)\n population.append([conf, fitness(conf)])\n\n ge = ge + 1\n\n # sorting final population\n population = sorted(population, key=lambda x: -x[1])\n\n # get best configuration\n conf = population[0][0]\n\n # generate final samples\n samples = []\n for i in range(len(conf)):\n for _ in range(conf[i]):\n samples.append(self.sample_between_points(\n X_min[i], X_min[self.random_state.choice(ind[i][1:])]))\n\n if len(samples) == 0:\n return X.copy(), y.copy()\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'maxn': self.maxn,\n 'n_pop': self.n_pop,\n 'popl3': self.popl3,\n 'pm': self.pm,\n 'pr': self.pr,\n 'Ge': self.Ge,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass A_SUWO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{a_suwo,\n title = \"Adaptive semi-unsupervised weighted\n oversampling (A-SUWO) for imbalanced\n datasets\",\n journal = \"Expert Systems with Applications\",\n volume = \"46\",\n pages = \"405 - 416\",\n year = \"2016\",\n issn = \"0957-4174\",\n doi = \"https://doi.org/10.1016/j.eswa.2015.10.031\",\n author = \"Iman Nekooeimehr and Susana K. 
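# --- Illustrative standalone sketch (not the library code above).  GASMOTE's
# --- fitness() scores a chromosome (per-minority-point generation counts) by
# --- the geometric mean of sensitivity and specificity of a cross-validated
# --- decision tree.  The measure itself, isolated:
import numpy as np

def g_mean(tp, fn, tn, fp):
    sensitivity = tp / (tp + fn)                      # recall on the minority class
    specificity = tn / (tn + fp)                      # recall on the majority class
    return np.sqrt(sensitivity * specificity)

fitness_demo = g_mean(tp=18, fn=2, tn=70, fp=10)      # = sqrt(0.9 * 0.875) ~ 0.89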
Lai-Yuen\",\n keywords = \"Imbalanced dataset, Classification,\n Clustering, Oversampling\"\n }\n\n Notes:\n * Equation (7) misses a division by R_j.\n * It is not specified how to sample from clusters with 1 instances.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based,\n OverSampling.cat_noise_removal]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_clus_maj=7,\n c_thres=0.8,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n n_clus_maj (int): number of majority clusters\n c_thres (float): threshold on distances\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_clus_maj, \"n_clus_maj\", 1)\n self.check_greater_or_equal(c_thres, \"c_thres\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_clus_maj = n_clus_maj\n self.c_thres = c_thres\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_clus_maj': [5, 7, 9],\n 'c_thres': [0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_orig, y_orig = X, y\n\n # fitting nearest neighbors to find neighbors of all samples\n n_neighbors = min([len(X), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X)\n\n # identifying as noise those samples which do not have neighbors of\n # the same label\n def noise_func(i):\n return np.sum(y[ind[i][1:]] == y[i]) == 0\n noise = np.where(np.array([noise_func(i) for i in range(len(X))]))[0]\n\n # removing noise\n X = np.delete(X, noise, axis=0)\n y = np.delete(y, noise)\n\n # extarcting modified minority and majority datasets\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n if len(X_min) == 0:\n _logger.info(\"All minority samples removed as noise\")\n return X_orig.copy(), y_orig.copy()\n\n n_clus_maj = min([len(X_maj), self.n_clus_maj])\n\n # clustering majority samples\n ac = 
AgglomerativeClustering(n_clusters=n_clus_maj)\n ac.fit(X_maj)\n maj_clusters = [np.where(ac.labels_ == i)[0]\n for i in range(n_clus_maj)]\n\n if len(maj_clusters) == 0:\n return X_orig.copy(), y_orig.copy()\n\n # initialize minority clusters\n min_clusters = [np.array([i]) for i in range(len(X_min))]\n\n # compute minority distance matrix of cluster\n dm_min = pairwise_distances(X_min)\n for i in range(len(dm_min)):\n dm_min[i, i] = np.inf\n\n # compute distance matrix of minority and majority clusters\n dm_maj = np.zeros(shape=(len(X_min), len(maj_clusters)))\n for i in range(len(X_min)):\n for j in range(len(maj_clusters)):\n pairwd = pairwise_distances(X_min[min_clusters[i]],\n X_maj[maj_clusters[j]])\n dm_maj[i, j] = np.min(pairwd)\n\n # compute threshold\n nn = NearestNeighbors(n_neighbors=len(X_min), n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n d_med = np.median(dist, axis=1)\n T = np.mean(d_med)*self.c_thres\n\n # do the clustering of minority samples\n while True:\n # finding minimum distance between minority clusters\n pi = np.min(dm_min)\n\n # if the minimum distance is higher than the threshold, stop\n if pi > T:\n break\n\n # find cluster pair of minimum distance\n min_dist_pair = np.where(dm_min == pi)\n min_i = min_dist_pair[0][0]\n min_j = min_dist_pair[1][0]\n\n # Step 3 - find majority clusters closer than pi\n A = np.where(np.logical_and(dm_maj[min_i] < pi,\n dm_maj[min_j] < pi))[0]\n\n # Step 4 - checking if there is a majority cluster between the\n # minority ones\n if len(A) > 0:\n dm_min[min_i, min_j] = np.inf\n dm_min[min_j, min_i] = np.inf\n else:\n # Step 5\n # unifying minority clusters\n min_clusters[min_i] = np.hstack([min_clusters[min_i],\n min_clusters[min_j]])\n # removing one of them\n min_clusters = np.delete(min_clusters, min_j)\n\n # updating the minority distance matrix\n dm_min[min_i] = np.min(np.vstack([dm_min[min_i],\n dm_min[min_j]]), axis=0)\n dm_min[:, min_i] = dm_min[min_i]\n # removing jth row and column (merged in i)\n dm_min = np.delete(dm_min, min_j, axis=0)\n dm_min = np.delete(dm_min, min_j, axis=1)\n\n # fixing the diagonal elements\n for i in range(len(dm_min)):\n dm_min[i, i] = np.inf\n\n # updating the minority-majority distance matrix\n dm_maj[min_i] = np.min(np.vstack([dm_maj[min_i],\n dm_maj[min_j]]), axis=0)\n dm_maj = np.delete(dm_maj, min_j, axis=0)\n\n # adaptive sub-cluster sizing\n eps = []\n # going through all minority clusters\n for c in min_clusters:\n # checking if cluster size is higher than 1\n if len(c) > 1:\n k = min([len(c), 5])\n kfold = KFold(k, random_state=self.random_state)\n preds = []\n # executing k-fold cross validation with linear discriminant\n # analysis\n X_c = X_min[c]\n for train, test in kfold.split(X_c):\n X_train = np.vstack([X_maj, X_c[train]])\n y_train_maj = np.repeat(self.maj_label, len(X_maj))\n y_train_min = np.repeat(self.min_label, len(X_c[train]))\n y_train = np.hstack([y_train_maj, y_train_min])\n ld = LinearDiscriminantAnalysis()\n ld.fit(X_train, y_train)\n preds.append(ld.predict(X_c[test]))\n preds = np.hstack(preds)\n # extracting error rate\n eps.append(np.sum(preds == self.maj_label)/len(preds))\n else:\n eps.append(1.0)\n\n # sampling distribution over clusters\n min_cluster_dist = eps/np.sum(eps)\n\n # synthetic instance generation - determining within cluster\n # distribution finding majority neighbor distances of minority\n # samples\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X_maj)\n dist, ind = nn.kneighbors(X_min)\n dist = 
dist/len(X[0])\n dist = 1.0/dist\n\n # computing the THs\n THs = []\n for c in min_clusters:\n THs.append(np.mean(dist[c, 0]))\n\n # determining within cluster distributions\n within_cluster_dist = []\n for i, c in enumerate(min_clusters):\n Gamma = dist[c, 0]\n Gamma[Gamma > THs[i]] = THs[i]\n within_cluster_dist.append(Gamma/np.sum(Gamma))\n\n # extracting within cluster neighbors\n within_cluster_neighbors = []\n for c in min_clusters:\n n_neighbors = min([len(c), self.n_neighbors])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min[c])\n within_cluster_neighbors.append(nn.kneighbors(X_min[c])[1])\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n # choose random cluster index\n cluster_idx = self.random_state.choice(\n np.arange(len(min_clusters)), p=min_cluster_dist)\n if len(min_clusters[cluster_idx]) > 1:\n # if the cluster has at least two elemenets\n domain = np.arange(len(min_clusters[cluster_idx]))\n distribution = within_cluster_dist[cluster_idx]\n sample_idx = self.random_state.choice(domain, p=distribution)\n\n domain = within_cluster_neighbors[cluster_idx][sample_idx][1:]\n neighbor_idx = self.random_state.choice(domain)\n point = X_min[min_clusters[cluster_idx][sample_idx]]\n neighbor = X_min[min_clusters[cluster_idx][neighbor_idx]]\n samples.append(self.sample_between_points(point, neighbor))\n else:\n samples.append(X_min[min_clusters[cluster_idx][0]])\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_clus_maj': self.n_clus_maj,\n 'c_thres': self.c_thres,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SMOTE_FRST_2T(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{smote_frst_2t,\n title = \"Fuzzy-rough imbalanced learning for the\n diagnosis of High Voltage Circuit\n Breaker maintenance: The SMOTE-FRST-2T\n algorithm\",\n journal = \"Engineering Applications of Artificial\n Intelligence\",\n volume = \"48\",\n pages = \"134 - 139\",\n year = \"2016\",\n issn = \"0952-1976\",\n doi = \"https://doi.org/10.1016/j.engappai.2015.10.009\",\n author = \"Ramentol, E. and Gondres, I. and Lajes, S.\n and Bello, R. and Caballero,Y. and\n Cornelis, C. and Herrera, F.\",\n keywords = \"High Voltage Circuit Breaker (HVCB),\n Imbalanced learning, Fuzzy rough set\n theory, Resampling methods\"\n }\n\n Notes:\n * Unlucky setting of parameters might result 0 points added, we have\n fixed this by increasing the gamma_S threshold if the number of\n samples accepted is low.\n * Similarly, unlucky setting of parameters might result all majority\n samples turned into minority.\n * In my opinion, in the algorithm presented in the paper the\n relations are incorrect. 
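# --- Illustrative aside (not part of the library) -------------------------
# A minimal sketch of the within-cluster sampling distribution built above:
# each minority point is scored by the inverse of its dimension-normalised
# distance to the closest majority point, the scores are capped at their
# cluster mean and then normalised into probabilities. The function name and
# the toy arrays are assumptions for illustration only.
import numpy as np
from sklearn.neighbors import NearestNeighbors

def within_cluster_distribution(X_cluster, X_maj):
    """Sampling probabilities for the minority points of one cluster."""
    nn = NearestNeighbors(n_neighbors=1)
    nn.fit(X_maj)
    dist, _ = nn.kneighbors(X_cluster)
    score = 1.0/(dist[:, 0]/X_cluster.shape[1])   # inverse normalised distance
    threshold = np.mean(score)
    score = np.minimum(score, threshold)          # cap scores at their mean
    return score/np.sum(score)

# usage sketch
_X_c = np.array([[0.0, 0.0], [0.2, 0.1], [0.5, 0.5]])
_X_maj = np.array([[1.0, 1.0], [1.2, 0.9]])
_p = within_cluster_distribution(_X_c, _X_maj)
# ---------------------------------------------------------------------------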
The authors talk about accepting samples\n having POS score below a threshold, and in the algorithm in\n both places POS >= gamma is used.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_noise_removal,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_application]\n\n def __init__(self,\n n_neighbors=5,\n gamma_S=0.7,\n gamma_M=0.03,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_neighbors (int): number of neighbors in the SMOTE sampling\n gamma_S (float): threshold of synthesized samples\n gamma_M (float): threshold of majority samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(gamma_S, \"gamma_S\", 0)\n self.check_greater_or_equal(gamma_M, \"gamma_M\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.gamma_S = gamma_S\n self.gamma_M = gamma_M\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'n_neighbors': [3, 5, 7],\n 'gamma_S': [0.8, 1.0],\n 'gamma_M': [0.03, 0.05, 0.1]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # Turning the ranges to 1 speeds up the positive membership\n # calculations\n mmscaler = MinMaxScaler()\n X = mmscaler.fit_transform(X)\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n # extracting the attribute ranges\n\n d = len(X[0])\n\n # after MinMax scaling, the POS value can be calculated as follows\n pos_cache = pairwise_distances(X_min, X_maj, metric='l1')\n pos_cache = 1.0 - pos_cache\n pos_cache = pos_cache.clip(0, d)\n pos_cache = 1.0 - pos_cache\n\n # initializing some lists containing the results\n result_synth = []\n result_maj = []\n iteration = 0\n\n gamma_S = self.gamma_S\n gamma_M = self.gamma_M\n\n # iterating until the dataset becomes balanced\n while (len(X_min) + len(result_synth) + len(result_maj)) < len(X_maj):\n _logger.info(self.__class__.__name__ + \":\" +\n (\"iteration: %d\" % iteration))\n # checking if the parameters aren't too conservative\n if len(result_synth) < iteration:\n gamma_S = gamma_S*1.1\n _logger.info(self.__class__.__name__ + \": \" +\n \"gamma_S increased to %f\" % gamma_S)\n\n # determine proportion\n diff = (sum(y == self.maj_label) -\n sum(y == self.min_label))\n prop = max(1.1/diff, 0.2)\n\n # executing SMOTE to generate some minority samples\n smote = SMOTE(proportion=prop,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X_samp, y_samp = smote.sample(X, y)\n X_samp = X_samp[len(X):]\n\n new_synth = []\n\n # computing POS membership values for the new samples\n pos_synth = 
pairwise_distances(X_min, X_samp, metric='l1')\n pos_synth = 1.0 - pos_synth\n pos_synth = pos_synth.clip(0, d)\n pos_synth = 1.0 - pos_synth\n\n # adding samples with POS membership smaller than gamma_S to the\n # minority set\n min_pos = np.min(pos_synth, axis=0)\n to_add = np.where(min_pos < gamma_S)[0]\n result_synth.extend(X_samp[to_add])\n new_synth.extend(X_samp[to_add])\n\n # checking the minimum POS values of the majority samples\n min_pos = np.min(pos_cache, axis=0)\n to_remove = np.where(min_pos < self.gamma_M)[0]\n\n # if the number of majority samples with POS membership smaller\n # than gamma_M is not extreme, then changing labels, otherwise\n # decreasing gamma_M\n if len(to_remove) > (len(X_maj) - len(X_min))/2:\n to_remove = np.array([])\n gamma_M = gamma_M*0.9\n _logger.info(self.__class__.__name__ + \": \" +\n \"gamma_M decreased to %f\" % gamma_M)\n else:\n result_maj.extend(X_maj[to_remove])\n X_maj = np.delete(X_maj, to_remove, axis=0)\n pos_cache = np.delete(pos_cache, to_remove, axis=1)\n\n # updating pos cache\n if len(new_synth) > 0:\n pos_cache_new = pairwise_distances(\n np.vstack(new_synth), X_maj, metric='l1')\n pos_cache_new = 1.0 - pos_cache_new\n pos_cache_new = pos_cache_new.clip(0, d)\n pos_cache_new = 1.0 - pos_cache_new\n\n pos_cache = np.vstack([pos_cache, pos_cache_new])\n\n message = \"minority added: %d, majority removed %d\"\n message = message % (len(to_add), len(to_remove))\n _logger.info(self.__class__.__name__ + \":\" + message)\n\n iteration = iteration + 1\n\n # packing the results\n X_res = np.vstack([X_maj, X_min])\n if len(result_synth) > 0:\n X_res = np.vstack([X_res, np.vstack(result_synth)])\n if len(result_maj) > 0:\n X_res = np.vstack([X_res, np.vstack(result_maj)])\n\n if len(X_maj) == 0:\n _logger.warning('All majority samples removed')\n return mmscaler.inverse_transform(X), y\n\n y_res_maj = np.repeat(self.maj_label, len(X_maj))\n n_y_res_min = len(X_min) + len(result_synth) + len(result_maj)\n y_res_min = np.repeat(self.min_label, n_y_res_min)\n y_res = np.hstack([y_res_maj, y_res_min])\n\n return mmscaler.inverse_transform(X_res), y_res\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_neighbors': self.n_neighbors,\n 'gamma_S': self.gamma_S,\n 'gamma_M': self.gamma_M,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass AND_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @inproceedings{and_smote,\n author = {Yun, Jaesub and Ha,\n Jihyun and Lee, Jong-Seok},\n title = {Automatic Determination of Neighborhood\n Size in SMOTE},\n booktitle = {Proceedings of the 10th International\n Conference on Ubiquitous\n Information Management and\n Communication},\n series = {IMCOM '16},\n year = {2016},\n isbn = {978-1-4503-4142-4},\n location = {Danang, Viet Nam},\n pages = {100:1--100:8},\n articleno = {100},\n numpages = {8},\n doi = {10.1145/2857546.2857648},\n acmid = {2857648},\n publisher = {ACM},\n address = {New York, NY, USA},\n keywords = {SMOTE, imbalanced learning, synthetic\n data generation},\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self, proportion=1.0, K=15, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
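# --- Illustrative aside (not part of the library) -------------------------
# Standalone sketch of the fuzzy-rough positive-region (POS) membership used
# by SMOTE_FRST_2T above: after MinMax scaling, similarity is derived from L1
# distances and the membership follows from the double complement with
# clipping to [0, d]. The function name and the toy data are assumptions.
import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import MinMaxScaler

def pos_membership(X_a, X_b):
    """POS membership values of the rows of X_a with respect to X_b."""
    d = X_a.shape[1]                                    # number of attributes
    pos = pairwise_distances(X_a, X_b, metric='l1')
    pos = 1.0 - pos                                     # distance -> similarity
    pos = pos.clip(0, d)
    return 1.0 - pos

# usage sketch on scaled toy data
_X = MinMaxScaler().fit_transform(np.array([[0.1, 0.2], [0.8, 0.9], [0.5, 0.4]]))
_memberships = pos_membership(_X[:1], _X[1:])
# ---------------------------------------------------------------------------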
1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n K (int): maximum number of nearest neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(K, \"K\", 2)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.K = K\n self.n_jobs = n_jobs\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'K': [9, 15, 21]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n K = min([len(X_min), self.K])\n # find K nearest neighbors of all samples\n nn = NearestNeighbors(n_neighbors=K, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X)\n\n min_ind = np.where(y == self.min_label)[0]\n\n # Executing the algorithm\n kappa = []\n for i in range(len(min_ind)):\n regions_min = []\n regions_maj = []\n\n for j in range(1, K):\n # continueing if the label of the neighbors is minority\n if y[ind[min_ind[i]][j]] != self.min_label:\n continue\n\n # region coordinates\n reg = np.hstack([min_ind[i], ind[min_ind[i]][j]])\n # compute corner points\n reg_min = np.min(X[reg])\n reg_max = np.max(X[reg])\n\n r_min = []\n r_maj = []\n # all the points in the region must be among the neighbors\n # what we do is counting how many of them are minority and\n # majority samples\n for k in ind[min_ind[i]][:(j+1)]:\n if np.all(reg_min <= X[k]) and np.all(X[k] <= reg_max):\n if y[k] == self.min_label:\n r_min.append(k)\n else:\n r_maj.append(k)\n\n # appending the coordinates of points to the minority and\n # majority regions\n regions_min.append(r_min)\n regions_maj.append(r_maj)\n\n # taking the cumulative unions of minority and majority points\n for j in range(1, len(regions_min)):\n regions_min[j] = list(\n set(regions_min[j]).union(set(regions_min[j-1])))\n regions_maj[j] = list(\n set(regions_maj[j]).union(set(regions_maj[j-1])))\n\n # computing the lengths of the increasing minority and majority\n # sets\n regions_min = np.array([len(r) for r in regions_min])\n regions_maj = np.array([len(r) for r in regions_maj])\n\n # computing the precision of minority classification (all points\n # are supposed to be classified as minority)\n prec = regions_min/(regions_min + regions_maj)\n # taking the difference\n d = np.diff(prec, 1)\n # finding the biggest drop (+1 because diff reduces length, +1\n # because of indexing begins with 0)\n if len(d) == 0:\n k = 0\n 
else:\n k = np.argmin(d) + 2\n # appending the coordinate of the biggest drop as the ideal\n # neighborhood size note that k indices the minority neighbors\n kappa.append(k)\n\n # finding nearest minority neighbors of minority samples\n nn = NearestNeighbors(n_neighbors=max(kappa) + 1, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n if np.sum(kappa) == 0:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"No minority samples in nearest neighbors\")\n return X.copy(), y.copy()\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n # choose random point\n idx = self.random_state.randint(len(X_min))\n if kappa[idx] > 0:\n domain = ind[idx][1:(kappa[idx]+1)]\n X_b = X_min[self.random_state.choice(domain)]\n samples.append(self.sample_between_points(X_min[idx], X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'K': self.K,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NRAS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{nras,\n title = \"Noise Reduction A Priori Synthetic\n Over-Sampling for class imbalanced data\n sets\",\n journal = \"Information Sciences\",\n volume = \"408\",\n pages = \"146 - 161\",\n year = \"2017\",\n issn = \"0020-0255\",\n doi = \"https://doi.org/10.1016/j.ins.2017.04.046\",\n author = \"William A. Rivera\",\n keywords = \"NRAS, SMOTE, OUPS, Class imbalance,\n Classification\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_sample_ordinary,\n OverSampling.cat_noise_removal]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n t=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
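# --- Illustrative aside (not part of the library) -------------------------
# Sketch of how AND_SMOTE above picks its adaptive neighborhood size: the
# precision of classifying every region member as minority is tracked as the
# neighborhood grows, and the size just before the largest precision drop is
# selected. The helper name is an assumption; the +2 offset mirrors the
# indexing used in the loop above.
import numpy as np

def adaptive_neighborhood_size(region_min_counts, region_maj_counts):
    """Pick k from cumulative minority/majority counts per neighborhood size."""
    region_min_counts = np.asarray(region_min_counts, dtype=float)
    region_maj_counts = np.asarray(region_maj_counts, dtype=float)
    prec = region_min_counts/(region_min_counts + region_maj_counts)
    drops = np.diff(prec, 1)
    if len(drops) == 0:
        return 0
    return np.argmin(drops) + 2

# usage sketch: precision stays at 1.0 for three neighbors, then drops
_k = adaptive_neighborhood_size([1, 2, 3, 3], [0, 0, 0, 2])
# ---------------------------------------------------------------------------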
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors\n t (float): [0,1] fraction of n_neighbors as threshold\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_in_range(t, \"t\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.t = t\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [5, 7, 9],\n 't': [0.3, 0.5, 0.8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardization is needed to make the range of the propensity scores\n # similar to that of the features\n mms = MinMaxScaler()\n X_trans = mms.fit_transform(X)\n\n # determining propensity scores using logistic regression\n lr = LogisticRegression(solver='lbfgs',\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n lr.fit(X_trans, y)\n propensity = lr.predict_proba(X_trans)[:, np.where(\n lr.classes_ == self.min_label)[0][0]]\n\n X_min = X_trans[y == self.min_label]\n\n # adding propensity scores as a new feature\n X_new = np.column_stack([X_trans, propensity])\n X_min_new = X_new[y == self.min_label]\n\n # finding nearest neighbors of minority samples\n n_neighbors = min([len(X_new), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_new)\n dist, ind = nn.kneighbors(X_min_new)\n\n # do the sampling\n samples = []\n to_remove = []\n while len(samples) < n_to_sample:\n idx = self.random_state.randint(len(X_min))\n # finding the number of minority neighbors\n t_hat = np.sum(y[ind[idx][1:]] == self.min_label)\n if t_hat < self.t*n_neighbors:\n # removing the minority point if the number of minority\n # neighbors is less then the threshold\n # to_remove indexes X_min\n if idx not in to_remove:\n to_remove.append(idx)\n # compensating the removal of the minority point\n n_to_sample = n_to_sample + 1\n\n if len(to_remove) == len(X_min):\n _logger.warning(self.__class__.__name__ + \": \" +\n \"all minority samples identified as noise\")\n return X.copy(), y.copy()\n else:\n # otherwise do the sampling\n X_b = X_trans[self.random_state.choice(ind[idx][1:])]\n samples.append(self.sample_between_points(X_min[idx], X_b))\n\n # remove noisy elements\n X_maj = X_trans[y == self.maj_label]\n 
X_min = np.delete(X_min, to_remove, axis=0)\n\n return (mms.inverse_transform(np.vstack([X_maj,\n X_min,\n np.vstack(samples)])),\n np.hstack([np.repeat(self.maj_label, len(X_maj)),\n np.repeat(self.min_label, len(X_min)),\n np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 't': self.t,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass AMSCO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{amsco,\n title = \"Adaptive multi-objective swarm fusion for\n imbalanced data classification\",\n journal = \"Information Fusion\",\n volume = \"39\",\n pages = \"1 - 24\",\n year = \"2018\",\n issn = \"1566-2535\",\n doi = \"https://doi.org/10.1016/j.inffus.2017.03.007\",\n author = \"Jinyan Li and Simon Fong and Raymond K.\n Wong and Victor W. Chu\",\n keywords = \"Swarm fusion, Swarm intelligence\n algorithm, Multi-objective, Crossover\n rebalancing, Imbalanced data\n classification\"\n }\n\n Notes:\n * It is not clear how the kappa threshold is used, I do use the RA\n score to drive all the evolution. Particularly:\n\n \"In the last phase of each iteration, the average Kappa value\n in current non-inferior set is compare with the latest threshold\n value, the threshold is then increase further if the average value\n increases, and vice versa. By doing so, the non-inferior region\n will be progressively reduced as the Kappa threshold lifts up.\"\n\n I don't see why would the Kappa threshold lift up if the kappa\n thresholds are decreased if the average Kappa decreases (\"vice versa\").\n\n * Due to the interpretation of kappa threshold and the lack of detailed\n description of the SIS process, the implementation is not exactly\n what is described in the paper, but something very similar.\n \"\"\"\n\n categories = [OverSampling.cat_changes_majority,\n OverSampling.cat_memetic,\n OverSampling.cat_uses_classifier]\n\n def __init__(self,\n n_pop=5,\n n_iter=15,\n omega=0.1,\n r1=0.1,\n r2=0.1,\n n_jobs=1,\n classifier=DecisionTreeClassifier(random_state=2),\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n n_pop (int): size of populations\n n_iter (int): optimization steps\n omega (float): intertia of PSO\n r1 (float): force towards local optimum\n r2 (float): force towards global optimum\n n_jobs (int): number of parallel jobs\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(n_pop, \"n_pop\", 1)\n self.check_greater_or_equal(n_iter, \"n_iter\", 1)\n self.check_greater_or_equal(omega, \"omega\", 0)\n self.check_greater_or_equal(r1, \"r1\", 0)\n self.check_greater_or_equal(r2, \"r2\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.n_pop = n_pop\n self.n_iter = n_iter\n self.omega = omega\n self.r1 = r1\n self.r2 = r2\n self.n_jobs = n_jobs\n self.classifier = classifier\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n # as the method is an overall optimization, 1 reasonable settings\n # should be enough\n\n classifiers = [DecisionTreeClassifier(random_state=2)]\n parameter_combinations = {'n_pop': [5],\n 'n_iter': [15],\n 'omega': [0.1],\n 'r1': [0.1],\n 'r2': [0.1],\n 'classifier': classifiers}\n\n return 
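# --- Illustrative aside (not part of the library) -------------------------
# Sketch of the propensity-score augmentation NRAS above performs before its
# neighborhood search: a logistic regression is fitted on the scaled data and
# the predicted minority-class probability is appended as an extra feature.
# Function and variable names are assumptions for illustration.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler

def augment_with_propensity(X, y, min_label=1):
    """Append the minority-class propensity score as a new column."""
    X_scaled = MinMaxScaler().fit_transform(X)
    lr = LogisticRegression(solver='lbfgs')
    lr.fit(X_scaled, y)
    col = np.where(lr.classes_ == min_label)[0][0]
    propensity = lr.predict_proba(X_scaled)[:, col]
    return np.column_stack([X_scaled, propensity])

# usage sketch with a tiny toy dataset
_X = np.array([[0.0, 0.0], [0.1, 0.2], [1.0, 1.0], [0.9, 0.8]])
_y = np.array([0, 0, 1, 1])
_X_aug = augment_with_propensity(_X, _y, min_label=1)
# ---------------------------------------------------------------------------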
cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n X_maj = X[y == self.maj_label]\n\n n_cross_val = min([4, len(X_min)])\n\n def fitness(X_min, X_maj):\n \"\"\"\n Calculating fitness function\n\n Args:\n X_min (np.matrix): minority samples\n X_maj (np.matrix): majority samples\n\n Returns:\n float, float: kappa, accuracy\n \"\"\"\n kfold = StratifiedKFold(n_cross_val)\n\n # prepare assembled dataset\n X_ass = np.vstack([X_min, X_maj])\n y_ass = np.hstack([np.repeat(self.min_label, len(X_min)),\n np.repeat(self.maj_label, len(X_maj))])\n\n preds = []\n tests = []\n for train, test in kfold.split(X_ass, y_ass):\n self.classifier.fit(X_ass[train], y_ass[train])\n preds.append(self.classifier.predict(X))\n tests.append(y)\n preds = np.hstack(preds)\n tests = np.hstack(tests)\n\n # calculate kappa and accuracy scores\n tp = np.sum(np.logical_and(preds == tests,\n tests == self.min_label))\n fn = np.sum(np.logical_and(preds != tests,\n tests == self.min_label))\n tn = np.sum(np.logical_and(preds == tests,\n tests == self.maj_label))\n fp = np.sum(np.logical_and(preds != tests,\n tests == self.maj_label))\n\n p_o = (tp + tn)/(tp + fn + tn + fp)\n p_e = (tp + fn)*(tp + fp)/(tp + fn + tn + fp)**2 + \\\n (fp + tn)*(fn + tn)/(tp + fn + tn + fp)**2\n\n kappa = (p_o - p_e)/(1.0 - p_e)\n accuracy = (tp + tn)/(tp + fn + tn + fp)\n\n return kappa, accuracy\n\n def OSMOTE(X_min, X_maj):\n \"\"\"\n Executing OSMOTE phase\n\n Args:\n X_min (np.matrix): minority samples\n X_maj (np.matrix): majority samples\n\n Returns:\n np.matrix, np.matrix: new minority and majority datasets\n \"\"\"\n\n # initialize particles, first coordinate represents proportion\n # parameter of SMOTE\n # the second coordinate represents the number of neighbors to\n # take into consideration\n def init_pop():\n proportion = self.random_state.random_sample()/2.0+0.5\n n_neighbors = self.random_state.randint(3, 10)\n return np.array([proportion, n_neighbors])\n particles = [init_pop() for _ in range(self.n_pop)]\n # velocities initialized\n velocities = [np.array([0.1, 1]) for _ in range(self.n_pop)]\n # setting the limits of the search space\n limits = [np.array([0.25, 3]), np.array([4.0, 10])]\n # local best results\n local_best = [particles[i].copy() for i in range(self.n_pop)]\n # local best scores\n local_score = [(0.0, 0.0)]*self.n_pop\n # global best result\n global_best = particles[0].copy()\n # global best score\n global_score = (0.0, 0.0)\n # best dataset\n best_dataset = None\n\n # running the optimization\n for _ in range(self.n_iter):\n # update velocities\n for i in range(len(velocities)):\n diff1 = (local_best[i] - velocities[i])\n diff2 = (global_best - velocities[i])\n velocities[i] = (velocities[i]*self.omega +\n self.r1 * diff1 + self.r2*diff2)\n # clipping velocities using the upper bounds of the\n # particle search space\n velocities[i][0] = np.clip(\n velocities[i][0], -limits[1][0]/2, limits[1][0]/2)\n velocities[i][1] = np.clip(\n velocities[i][1], -limits[1][1]/2, limits[1][1]/2)\n\n # 
update particles\n for i in range(len(particles)):\n particles[i] = particles[i] + velocities[i]\n # clipping the particle positions using the lower and\n # upper bounds\n particles[i][0] = np.clip(\n particles[i][0], limits[0][0], limits[1][0])\n particles[i][1] = np.clip(\n particles[i][1], limits[0][1], limits[1][1])\n\n # evaluate\n scores = []\n for i in range(len(particles)):\n # apply SMOTE\n smote = SMOTE(particles[i][0],\n int(np.rint(particles[i][1])),\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X_to_sample = np.vstack([X_maj, X_min])\n y_to_sample_maj = np.repeat(\n self.maj_label, len(X_maj))\n y_to_sample_min = np.repeat(\n self.min_label, len(X_min))\n y_to_sample = np.hstack([y_to_sample_maj, y_to_sample_min])\n X_samp, y_samp = smote.sample(X_to_sample, y_to_sample)\n\n # evaluate\n scores.append(fitness(X_samp[len(X_maj):],\n X_samp[:len(X_maj)]))\n\n # update scores according to the multiobjective setting\n if (scores[i][0]*scores[i][1] >\n local_score[i][0]*local_score[i][1]):\n local_best[i] = particles[i].copy()\n local_score[i] = scores[i]\n if (scores[i][0]*scores[i][1] >\n global_score[0]*global_score[1]):\n global_best = particles[i].copy()\n global_score = scores[i]\n best_dataset = (X_samp[len(X_maj):],\n X_samp[:len(X_maj)])\n\n return best_dataset[0], best_dataset[1]\n\n def SIS(X_min, X_maj):\n \"\"\"\n SIS procedure\n\n Args:\n X_min (np.matrix): minority dataset\n X_maj (np.matrix): majority dataset\n\n Returns:\n np.matrix, np.matrix: new minority and majority datasets\n \"\"\"\n min_num = len(X_min)\n max_num = len(X_maj)\n if min_num >= max_num:\n return X_min, X_maj\n\n # initiate particles\n def init_particle():\n num = self.random_state.randint(min_num, max_num)\n maj = self.random_state.choice(np.arange(len(X_maj)), num)\n return maj\n\n particles = [init_particle() for _ in range(self.n_pop)]\n scores = [fitness(X_min, X_maj[particles[i]])\n for i in range(self.n_pop)]\n best_score = (0.0, 0.0)\n best_dataset = None\n\n for _ in range(self.n_iter):\n # mutate and evaluate\n # the way mutation or applying PSO is not described in the\n # paper in details\n for i in range(self.n_pop):\n # removing some random elements\n domain = np.arange(len(particles[i]))\n n_max = min([10, len(particles[i])])\n n_to_choose = self.random_state.randint(0, n_max)\n to_remove = self.random_state.choice(domain, n_to_choose)\n mutant = np.delete(particles[i], to_remove)\n\n # adding some random elements\n maj_set = set(np.arange(len(X_maj)))\n part_set = set(particles[i])\n diff = list(maj_set.difference(part_set))\n n_max = min([10, len(diff)])\n n_to_choose = self.random_state.randint(0, n_max)\n diff_elements = self.random_state.choice(diff, n_to_choose)\n mutant = np.hstack([mutant, np.array(diff_elements)])\n # evaluating the variant\n score = fitness(X_min, X_maj[mutant])\n if score[1] > scores[i][1]:\n particles[i] = mutant.copy()\n scores[i] = score\n if score[1] > best_score[1]:\n best_score = score\n best_dataset = mutant.copy()\n\n return X_min, X_maj[best_dataset]\n\n # executing the main optimization procedure\n current_min = X_min\n current_maj = X_maj\n for it in range(self.n_iter):\n _logger.info(self.__class__.__name__ + \": \" +\n 'staring iteration %d' % it)\n new_min, _ = OSMOTE(X_min, current_maj)\n _, new_maj = SIS(current_min, X_maj)\n\n # calculating fitness values of the four combinations\n fitness_0 = np.prod(fitness(new_min, current_maj))\n fitness_1 = np.prod(fitness(current_min, current_maj))\n fitness_2 = np.prod(fitness(new_min, 
new_maj))\n fitness_3 = np.prod(fitness(current_min, new_maj))\n\n # selecting the new current_maj and current_min datasets\n message = 'fitness scores: %f %f %f %f'\n message = message % (fitness_0, fitness_1, fitness_2, fitness_3)\n _logger.info(self.__class__.__name__ + \": \" + message)\n max_fitness = np.max([fitness_0, fitness_1, fitness_2, fitness_3])\n if fitness_1 == max_fitness or fitness_3 == max_fitness:\n current_maj = new_maj\n if fitness_0 == max_fitness or fitness_2 == max_fitness:\n current_min = new_min\n\n return (np.vstack([current_maj, current_min]),\n np.hstack([np.repeat(self.maj_label, len(current_maj)),\n np.repeat(self.min_label, len(current_min))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'n_pop': self.n_pop,\n 'n_iter': self.n_iter,\n 'omega': self.omega,\n 'r1': self.r1,\n 'r2': self.r2,\n 'n_jobs': self.n_jobs,\n 'classifier': self.classifier,\n 'random_state': self._random_state_init}\n\n\nclass SSO(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @InProceedings{sso,\n author=\"Rong, Tongwen\n and Gong, Huachang\n and Ng, Wing W. Y.\",\n editor=\"Wang, Xizhao\n and Pedrycz, Witold\n and Chan, Patrick\n and He, Qiang\",\n title=\"Stochastic Sensitivity Oversampling\n Technique for Imbalanced Data\",\n booktitle=\"Machine Learning and Cybernetics\",\n year=\"2014\",\n publisher=\"Springer Berlin Heidelberg\",\n address=\"Berlin, Heidelberg\",\n pages=\"161--171\",\n isbn=\"978-3-662-45652-1\"\n }\n\n Notes:\n * In the algorithm step 2d adds a constant to a vector. I have\n changed it to a componentwise adjustment, and also used the\n normalized STSM as I don't see any reason why it would be\n some reasonable, bounded value.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_classifier,\n OverSampling.cat_uses_clustering,\n OverSampling.cat_density_based]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n h=10,\n n_iter=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
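# --- Illustrative aside (not part of the library) -------------------------
# Sketch of the kappa/accuracy fitness used inside AMSCO above, written as a
# standalone function on binary confusion-matrix counts. The formula mirrors
# the p_o/p_e expressions in the fitness closure; the function name is an
# assumption.
def kappa_and_accuracy(tp, fn, tn, fp):
    """Cohen's kappa and accuracy from binary confusion-matrix counts."""
    total = tp + fn + tn + fp
    p_o = (tp + tn)/total                                        # observed agreement
    p_e = ((tp + fn)*(tp + fp) + (fp + tn)*(fn + tn))/total**2   # chance agreement
    kappa = (p_o - p_e)/(1.0 - p_e)
    return kappa, p_o

# usage sketch
_kappa, _acc = kappa_and_accuracy(tp=40, fn=10, tn=35, fp=15)
# ---------------------------------------------------------------------------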
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n h (int): number of hidden units\n n_iter (int): optimization steps\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(h, \"h\", 1)\n self.check_greater_or_equal(n_iter, \"n_iter\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.h = h\n self.n_iter = n_iter\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5],\n 'h': [2, 5, 10, 20],\n 'n_iter': [5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # number of samples to generate in each iteration\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n samp_per_iter = max([1, int(n_to_sample/self.n_iter)])\n\n # executing the algorithm\n for _ in range(self.n_iter):\n X_min = X[y == self.min_label]\n\n # applying kmeans clustering to find the hidden neurons\n h = min([self.h, len(X_min)])\n kmeans = KMeans(n_clusters=h,\n random_state=self.random_state)\n kmeans.fit(X)\n\n # extracting the hidden center elements\n u = kmeans.cluster_centers_\n\n # extracting scale parameters as the distances of closest centers\n nn_cent = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs)\n nn_cent.fit(u)\n dist_cent, ind_cent = nn_cent.kneighbors(u)\n v = dist_cent[:, 1]\n\n # computing the response of the hidden units\n phi = pairwise_distances(X, u)\n phi = phi**2\n phi = np.exp(-phi/v**2)\n\n # applying linear regression to find the best weights\n lr = LinearRegression()\n lr.fit(phi, y)\n f = lr.predict(phi[np.where(y == self.min_label)[0]])\n w = lr.coef_\n\n def eq_6(Q, w, u, v, x):\n \"\"\"\n Equation 6 in the paper\n \"\"\"\n tmp_sum = np.zeros(h)\n for i in range(h):\n a = (x - u[i] + Q)/np.sqrt(2*v[i])\n b = (x - u[i] - Q)/np.sqrt(2*v[i])\n tmp_prod = (sspecial.erf(a) - sspecial.erf(b))\n tmp_sum[i] = np.sqrt(np.pi/2)*v[i]*np.prod(tmp_prod)\n return np.dot(tmp_sum, w)/(2*Q)**len(x)\n\n def eq_8(Q, w, u, v, x):\n \"\"\"\n Equation 8 in the paper\n \"\"\"\n res = 0.0\n for i in range(h):\n vi2 = v[i]**2\n for r in range(h):\n vr2 = v[r]**2\n a1 = (np.sqrt(2*vi2*vr2*(vi2 + vr2)))\n\n a00_v = (vi2 + vr2)*(x + Q)\n a01_v = vi2*u[r] + vr2*u[i]\n a0_v = a00_v - a01_v\n a_v = a0_v/a1\n\n b_v = ((vi2 + vr2)*(x - Q) - (vi2*u[r] + vr2*u[i]))/a1\n tmp_prod = sspecial.erf(a_v) - 
sspecial.erf(b_v)\n\n tmp_a = (np.sqrt(2*vi2*vr2*(vi2 + vr2)) /\n (vi2 + vr2))**len(x)\n norm = np.linalg.norm(u[r] - u[i])\n tmp_b = np.exp(-0.5 * norm**2/(vi2 + vr2))\n res = res + tmp_a*tmp_b*np.prod(tmp_prod)*w[i]*w[r]\n\n return (np.sqrt(np.pi)/(4*Q))**len(x)*res\n\n # applying nearest neighbors to extract Q values\n n_neighbors = min([self.n_neighbors + 1, len(X)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n Q = np.mean(dist[:, n_neighbors-1])/np.sqrt(len(X[0]))\n\n # calculating the sensitivity factors\n I_1 = np.array([eq_6(Q, w, u, v, x) for x in X_min])\n I_2 = np.array([eq_8(Q, w, u, v, x) for x in X_min])\n\n stsm = f**2 - 2*f*I_1 + I_2\n\n # calculating the sampling weights\n weights = np.abs(stsm)/np.sum(np.abs(stsm))\n\n n_neighbors = min([len(X_min), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n samples = []\n for _ in range(samp_per_iter):\n idx = self.random_state.choice(\n np.arange(len(X_min)), p=weights)\n X_new = X_min[idx].copy()\n for s in range(len(X_new)):\n lam = self.random_state.random_sample(\n )*(2*(1 - weights[idx])) - (1 - weights[idx])\n X_new[s] = X_new[s] + Q*lam\n samples.append(X_new)\n\n samples = np.vstack(samples)\n X = np.vstack([X, samples])\n y = np.hstack([y, np.repeat(self.min_label, len(samples))])\n\n return X.copy(), y.copy()\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'h': self.h,\n 'n_iter': self.n_iter,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass NDO_sampling(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{ndo_sampling,\n author={Zhang, L. and Wang, W.},\n booktitle={2011 International Conference of\n Information Technology, Computer\n Engineering and Management Sciences},\n title={A Re-sampling Method for Class Imbalance\n Learning with Credit Data},\n year={2011},\n volume={1},\n number={},\n pages={393-397},\n keywords={data handling;sampling methods;\n resampling method;class imbalance\n learning;credit rating;imbalance\n problem;synthetic minority\n over-sampling technique;sample\n distribution;synthetic samples;\n credit data set;Training;\n Measurement;Support vector machines;\n Logistics;Testing;Noise;Classification\n algorithms;class imbalance;credit\n rating;SMOTE;sample distribution},\n doi={10.1109/ICM.2011.34},\n ISSN={},\n month={Sept}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_application]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n T=0.5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
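# --- Illustrative aside (not part of the library) -------------------------
# Sketch of the RBF response construction used inside SSO above: k-means
# centres act as hidden units, each unit's width is its distance to the
# closest other centre, and the resulting hidden-layer responses feed a
# linear model. Function name and toy data are assumptions.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import pairwise_distances

def rbf_responses(X, n_hidden=3, random_state=5):
    """Hidden-layer responses of a simple RBF layer built from k-means centres."""
    kmeans = KMeans(n_clusters=n_hidden, random_state=random_state)
    kmeans.fit(X)
    u = kmeans.cluster_centers_
    nn = NearestNeighbors(n_neighbors=2)
    nn.fit(u)
    dist, _ = nn.kneighbors(u)
    v = dist[:, 1]                         # width: distance to the closest other centre
    phi = pairwise_distances(X, u)**2
    return np.exp(-phi/v**2)

# usage sketch
_X = np.random.RandomState(5).random_sample((20, 2))
_phi = rbf_responses(_X, n_hidden=3)
# ---------------------------------------------------------------------------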
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n T (float): threshold parameter\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(T, \"T\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.T = T\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'T': [0.5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # fitting nearest neighbors model to find the neighbors of minority\n # samples among all elements\n n_neighbors = min([len(X), self.n_neighbors+1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X)\n dist, ind = nn.kneighbors(X_min)\n\n # calculating the distances between samples in the same and different\n # classes\n d_intra = []\n d_exter = []\n for i in range(len(X_min)):\n min_mask = np.where(y[ind[i][1:]] == self.min_label)[0]\n maj_mask = np.where(y[ind[i][1:]] == self.maj_label)[0]\n if len(min_mask) > 0:\n d_intra.append(np.mean(dist[i][1:][min_mask]))\n if len(maj_mask) > 0:\n d_exter.append(np.mean(dist[i][1:][maj_mask]))\n d_intra_mean = np.mean(np.array(d_intra))\n d_exter_mean = np.mean(np.array(d_exter))\n\n # calculating the alpha value\n alpha = d_intra_mean/d_exter_mean\n\n # deciding if SMOTE is enough\n if alpha < self.T:\n smote = SMOTE(self.proportion, random_state=self.random_state)\n return smote.sample(X, y)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.randint(len(X_min))\n random_idx = self.random_state.choice(ind[idx][1:])\n # create sample close to the initial minority point\n samples.append(X_min[idx] + (X[random_idx] - X_min[idx])\n * self.random_state.random_sample()/2.0)\n if y[random_idx] == self.min_label:\n # create another sample close to the neighboring minority point\n samples.append(X[random_idx] + (X_min[idx] - X[random_idx])\n * self.random_state.random_sample()/2.0)\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return 
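# --- Illustrative aside (not part of the library) -------------------------
# Sketch of the alpha statistic NDO_sampling above uses to decide between
# plain SMOTE and its own two-sided generation: the mean distance of minority
# points to same-class neighbours divided by the mean distance to other-class
# neighbours. The function name and toy data are assumptions.
import numpy as np
from sklearn.neighbors import NearestNeighbors

def intra_exter_ratio(X, y, min_label=1, n_neighbors=5):
    """Mean intra-class / inter-class neighbour distance ratio for the minority class."""
    X_min = X[y == min_label]
    nn = NearestNeighbors(n_neighbors=min(len(X), n_neighbors + 1))
    nn.fit(X)
    dist, ind = nn.kneighbors(X_min)
    d_intra, d_exter = [], []
    for i in range(len(X_min)):
        labels = y[ind[i][1:]]
        if np.any(labels == min_label):
            d_intra.append(np.mean(dist[i][1:][labels == min_label]))
        if np.any(labels != min_label):
            d_exter.append(np.mean(dist[i][1:][labels != min_label]))
    return np.mean(d_intra)/np.mean(d_exter)

# usage sketch
_X = np.vstack([np.zeros((6, 2)), np.ones((3, 2)) + 0.1*np.arange(3)[:, None]])
_y = np.hstack([np.zeros(6, dtype=int), np.ones(3, dtype=int)])
_alpha = intra_exter_ratio(_X, _y, min_label=1, n_neighbors=4)
# ---------------------------------------------------------------------------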
{'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'T': self.T,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass RBFNeuron(RandomStateMixin):\n \"\"\"\n This class abstracts a neuron of an RBF network\n \"\"\"\n\n def __init__(self,\n c,\n Ib,\n Ob,\n ranges,\n range_mins,\n init_conn_mask,\n init_conn_weights,\n random_state=None):\n \"\"\"\n Constructor of the neuron\n\n Args:\n c (np.array): center of the hidden unit\n Ib (float): upper bound on the absolute values of input weights\n Ob (float): upper bound on the absolute values of output weights\n ranges (np.array): ranges widths of parameters\n range_min (np.array): lower bounds of parameter ranges\n init_conn_mask (np.array): initial input connections\n init_conn_weights (np.array): initial weights of input connections\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n self.d = len(c)\n self.c = c\n self.Ib = Ib\n self.Ob = Ob\n self.init_conn_mask = init_conn_mask\n self.init_conn_weights = init_conn_weights\n self.ranges = ranges\n self.range_mins = range_mins\n\n self.set_random_state(random_state)\n\n self.beta = (self.random_state.random_sample()-0.5)*Ob\n self.mask = init_conn_mask\n self.input_weights = init_conn_weights\n self.r = self.random_state.random_sample()\n\n def clone(self):\n \"\"\"\n Clones the neuron\n\n Returns:\n RBFNeuron: an identical neuron\n \"\"\"\n r = RBFNeuron(self.c,\n self.Ib,\n self.Ob,\n self.ranges,\n self.range_mins,\n self.init_conn_mask,\n self.init_conn_weights,\n random_state=self.random_state)\n r.beta = self.beta\n r.mask = self.mask.copy()\n r.input_weights = self.input_weights.copy()\n r.r = self.r\n\n return r\n\n def evaluate(self, X):\n \"\"\"\n Evaluates the system on dataset X\n\n Args:\n X (np.matrix): dataset to evaluate on\n\n Returns:\n np.array: the output of the network\n \"\"\"\n wX = X[:, self.mask]*self.input_weights\n term_exp = -np.linalg.norm(wX - self.c[self.mask], axis=1)**2/self.r**2\n return self.beta*np.exp(term_exp)\n\n def mutate(self):\n \"\"\"\n Mutates the neuron\n \"\"\"\n r = self.random_state.random_sample()\n if r < 0.2:\n # centre creep\n self.c = self.random_state.normal(self.c, self.r)\n elif r < 0.4:\n # radius creep\n tmp = self.random_state.normal(self.r, np.var(self.ranges))\n if tmp > 0:\n self.r = tmp\n elif r < 0.6:\n # randomize centers\n self.c = self.random_state.random_sample(\n size=len(self.c))*self.ranges + self.range_mins\n elif r < 0.8:\n # randomize radii\n self.r = self.random_state.random_sample()*np.mean(self.ranges)\n else:\n # randomize output weight\n self.beta = self.random_state.normal(self.beta, self.Ob)\n\n def add_connection(self):\n \"\"\"\n Adds a random input connection to the neuron\n \"\"\"\n if len(self.mask) < self.d:\n d_set = set(range(self.d))\n mask_set = set(self.mask.tolist())\n domain = list(d_set.difference(mask_set))\n additional_elements = np.array(self.random_state.choice(domain))\n self.mask = np.hstack([self.mask, additional_elements])\n random_weight = (self.random_state.random_sample()-0.5)*self.Ib\n self.input_weights = np.hstack([self.input_weights, random_weight])\n\n def delete_connection(self):\n \"\"\"\n Deletes a random input connection\n \"\"\"\n if len(self.mask) > 1:\n idx = self.random_state.randint(len(self.mask))\n self.mask = np.delete(self.mask, idx)\n self.input_weights = np.delete(self.input_weights, idx)\n\n\nclass RBF(RandomStateMixin):\n \"\"\"\n RBF network abstraction\n \"\"\"\n\n def 
__init__(self,\n X,\n m_min,\n m_max,\n Ib,\n Ob,\n init_conn_mask,\n init_conn_weights,\n random_state=None):\n \"\"\"\n Initializes the RBF network\n\n Args:\n X (np.matrix): dataset to work with\n m_min (int): minimum number of hidden neurons\n m_max (int): maximum number of hidden neurons\n Ib (float): maximum absolute value of input weights\n Ob (float): maximum absolute value of output weights\n init_conn_mask (np.array): initial input connections\n init_conn_weights (np.array): initial input weights\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n self.X = X\n self.m_min = m_min\n self.m_max = m_max\n self.Ib = Ib\n self.Ob = Ob\n self.init_conn_mask = init_conn_mask\n self.init_conn_weights = init_conn_weights\n\n self.set_random_state(random_state)\n\n self.neurons = []\n self.range_mins = np.min(X, axis=0)\n self.ranges = np.max(X, axis=0) - self.range_mins\n\n # adding initial neurons\n num_neurons = self.random_state.randint(m_min, m_max)\n for _ in range(num_neurons):\n self.neurons.append(self.create_new_node())\n\n self.beta_0 = (self.random_state.random_sample()-0.5)*Ob\n\n def clone(self):\n \"\"\"\n Clones the entire network\n\n Returns:\n RBF: the cloned network\n \"\"\"\n r = RBF(self.X,\n self.m_min,\n self.m_max,\n self.Ib,\n self.Ob,\n self.init_conn_mask,\n self.init_conn_weights,\n random_state=self.random_state)\n r.neurons = [n.clone() for n in self.neurons]\n r.range_mins = self.range_mins.copy()\n r.ranges = self.ranges.copy()\n r.beta_0 = self.beta_0\n\n return r\n\n def create_new_node(self):\n \"\"\"\n Creates a new node.\n\n Returns:\n RBFNeuron: a new hidden neuron\n \"\"\"\n return RBFNeuron(self.X[self.random_state.randint(len(self.X))],\n self.Ib,\n self.Ob,\n self.ranges,\n self.range_mins,\n self.init_conn_mask,\n self.init_conn_weights,\n random_state=self.random_state)\n\n def update_data(self, X):\n \"\"\"\n Updates the data to work with\n \"\"\"\n self.X = X\n for n in self.neurons:\n n.X = X\n\n def improve_centers(self):\n \"\"\"\n Improves the center locations by kmeans clustering\n \"\"\"\n if len(np.unique(self.X, axis=0)) > len(self.neurons):\n cluster_init = np.vstack([n.c for n in self.neurons])\n kmeans = KMeans(n_clusters=len(self.neurons),\n init=cluster_init,\n n_init=1,\n max_iter=30,\n random_state=self.random_state)\n kmeans.fit(self.X)\n for i in range(len(self.neurons)):\n self.neurons[i].c = kmeans.cluster_centers_[i]\n\n def evaluate(self, X, y):\n \"\"\"\n Evaluates the target function\n\n Returns:\n float: the target function value\n \"\"\"\n evaluation = np.column_stack([n.evaluate(X) for n in self.neurons])\n f = self.beta_0 + np.sum(evaluation, axis=1)\n L_star = np.mean(abs(y[y == 1] - f[y == 1]))\n L_star += np.mean(abs(y[y == 0] - f[y == 0]))\n return L_star\n\n def mutation(self):\n \"\"\"\n Mutates the neurons\n\n Returns:\n RBF: a new, mutated RBF network\n \"\"\"\n rbf = self.clone()\n for n in rbf.neurons:\n n.mutate()\n return rbf\n\n def structural_mutation(self):\n \"\"\"\n Applies structural mutation\n\n Returns:\n RBF: a new, structurally mutated network\n \"\"\"\n # in the binary case the removal of output connections is the same as\n # removing hidden nodes\n rbf = self.clone()\n r = self.random_state.random_sample()\n if r < 0.5:\n if len(rbf.neurons) < rbf.m_max:\n rbf.neurons.append(rbf.create_new_node())\n elif len(rbf.neurons) > rbf.m_min:\n del rbf.neurons[self.random_state.randint(len(rbf.neurons))]\n else:\n rbf.neurons[self.random_state.randint(\n 
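# --- Illustrative aside (not part of the library) -------------------------
# Minimal sketch of the evaluation performed by the RBF surrogate above: each
# hidden unit contributes beta*exp(-||x - c||^2/r^2), the contributions are
# summed with a bias, and the fitness is the class-balanced mean absolute
# error computed in RBF.evaluate. Input-connection masks and weights are
# omitted here; all names are assumptions.
import numpy as np

def rbf_network_output(X, centres, radii, betas, beta_0):
    """Output of a simple RBF network without input masking."""
    out = np.full(len(X), beta_0, dtype=float)
    for c, r, beta in zip(centres, radii, betas):
        out += beta*np.exp(-np.linalg.norm(X - c, axis=1)**2/r**2)
    return out

def balanced_mean_loss(y, f):
    """Mean absolute error averaged separately over the two classes."""
    return np.mean(np.abs(y[y == 1] - f[y == 1])) + \
        np.mean(np.abs(y[y == 0] - f[y == 0]))

# usage sketch
_X = np.array([[0.0, 0.0], [1.0, 1.0], [0.9, 1.1], [0.1, -0.1]])
_y = np.array([0, 1, 1, 0])
_f = rbf_network_output(_X, centres=[np.array([1.0, 1.0])], radii=[0.5],
                        betas=[1.0], beta_0=0.0)
_loss = balanced_mean_loss(_y, _f)
# ---------------------------------------------------------------------------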
len(rbf.neurons))].delete_connection()\n rbf.neurons[self.random_state.randint(\n len(rbf.neurons))].add_connection()\n\n return rbf\n\n def recombine(self, rbf):\n \"\"\"\n Recombines two networks\n\n Args:\n rbf (RBF): another network\n\n Returns:\n RBF: the result of recombination\n \"\"\"\n # the order of neurons doesn't matter, so the logic can be simplified\n new = self.clone()\n if self.random_state.random_sample() < 0.5:\n n_random = self.random_state.randint(1, len(new.neurons))\n new_neurons_0 = self.random_state.choice(new.neurons, n_random)\n n_random = self.random_state.randint(1, len(rbf.neurons))\n new_neurons_1 = self.random_state.choice(rbf.neurons, n_random)\n new.neurons = [n.clone() for n in new_neurons_0]\n new.neurons.extend([n.clone() for n in new_neurons_1])\n while len(new.neurons) > self.m_max:\n del new.neurons[self.random_state.randint(len(new.neurons))]\n else:\n for i in range(len(new.neurons)):\n if self.random_state.random_sample() < 0.2:\n n_random = self.random_state.randint(len(rbf.neurons))\n new.neurons[i] = rbf.neurons[n_random].clone()\n return new\n\n\nclass DSRBF(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{dsrbf,\n title = \"A dynamic over-sampling procedure based on\n sensitivity for multi-class problems\",\n journal = \"Pattern Recognition\",\n volume = \"44\",\n number = \"8\",\n pages = \"1821 - 1833\",\n year = \"2011\",\n issn = \"0031-3203\",\n doi = \"https://doi.org/10.1016/j.patcog.2011.02.019\",\n author = \"Francisco Fernández-Navarro and César\n Hervás-Martínez and Pedro Antonio\n Gutiérrez\",\n keywords = \"Classification, Multi-class, Sensitivity,\n Accuracy, Memetic algorithm, Imbalanced\n datasets, Over-sampling method, SMOTE\"\n }\n\n Notes:\n * It is not entirely clear why J-1 output is supposed where J is the\n number of classes.\n * The fitness function is changed to a balanced mean loss, as I found\n that it just ignores classification on minority samples\n (class label +1) in the binary case.\n * The iRprop+ optimization is not implemented.\n * The original paper proposes using SMOTE incrementally. Instead of\n that, this implementation applies SMOTE to generate all samples\n needed in the sampling epochs and the evolution of RBF networks\n is used to select the sampling providing the best results.\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_classifier,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_memetic]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n m_min=4,\n m_max=10,\n Ib=2,\n Ob=2,\n n_pop=500,\n n_init_pop=5000,\n n_iter=40,\n n_sampling_epoch=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors in the SMOTE sampling\n m_min (int): minimum number of hidden units\n m_max (int): maximum number of hidden units\n Ib (float): input weight range\n Ob (float): output weight range\n n_pop (int): size of population\n n_init_pop (int): size of initial population\n n_iter (int): number of iterations\n n_sampling_epoch (int): resampling after this many iterations\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(m_min, \"m_min\", 1)\n self.check_greater_or_equal(m_max, \"m_max\", 1)\n self.check_greater(Ib, \"Ib\", 0)\n self.check_greater(Ob, \"Ob\", 0)\n self.check_greater_or_equal(n_pop, \"n_pop\", 2)\n self.check_greater_or_equal(n_init_pop, \"n_pop\", 2)\n self.check_greater_or_equal(n_iter, \"n_iter\", 0)\n self.check_greater_or_equal(n_sampling_epoch, \"n_sampling_epoch\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.m_min = m_min\n self.m_max = m_max\n self.Ib = Ib\n self.Ob = Ob\n self.n_pop = n_pop\n self.n_init_pop = n_init_pop\n self.n_iter = n_iter\n self.n_sampling_epoch = n_sampling_epoch\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n # as the technique optimizes, it is unnecessary to check various\n # combinations except one specifying a decent workspace with a large\n # number of iterations\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'm_min': [4],\n 'm_max': [10],\n 'Ib': [2.0],\n 'Ob': [2.0],\n 'n_pop': [100],\n 'n_init_pop': [1000],\n 'n_iter': [40],\n 'n_sampling_epoch': [8]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n # Standardizing the data to let the network work with comparable\n # attributes\n ss = StandardScaler()\n X = ss.fit_transform(X)\n X_orig = X\n y_orig = y\n\n X, y = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state).sample(X, y)\n\n # generate initial connections and weights randomly\n domain = np.arange(len(X[0]))\n n_random = int(len(X[0])/2)\n init_conn_mask = self.random_state.choice(domain, n_random)\n init_conn_weights = self.random_state.random_sample(size=n_random)\n\n # setting epoch lengths\n epoch_len = int(self.n_iter/self.n_sampling_epoch)\n\n if len(X_orig) < self.m_min + 1:\n return X_orig.copy(), y_orig.copy()\n m_max = min(len(X_orig), self.m_max)\n\n # generating initial population\n def init_pop():\n return RBF(X,\n self.m_min,\n m_max,\n 
self.Ib,\n self.Ob,\n init_conn_mask,\n init_conn_weights,\n random_state=self.random_state)\n\n population = [init_pop() for _ in range(self.n_init_pop)]\n population = [[p, X, y, np.inf] for p in population]\n population = sorted([[p[0], p[1], p[2], p[0].evaluate(p[1], p[2])]\n for p in population], key=lambda x: x[3])\n population = population[:self.n_pop]\n\n # executing center improval in the hidden units\n for p in population:\n p[0].improve_centers()\n\n # executing the optimization process\n for iteration in range(self.n_iter):\n message = \"Iteration %d/%d, loss: %f, data size %d\"\n message = message % (iteration, self.n_iter, population[0][3],\n len(population[0][1]))\n _logger.info(self.__class__.__name__ + \": \" + message)\n # evaluating non-evaluated elements\n for p in population:\n if p[3] == np.inf:\n p[3] = p[0].evaluate(p[1], p[2])\n\n # sorting the population by the loss values\n population = sorted([p for p in population], key=lambda x: x[3])\n population = population[:self.n_pop]\n\n # determining the number of elements to be changed\n p_best = population[0]\n p_parametric_mut = population[:int(0.1*self.n_pop)]\n p_structural_mut = population[:int(0.9*self.n_pop-1)]\n p_recombination = population[:int(0.1*self.n_pop)]\n\n # executing mutation\n for p in p_parametric_mut:\n population.append([p[0].mutation(), p[1], p[2], np.inf])\n\n # executing structural mutation\n for p in p_structural_mut:\n population.append(\n [p[0].structural_mutation(), p[1], p[2], np.inf])\n\n # executing recombination\n for p in p_recombination:\n domain = range(len(p_recombination))\n p_rec_idx = self.random_state.choice(domain)\n p_rec = p_recombination[p_rec_idx][0]\n population.append([p[0].recombine(p_rec), p[1], p[2], np.inf])\n\n # do the sampling\n if iteration % epoch_len == 0:\n smote = SMOTE(proportion=self.proportion,\n n_neighbors=self.n_neighbors,\n n_jobs=self.n_jobs,\n random_state=self.random_state)\n X, y = smote.sample(X_orig, y_orig)\n for i in range(self.n_pop):\n tmp = [population[i][0].clone(), X, y, np.inf]\n tmp[0].update_data(X)\n tmp[0].improve_centers()\n population.append(tmp)\n\n # evaluate unevaluated elements of the population\n for p in population:\n if p[3] == np.inf:\n p[3] = p[0].evaluate(p[1], p[2])\n\n # sorting the population\n population = sorted([p for p in population],\n key=lambda x: x[3])[:self.n_pop]\n\n return ss.inverse_transform(p_best[1]), p_best[2]\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'm_min': self.m_min,\n 'm_max': self.m_max,\n 'Ib': self.Ib,\n 'Ob': self.Ob,\n 'n_pop': self.n_pop,\n 'n_init_pop': self.n_init_pop,\n 'n_iter': self.n_iter,\n 'n_sampling_epoch': self.n_sampling_epoch,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Gaussian_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{gaussian_smote,\n title={Gaussian-Based SMOTE Algorithm for Solving Skewed\n Class Distributions},\n author={Hansoo Lee and Jonggeun Kim and Sungshin Kim},\n journal={Int. J. 
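# --- Illustrative aside (not part of the library) -------------------------
# Sketch of the selection/variation bookkeeping in the DSRBF loop above: the
# population is sorted by loss, truncated to n_pop, and slices of the
# survivors are earmarked for parametric mutation, structural mutation and
# recombination (the 10%/90%/10% split used in the loop). Population entries
# are represented here simply as (loss, payload) pairs; names are assumptions.
def select_and_split(population, n_pop):
    """Sort (loss, payload) pairs, keep the best n_pop, return operator slices."""
    survivors = sorted(population, key=lambda entry: entry[0])[:n_pop]
    parametric = survivors[:int(0.1*n_pop)]
    structural = survivors[:int(0.9*n_pop - 1)]
    recombination = survivors[:int(0.1*n_pop)]
    return survivors, parametric, structural, recombination

# usage sketch
_pop = [(0.8, 'a'), (0.2, 'b'), (0.5, 'c'), (0.1, 'd'), (0.9, 'e')]
_surv, _par, _struct, _rec = select_and_split(_pop, n_pop=4)
# ---------------------------------------------------------------------------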
Fuzzy Logic and Intelligent Systems},\n year={2017},\n volume={17},\n pages={229-234}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n sigma=1.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors\n sigma (float): variance\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater(sigma, \"sigma\", 0.0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.sigma = sigma\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'sigma': [0.5, 1.0, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # standardization applied to make sigma compatible with the data\n ss = StandardScaler()\n X_ss = ss.fit_transform(X)\n\n # fitting nearest neighbors model to find the minority neighbors of\n # minority samples\n X_min = X_ss[y == self.min_label]\n n_neighbors = min([len(X_min), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n idx = self.random_state.randint(len(X_min))\n random_neighbor = self.random_state.choice(ind[idx][1:])\n s0 = self.sample_between_points(X_min[idx], X_min[random_neighbor])\n samples.append(self.random_state.normal(s0, self.sigma))\n\n return (np.vstack([X, ss.inverse_transform(np.vstack(samples))]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'sigma': self.sigma,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass kmeans_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{kmeans_smote,\n title = \"Improving imbalanced learning through a\n 
heuristic oversampling method based\n on k-means and SMOTE\",\n journal = \"Information Sciences\",\n volume = \"465\",\n pages = \"1 - 20\",\n year = \"2018\",\n issn = \"0020-0255\",\n doi = \"https://doi.org/10.1016/j.ins.2018.06.056\",\n author = \"Georgios Douzas and Fernando Bacao and\n Felix Last\",\n keywords = \"Class-imbalanced learning, Oversampling,\n Classification, Clustering, Supervised\n learning, Within-class imbalance\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_clusters=10,\n irt=2.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n n_neighbors (int): number of neighbors\n n_clusters (int): number of clusters\n irt (float): imbalanced ratio threshold\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_clusters, \"n_clusters\", 1)\n self.check_greater_or_equal(irt, \"irt\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_clusters = n_clusters\n self.irt = irt\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_clusters': [2, 5, 10, 20, 50],\n 'irt': [0.5, 0.8, 1.0, 1.5]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # applying kmeans clustering to all data\n n_clusters = min([self.n_clusters, len(X)])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X)\n\n # extracting clusters\n labels = kmeans.labels_\n clusters = [np.where(labels == li)[0] for li in range(n_clusters)]\n\n # cluster filtering\n def cluster_filter(c):\n numerator = np.sum(y[c] == self.maj_label) + 1\n denominator = np.sum(y[c] == self.min_label) + 1\n n_minority = np.sum(y[c] == self.min_label)\n return numerator/denominator < self.irt and n_minority > 1\n\n filt_clusters = [c for c in clusters if cluster_filter(c)]\n\n if len(filt_clusters) == 0:\n _logger.warning(self.__class__.__name__ + \": \" +\n \"number of clusters after filtering is 0\")\n return X.copy(), y.copy()\n\n # Step 2 in 
the paper\n sparsity = []\n nearest_neighbors = []\n cluster_minority_ind = []\n for c in filt_clusters:\n # extract minority indices in the cluster\n minority_ind = c[y[c] == self.min_label]\n cluster_minority_ind.append(minority_ind)\n # compute distance matrix of minority samples in the cluster\n dm = pairwise_distances(X[minority_ind])\n min_count = len(minority_ind)\n # compute the average of distances\n avg_min_dist = (np.sum(dm) - dm.trace()) / \\\n (len(minority_ind)**2 - len(minority_ind))\n # compute sparsity (Step 4)\n sparsity.append(avg_min_dist**len(X[0])/min_count)\n # extract the nearest neighbors graph\n n_neighbors = min([len(minority_ind), self.n_neighbors + 1])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X[minority_ind])\n nearest_neighbors.append(nn.kneighbors(X[minority_ind]))\n\n # Step 5 - compute density of sampling\n weights = sparsity/np.sum(sparsity)\n\n # do the sampling\n samples = []\n while len(samples) < n_to_sample:\n # choose random cluster index and random minority element\n clust_ind = self.random_state.choice(\n np.arange(len(weights)), p=weights)\n idx = self.random_state.randint(\n len(cluster_minority_ind[clust_ind]))\n base_idx = cluster_minority_ind[clust_ind][idx]\n # choose random neighbor\n neighbor_cluster_indices = nearest_neighbors[clust_ind][1][idx][1:]\n domain = cluster_minority_ind[clust_ind][neighbor_cluster_indices]\n neighbor_idx = self.random_state.choice(domain)\n # sample\n X_a = X[base_idx]\n X_b = X[neighbor_idx]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_clusters': self.n_clusters,\n 'irt': self.irt,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass Supervised_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{supervised_smote,\n author = {Hu, Jun AND He, Xue AND Yu, Dong-Jun AND\n Yang, Xi-Bei AND Yang, Jing-Yu AND Shen,\n Hong-Bin},\n journal = {PLOS ONE},\n publisher = {Public Library of Science},\n title = {A New Supervised Over-Sampling Algorithm\n with Application to Protein-Nucleotide\n Binding Residue Prediction},\n year = {2014},\n month = {09},\n volume = {9},\n url = {https://doi.org/10.1371/journal.pone.0107676},\n pages = {1-10},\n number = {9},\n doi = {10.1371/journal.pone.0107676}\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_uses_classifier,\n OverSampling.cat_application]\n\n def __init__(self,\n proportion=1.0,\n th_lower=0.5,\n th_upper=1.0,\n classifier=RandomForestClassifier(n_estimators=50,\n n_jobs=1,\n random_state=5),\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n th_lower (float): lower bound of the confidence interval\n th_upper (float): upper bound of the confidence interval\n classifier (obj): classifier used to estimate class memberships\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_in_range(th_lower, \"th_lower\", [0, 1])\n self.check_in_range(th_upper, \"th_upper\", [0, 1])\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.th_lower = th_lower\n self.th_upper = th_upper\n self.classifier = classifier\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n classifiers = [RandomForestClassifier(n_estimators=50,\n n_jobs=1,\n random_state=5)]\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'th_lower': [0.3, 0.5, 0.8],\n 'th_upper': [1.0],\n 'classifier': classifiers}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n # training the classifier\n self.classifier.fit(X, y)\n\n X_min = X[y == self.min_label]\n\n th_lower = self.th_lower\n\n # do the sampling\n samples = []\n n_trials = 1\n n_success = 1\n while len(samples) < n_to_sample:\n n_trials = n_trials + 1\n\n domain = range(len(X_min))\n x0, x1 = self.random_state.choice(domain, 2, replace=False)\n x0, x1 = X_min[x0], X_min[x1]\n sample = self.sample_between_points(x0, x1)\n probs = self.classifier.predict_proba(sample.reshape(1, -1))\n # extract probability\n class_column = np.where(self.classifier.classes_ == self.min_label)\n class_column = class_column[0][0]\n prob = probs[0][class_column]\n if prob >= th_lower and prob <= self.th_upper:\n samples.append(sample)\n n_success = n_success + 1\n\n # decreasing lower threshold if needed\n if n_success/n_trials < 0.02:\n th_lower = th_lower * 0.9\n n_success = 1\n n_trials = 1\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'th_lower': self.th_lower,\n 'th_upper': self.th_upper,\n 'classifier': self.classifier,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass SN_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @Article{sn_smote,\n author=\"Garc{\\'i}a, V.\n and 
S{\\'a}nchez, J. S.\n and Mart{\\'i}n-F{\\'e}lez, R.\n and Mollineda, R. A.\",\n title=\"Surrounding neighborhood-based SMOTE for\n learning from imbalanced data sets\",\n journal=\"Progress in Artificial Intelligence\",\n year=\"2012\",\n month=\"Dec\",\n day=\"01\",\n volume=\"1\",\n number=\"4\",\n pages=\"347--362\",\n issn=\"2192-6360\",\n doi=\"10.1007/s13748-012-0027-5\",\n url=\"https://doi.org/10.1007/s13748-012-0027-5\"\n }\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=5,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_neighbors (float): number of neighbors\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # the search for the k nearest centroid neighbors is limited for the\n # nearest 10*n_neighbors neighbors\n n_neighbors = min([self.n_neighbors*10, len(X_min)])\n nn = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # determining k nearest centroid neighbors\n ncn = np.zeros(shape=(len(X_min), self.n_neighbors)).astype(int)\n ncn_nums = np.zeros(len(X_min)).astype(int)\n\n # extracting nearest centroid neighbors\n for i in range(len(X_min)):\n # the first NCN neighbor is the first neighbor\n ncn[i, 0] = ind[i][1]\n\n # iterating through all neighbors and finding the one with smaller\n # centroid distance to X_min[i] than the previous set of neighbors\n n_cent = 1\n centroid = X_min[ncn[i, 0]]\n cent_dist = np.linalg.norm(centroid - X_min[i])\n j = 2\n while j < len(ind[i]) and n_cent < self.n_neighbors:\n new_cent_dist = np.linalg.norm(\n (centroid + X_min[ind[i][j]])/(n_cent + 1) - X_min[i])\n\n # checking if new nearest 
centroid neighbor found\n if new_cent_dist < cent_dist:\n centroid = centroid + X_min[ind[i][j]]\n ncn[i, n_cent] = ind[i][j]\n n_cent = n_cent + 1\n cent_dist = new_cent_dist\n j = j + 1\n\n # registering the number of nearest centroid neighbors found\n ncn_nums[i] = n_cent\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n random_idx = self.random_state.randint(len(X_min))\n random_neighbor_idx = self.random_state.choice(\n ncn[random_idx][:ncn_nums[random_idx]])\n samples.append(self.sample_between_points(\n X_min[random_idx], X_min[random_neighbor_idx]))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass CCR(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{ccr,\n author = {Koziarski, Michał and Wozniak, Michal},\n year = {2017},\n month = {12},\n pages = {727–736},\n title = {CCR: A combined cleaning and resampling algorithm\n for imbalanced data classification},\n volume = {27},\n journal = {International Journal of Applied Mathematics\n and Computer Science}\n }\n\n Notes:\n * Adapted from https://github.com/michalkoziarski/CCR\n \"\"\"\n\n categories = [OverSampling.cat_extensive]\n\n def __init__(self,\n proportion=1.0,\n energy=1.0,\n scaling=0.0,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after sampling\n the number of minority samples will be equal\n to the number of majority samples\n energy (float): energy parameter\n scaling (float): scaling factor\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(energy, \"energy\", 0)\n self.check_greater_or_equal(scaling, \"scaling\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.energy = energy\n self.scaling = scaling\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'energy': [0.001, 0.0025, 0.005,\n 0.01, 0.025, 0.05, 0.1,\n 0.25, 0.5, 1.0, 2.5, 5.0,\n 10.0, 25.0, 50.0, 100.0],\n 'scaling': [0.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return 
X.copy(), y.copy()\n\n def taxicab_sample(n, r):\n sample = []\n random_numbers = self.random_state.rand(n)\n\n for i in range(n):\n # spread = r - np.sum(np.abs(sample))\n spread = r\n if len(sample) > 0:\n spread -= abs(sample[-1])\n sample.append(spread * (2 * random_numbers[i] - 1))\n\n return self.random_state.permutation(sample)\n\n minority = X[y == self.min_label]\n majority = X[y == self.maj_label]\n\n energy = self.energy * (X.shape[1] ** self.scaling)\n\n distances = pairwise_distances(minority, majority, metric='l1')\n\n radii = np.zeros(len(minority))\n translations = np.zeros(majority.shape)\n\n for i in range(len(minority)):\n minority_point = minority[i]\n remaining_energy = energy\n r = 0.0\n sorted_distances = np.argsort(distances[i])\n current_majority = 0\n\n while True:\n if current_majority > len(majority):\n break\n\n if current_majority == len(majority):\n if current_majority == 0:\n radius_change = remaining_energy / \\\n (current_majority + 1.0)\n else:\n radius_change = remaining_energy / current_majority\n\n r += radius_change\n break\n\n radius_change = remaining_energy / (current_majority + 1.0)\n\n dist = distances[i, sorted_distances[current_majority]]\n if dist >= r + radius_change:\n r += radius_change\n break\n else:\n if current_majority == 0:\n last_distance = 0.0\n else:\n cm1 = current_majority - 1\n last_distance = distances[i, sorted_distances[cm1]]\n\n curr_maj_idx = sorted_distances[current_majority]\n radius_change = distances[i, curr_maj_idx] - last_distance\n r += radius_change\n decrease = radius_change * (current_majority + 1.0)\n remaining_energy -= decrease\n current_majority += 1\n\n radii[i] = r\n\n for j in range(current_majority):\n majority_point = majority[sorted_distances[j]].astype(float)\n d = distances[i, sorted_distances[j]]\n\n if d < 1e-20:\n n_maj_point = len(majority_point)\n r_num = self.random_state.rand(n_maj_point)\n r_num = 1e-6 * r_num + 1e-6\n r_sign = self.random_state.choice([-1.0, 1.0], n_maj_point)\n majority_point += r_num * r_sign\n d = np.sum(np.abs(minority_point - majority_point))\n\n translation = (r - d) / d * (majority_point - minority_point)\n translations[sorted_distances[j]] += translation\n\n majority = majority.astype(float)\n majority += translations\n\n appended = []\n for i in range(len(minority)):\n minority_point = minority[i]\n synthetic_samples = n_to_sample / (radii[i] * np.sum(1.0 / radii))\n synthetic_samples = int(np.round(synthetic_samples))\n r = radii[i]\n\n for _ in range(synthetic_samples):\n appended.append(minority_point +\n taxicab_sample(len(minority_point), r))\n\n if len(appended) == 0:\n _logger.info(\"No samples were added\")\n return X.copy(), y.copy()\n\n return (np.vstack([X, np.vstack(appended)]),\n np.hstack([y, np.repeat(self.min_label, len(appended))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'energy': self.energy,\n 'scaling': self.scaling,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass ANS(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @article{ans,\n author = {Siriseriwan, W and Sinapiromsaran, Krung},\n year = {2017},\n month = {09},\n pages = {565-576},\n title = {Adaptive neighbor synthetic minority oversampling\n technique under 1NN outcast handling},\n volume = {39},\n booktitle = {Songklanakarin Journal of Science and\n Technology}\n }\n\n Notes:\n * The method is not prepared for the case when there 
is no c satisfying\n the condition in line 25 of the algorithm, fixed.\n * The method is not prepared for empty Pused sets, fixed.\n \"\"\"\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_sample_ordinary,\n OverSampling.cat_density_based]\n\n def __init__(self, proportion=1.0, n_jobs=1, random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 1.0 means that after\n sampling the number of minority samples\n will be equal to the number of majority\n samples\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [\n 0.1, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n if not self.check_enough_min_samples_for_sampling():\n return X.copy(), y.copy()\n\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n X_min = X[y == self.min_label]\n\n # outcast extraction algorithm\n\n # maximum C value\n C_max = int(0.25*len(X))\n\n # finding the first minority neighbor of minority samples\n nn = NearestNeighbors(n_neighbors=2, n_jobs=self.n_jobs)\n nn.fit(X_min)\n dist, ind = nn.kneighbors(X_min)\n\n # extracting the distances of first minority neighbors from minority\n # samples\n first_pos_neighbor_distances = dist[:, 1]\n\n # fitting another nearest neighbors model to extract majority\n # samples in the neighborhoods of minority samples\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X)\n\n # extracting the number of majority samples in the neighborhood of\n # minority samples\n out_border = []\n for i in range(len(X_min)):\n x = X_min[i].reshape(1, -1)\n ind = nn.radius_neighbors(x,\n first_pos_neighbor_distances[i],\n return_distance=False)\n out_border.append(np.sum(y[ind[0]] == self.maj_label))\n\n out_border = np.array(out_border)\n\n # finding the optimal C value by comparing the number of outcast\n # minority samples when traversing the range [1, C_max]\n n_oc_m1 = -1\n C = 0\n best_diff = np.inf\n for c in range(1, C_max):\n n_oc = np.sum(out_border >= c)\n if abs(n_oc - n_oc_m1) < best_diff:\n best_diff = abs(n_oc - n_oc_m1)\n C = n_oc\n n_oc_m1 = n_oc\n\n # determining the set of minority samples Pused\n Pused = np.where(out_border < C)[0]\n\n # Adaptive neighbor SMOTE algorithm\n\n # checking if there are minority samples left\n if len(Pused) == 0:\n 
_logger.info(self.__class__.__name__ + \": \" + \"Pused is empty\")\n return X.copy(), y.copy()\n\n # finding the maximum distances of first positive neighbors\n eps = np.max(first_pos_neighbor_distances[Pused])\n\n # fitting nearest neighbors model to find nearest minority samples in\n # the neighborhoods of minority samples\n nn = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs)\n nn.fit(X_min[Pused])\n ind = nn.radius_neighbors(X_min[Pused], eps, return_distance=False)\n\n # extracting the number of positive samples in the neighborhoods\n Np = np.array([len(i) for i in ind])\n\n if np.all(Np == 1):\n message = \"all samples have only 1 neighbor in the given radius\"\n _logger.warning(self.__class__.__name__ + \": \" + message)\n return X.copy(), y.copy()\n\n # determining the distribution used to generate samples\n distribution = Np/np.sum(Np)\n\n # generating samples\n samples = []\n while len(samples) < n_to_sample:\n random_idx = self.random_state.choice(\n np.arange(len(Pused)), p=distribution)\n if len(ind[random_idx]) > 1:\n random_neig_idx = self.random_state.choice(ind[random_idx])\n while random_neig_idx == random_idx:\n random_neig_idx = self.random_state.choice(ind[random_idx])\n X_a = X_min[Pused[random_idx]]\n X_b = X_min[Pused[random_neig_idx]]\n samples.append(self.sample_between_points(X_a, X_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return {'proportion': self.proportion,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass cluster_SMOTE(OverSampling):\n \"\"\"\n References:\n * BibTex::\n\n @INPROCEEDINGS{cluster_SMOTE,\n author={Cieslak, D. A. and Chawla, N. V. and\n Striegel, A.},\n booktitle={2006 IEEE International Conference\n on Granular Computing},\n title={Combating imbalance in network\n intrusion datasets},\n year={2006},\n volume={},\n number={},\n pages={732-737},\n keywords={Intelligent networks;Intrusion detection;\n Telecommunication traffic;Data mining;\n Computer networks;Data security;\n Machine learning;Counting circuits;\n Computer security;Humans},\n doi={10.1109/GRC.2006.1635905},\n ISSN={},\n month={May}}\n \"\"\"\n\n categories = [OverSampling.cat_extensive,\n OverSampling.cat_uses_clustering]\n\n def __init__(self,\n proportion=1.0,\n n_neighbors=3,\n n_clusters=3,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n proportion (float): proportion of the difference of n_maj and n_min\n to sample e.g. 
1.0 means that after sampling\n the number of minority samples will be equal to\n the number of majority samples\n n_neighbors (int): number of neighbors in SMOTE\n n_clusters (int): number of clusters\n n_jobs (int): number of parallel jobs\n random_state (int/RandomState/None): initializer of random_state,\n like in sklearn\n \"\"\"\n super().__init__()\n self.check_greater_or_equal(proportion, \"proportion\", 0)\n self.check_greater_or_equal(n_neighbors, \"n_neighbors\", 1)\n self.check_greater_or_equal(n_clusters, \"n_components\", 1)\n self.check_n_jobs(n_jobs, 'n_jobs')\n\n self.proportion = proportion\n self.n_neighbors = n_neighbors\n self.n_clusters = n_clusters\n self.n_jobs = n_jobs\n\n self.set_random_state(random_state)\n\n @ classmethod\n def parameter_combinations(cls, raw=False):\n \"\"\"\n Generates reasonable paramter combinations.\n\n Returns:\n list(dict): a list of meaningful paramter combinations\n \"\"\"\n parameter_combinations = {'proportion': [0.1, 0.25, 0.5, 0.75,\n 1.0, 1.5, 2.0],\n 'n_neighbors': [3, 5, 7],\n 'n_clusters': [3, 5, 7, 9]}\n return cls.generate_parameter_combinations(parameter_combinations, raw)\n\n def sample(self, X, y):\n \"\"\"\n Does the sample generation according to the class paramters.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n _logger.info(self.__class__.__name__ + \": \" +\n \"Running sampling via %s\" % self.descriptor())\n\n self.class_label_statistics(X, y)\n\n X_min = X[y == self.min_label]\n\n # determining the number of samples to generate\n n_to_sample = self.det_n_to_sample(self.proportion,\n self.class_stats[self.maj_label],\n self.class_stats[self.min_label])\n\n if n_to_sample == 0:\n _logger.warning(self.__class__.__name__ +\n \": \" + \"Sampling is not needed\")\n return X.copy(), y.copy()\n\n n_clusters = min([len(X_min), self.n_clusters])\n kmeans = KMeans(n_clusters=n_clusters,\n random_state=self.random_state)\n kmeans.fit(X_min)\n cluster_labels = kmeans.labels_\n unique_labels = np.unique(cluster_labels)\n\n # creating nearest neighbors objects for each cluster\n cluster_indices = [np.where(cluster_labels == c)[0]\n for c in unique_labels]\n\n def nneighbors(idx):\n n_neighbors = min([self.n_neighbors, len(cluster_indices[idx])])\n nn = NearestNeighbors(n_neighbors=n_neighbors)\n return nn.fit(X_min[cluster_indices[idx]])\n\n cluster_nns = [nneighbors(idx) for idx in range(len(cluster_indices))]\n\n if max([len(c) for c in cluster_indices]) <= 1:\n _logger.info(self.__class__.__name__ + \": \" +\n \"All clusters contain 1 element\")\n return X.copy(), y.copy()\n\n # generating the samples\n samples = []\n while len(samples) < n_to_sample:\n cluster_idx = self.random_state.randint(len(cluster_indices))\n if len(cluster_indices[cluster_idx]) <= 1:\n continue\n random_idx = self.random_state.randint(\n len(cluster_indices[cluster_idx]))\n sample_a = X_min[cluster_indices[cluster_idx]][random_idx]\n dist, indices = cluster_nns[cluster_idx].kneighbors(\n sample_a.reshape(1, -1))\n sample_b_idx = self.random_state.choice(\n cluster_indices[cluster_idx][indices[0][1:]])\n sample_b = X_min[sample_b_idx]\n samples.append(self.sample_between_points(sample_a, sample_b))\n\n return (np.vstack([X, np.vstack(samples)]),\n np.hstack([y, np.repeat(self.min_label, len(samples))]))\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the current sampling object\n \"\"\"\n return 
{'proportion': self.proportion,\n 'n_neighbors': self.n_neighbors,\n 'n_clusters': self.n_clusters,\n 'n_jobs': self.n_jobs,\n 'random_state': self._random_state_init}\n\n\nclass MulticlassOversampling(StatisticsMixin):\n \"\"\"\n Carries out multiclass oversampling\n\n Example::\n\n import smote_variants as sv\n import sklearn.datasets as datasets\n\n dataset= datasets.load_wine()\n\n oversampler= sv.MulticlassOversampling(sv.distance_SMOTE())\n\n X_samp, y_samp= oversampler.sample(dataset['data'], dataset['target'])\n \"\"\"\n\n def __init__(self,\n oversampler=SMOTE(random_state=2),\n strategy=\"eq_1_vs_many_successive\"):\n \"\"\"\n Constructor of the multiclass oversampling object\n\n Args:\n oversampler (obj): an oversampling object\n strategy (str/obj): a multiclass oversampling strategy, currently\n 'eq_1_vs_many_successive' or\n 'equalize_1_vs_many'\n \"\"\"\n self.oversampler = oversampler\n self.strategy = strategy\n\n def sample_equalize_1_vs_many(self, X, y):\n \"\"\"\n Does the sample generation by oversampling each minority class to the\n cardinality of the majority class using all original samples in each\n run.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n\n message = \"Running multiclass oversampling with strategy %s\"\n message = message % str(self.strategy)\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n if 'proportion' not in self.oversampler.get_params():\n message = (\"Multiclass oversampling strategy %s cannot be \"\n \"used with oversampling techniques without proportion\"\n \" parameter\")\n message = message % str(self.strategy)\n raise ValueError(message)\n\n # extract class label statistics\n self.class_label_statistics(X, y)\n\n # sort labels by number of samples\n class_labels = self.class_stats.keys()\n class_labels = sorted(class_labels, key=lambda x: -self.class_stats[x])\n\n majority_class_label = class_labels[0]\n\n # determining the majority class data\n X_maj = X[y == majority_class_label]\n\n # dict to store the results\n results = {}\n results[majority_class_label] = X_maj.copy()\n\n # running oversampling for all minority classes against all oversampled\n # classes\n for i in range(1, len(class_labels)):\n message = \"Sampling minority class with label: %d\"\n message = message % class_labels[i]\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n # extract current minority class\n minority_class_label = class_labels[i]\n X_min = X[y == minority_class_label]\n X_maj = X[y != minority_class_label]\n\n # prepare data to pass to oversampling\n X_training = np.vstack([X_maj, X_min])\n y_training = np.hstack(\n [np.repeat(0, len(X_maj)), np.repeat(1, len(X_min))])\n\n # prepare parameters by properly setting the proportion value\n params = self.oversampler.get_params()\n\n num_to_generate = self.class_stats[majority_class_label] - \\\n self.class_stats[class_labels[i]]\n num_to_gen_to_all = len(X_maj) - self.class_stats[class_labels[i]]\n\n params['proportion'] = num_to_generate/num_to_gen_to_all\n\n # instantiating new oversampling object with the proper proportion\n # parameter\n oversampler = self.oversampler.__class__(**params)\n\n # executing the sampling\n X_samp, y_samp = oversampler.sample(X_training, y_training)\n\n # registaring the newly oversampled minority class in the output\n # set\n results[class_labels[i]] = X_samp[len(\n X_training):][y_samp[len(X_training):] == 1]\n\n # constructing the output 
set\n X_final = results[class_labels[1]]\n y_final = np.repeat(class_labels[1], len(results[class_labels[1]]))\n\n for i in range(2, len(class_labels)):\n X_final = np.vstack([X_final, results[class_labels[i]]])\n y_new = np.repeat(class_labels[i], len(results[class_labels[i]]))\n y_final = np.hstack([y_final, y_new])\n\n return np.vstack([X, X_final]), np.hstack([y, y_final])\n\n def sample_equalize_1_vs_many_successive(self, X, y):\n \"\"\"\n Does the sample generation by oversampling each minority class\n successively to the cardinality of the majority class,\n incorporating the results of previous oversamplings.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n\n message = \"Running multiclass oversampling with strategy %s\"\n message = message % str(self.strategy)\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n if 'proportion' not in self.oversampler.get_params():\n message = (\"Multiclass oversampling strategy %s cannot be used\"\n \" with oversampling techniques without proportion\"\n \" parameter\") % str(self.strategy)\n raise ValueError(message)\n\n # extract class label statistics\n self.class_label_statistics(X, y)\n\n # sort labels by number of samples\n class_labels = self.class_stats.keys()\n class_labels = sorted(class_labels, key=lambda x: -self.class_stats[x])\n\n majority_class_label = class_labels[0]\n\n # determining the majority class data\n X_maj = X[y == majority_class_label]\n\n # dict to store the results\n results = {}\n results[majority_class_label] = X_maj.copy()\n\n # running oversampling for all minority classes against all\n # oversampled classes\n for i in range(1, len(class_labels)):\n message = \"Sampling minority class with label: %d\"\n message = message % class_labels[i]\n _logger.info(self.__class__.__name__ + \": \" + message)\n\n # extract current minority class\n minority_class_label = class_labels[i]\n X_min = X[y == minority_class_label]\n\n # prepare data to pass to oversampling\n X_training = np.vstack([X_maj, X_min])\n y_training = np.hstack(\n [np.repeat(0, len(X_maj)), np.repeat(1, len(X_min))])\n\n # prepare parameters by properly setting the proportion value\n params = self.oversampler.get_params()\n\n n_majority = self.class_stats[majority_class_label]\n n_class_i = self.class_stats[class_labels[i]]\n num_to_generate = n_majority - n_class_i\n\n num_to_gen_to_all = i * n_majority - n_class_i\n\n params['proportion'] = num_to_generate/num_to_gen_to_all\n\n # instantiating new oversampling object with the proper proportion\n # parameter\n oversampler = self.oversampler.__class__(**params)\n\n # executing the sampling\n X_samp, y_samp = oversampler.sample(X_training, y_training)\n\n # adding the newly oversampled minority class to the majority data\n X_maj = np.vstack([X_maj, X_samp[y_samp == 1]])\n\n # registaring the newly oversampled minority class in the output\n # set\n result_mask = y_samp[len(X_training):] == 1\n results[class_labels[i]] = X_samp[len(X_training):][result_mask]\n\n # constructing the output set\n X_final = results[class_labels[1]]\n y_final = np.repeat(class_labels[1], len(results[class_labels[1]]))\n\n for i in range(2, len(class_labels)):\n X_final = np.vstack([X_final, results[class_labels[i]]])\n y_new = np.repeat(class_labels[i], len(results[class_labels[i]]))\n y_final = np.hstack([y_final, y_new])\n\n return np.vstack([X, X_final]), np.hstack([y, y_final])\n\n def sample(self, X, 
y):\n \"\"\"\n Does the sample generation according to the oversampling strategy.\n\n Args:\n X (np.ndarray): training set\n y (np.array): target labels\n\n Returns:\n (np.ndarray, np.array): the extended training set and target labels\n \"\"\"\n\n if self.strategy == \"eq_1_vs_many_successive\":\n return self.sample_equalize_1_vs_many_successive(X, y)\n elif self.strategy == \"equalize_1_vs_many\":\n return self.sample_equalize_1_vs_many(X, y)\n else:\n message = \"Multiclass oversampling startegy %s not implemented.\"\n message = message % self.strategy\n raise ValueError(message)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns:\n dict: the parameters of the multiclass oversampling object\n \"\"\"\n return {'oversampler': self.oversampler, 'strategy': self.strategy}\n\n\nclass OversamplingClassifier(BaseEstimator, ClassifierMixin):\n \"\"\"\n This class wraps an oversampler and a classifier, making it compatible\n with sklearn based pipelines.\n \"\"\"\n\n def __init__(self, oversampler, classifier):\n \"\"\"\n Constructor of the wrapper.\n\n Args:\n oversampler (obj): an oversampler object\n classifier (obj): an sklearn-compatible classifier\n \"\"\"\n\n self.oversampler = oversampler\n self.classifier = classifier\n\n def fit(self, X, y=None):\n \"\"\"\n Carries out oversampling and fits the classifier.\n\n Args:\n X (np.ndarray): feature vectors\n y (np.array): target values\n\n Returns:\n obj: the object itself\n \"\"\"\n\n X_samp, y_samp = self.oversampler.sample(X, y)\n self.classifier.fit(X_samp, y_samp)\n\n return self\n\n def predict(self, X):\n \"\"\"\n Carries out the predictions.\n\n Args:\n X (np.ndarray): feature vectors\n \"\"\"\n\n return self.classifier.predict(X)\n\n def predict_proba(self, X):\n \"\"\"\n Carries out the predictions with probability estimations.\n\n Args:\n X (np.ndarray): feature vectors\n \"\"\"\n\n return self.classifier.predict_proba(X)\n\n def get_params(self, deep=True):\n \"\"\"\n Returns the dictionary of parameters.\n\n Args:\n deep (bool): wether to return parameters with deep discovery\n\n Returns:\n dict: the dictionary of parameters\n \"\"\"\n\n return {'oversampler': self.oversampler, 'classifier': self.classifier}\n\n def set_params(self, **parameters):\n \"\"\"\n Sets the parameters.\n\n Args:\n parameters (dict): the parameters to set.\n\n Returns:\n obj: the object itself\n \"\"\"\n\n for parameter, value in parameters.items():\n setattr(self, parameter, value)\n\n return self\n\n\nclass MLPClassifierWrapper:\n \"\"\"\n Wrapper over MLPClassifier of sklearn to provide easier parameterization\n \"\"\"\n\n def __init__(self,\n activation='relu',\n hidden_layer_fraction=0.1,\n alpha=0.0001,\n random_state=None):\n \"\"\"\n Constructor of the MLPClassifier\n\n Args:\n activation (str): name of the activation function\n hidden_layer_fraction (float): fraction of the hidden neurons of\n the number of input dimensions\n alpha (float): alpha parameter of the MLP classifier\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n \"\"\"\n self.activation = activation\n self.hidden_layer_fraction = hidden_layer_fraction\n self.alpha = alpha\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"\n Fit the model to the data\n\n Args:\n X (np.ndarray): features\n y (np.array): target labels\n\n Returns:\n obj: the MLPClassifierWrapper object\n \"\"\"\n hidden_layer_size = max([1, int(len(X[0])*self.hidden_layer_fraction)])\n self.model = MLPClassifier(activation=self.activation,\n 
hidden_layer_sizes=(hidden_layer_size,),\n alpha=self.alpha,\n random_state=self.random_state).fit(X, y)\n return self\n\n def predict(self, X):\n \"\"\"\n Predicts the labels of the unseen data\n\n Args:\n X (np.ndarray): unseen features\n\n Returns:\n np.array: predicted labels\n \"\"\"\n return self.model.predict(X)\n\n def predict_proba(self, X):\n \"\"\"\n Predicts the class probabilities of the unseen data\n\n Args:\n X (np.ndarray): unseen features\n\n Returns:\n np.matrix: predicted class probabilities\n \"\"\"\n return self.model.predict_proba(X)\n\n def get_params(self, deep=False):\n \"\"\"\n Returns the parameters of the classifier.\n\n Returns:\n dict: the parameters of the object\n \"\"\"\n return {'activation': self.activation,\n 'hidden_layer_fraction': self.hidden_layer_fraction,\n 'alpha': self.alpha,\n 'random_state': self.random_state}\n\n def copy(self):\n \"\"\"\n Creates a copy of the classifier.\n\n Returns:\n obj: a copy of the classifier\n \"\"\"\n return MLPClassifierWrapper(**self.get_params())\n\n\nclass Folding():\n \"\"\"\n Cache-able folding of dataset for cross-validation\n \"\"\"\n\n def __init__(self, dataset, validator, cache_path=None, random_state=None):\n \"\"\"\n Constructor of Folding object\n\n Args:\n dataset (dict): dataset dictionary with keys 'data', 'target'\n and 'DESCR'\n validator (obj): cross-validator object\n cache_path (str): path to cache directory\n random_state (int/np.random.RandomState/None): initializer of\n the random state\n \"\"\"\n self.dataset = dataset\n self.db_name = self.dataset['name']\n self.validator = validator\n self.cache_path = cache_path\n self.filename = 'folding_' + self.db_name + '.pickle'\n self.db_size = len(dataset['data'])\n self.db_n_attr = len(dataset['data'][0])\n self.imbalanced_ratio = np.sum(\n self.dataset['target'] == 0)/np.sum(self.dataset['target'] == 1)\n self.random_state = random_state\n\n def do_folding(self):\n \"\"\"\n Does the folding or reads it from file if already available\n\n Returns:\n list(tuple): list of tuples of X_train, y_train, X_test, y_test\n objects\n \"\"\"\n\n self.validator.random_state = self.random_state\n\n if not hasattr(self, 'folding'):\n cond_cache_none = self.cache_path is None\n if not cond_cache_none:\n filename = os.path.join(self.cache_path, self.filename)\n cond_file_not_exists = not os.path.isfile(filename)\n else:\n cond_file_not_exists = False\n\n if cond_cache_none or cond_file_not_exists:\n _logger.info(self.__class__.__name__ +\n (\" doing folding %s\" % self.filename))\n\n self.folding = {}\n self.folding['folding'] = []\n self.folding['db_size'] = len(self.dataset['data'])\n self.folding['db_n_attr'] = len(self.dataset['data'][0])\n n_maj = np.sum(self.dataset['target'] == 0)\n n_min = np.sum(self.dataset['target'] == 1)\n self.folding['imbalanced_ratio'] = n_maj / n_min\n\n X = self.dataset['data']\n y = self.dataset['target']\n\n data = self.dataset['data']\n target = self.dataset['target']\n\n for train, test in self.validator.split(data, target, target):\n folding = (X[train], y[train], X[test], y[test])\n self.folding['folding'].append(folding)\n if self.cache_path is not None:\n _logger.info(self.__class__.__name__ +\n (\" dumping to file %s\" % self.filename))\n random_filename = np.random.randint(1000000)\n random_filename = str(random_filename) + '.pickle'\n random_filename = os.path.join(self.cache_path,\n random_filename)\n pickle.dump(self.folding, open(random_filename, \"wb\"))\n os.rename(random_filename, os.path.join(\n self.cache_path, 
self.filename))\n else:\n _logger.info(self.__class__.__name__ +\n (\" reading from file %s\" % self.filename))\n self.folding = pickle.load(\n open(os.path.join(self.cache_path, self.filename), \"rb\"))\n return self.folding\n\n def get_params(self, deep=False):\n return {'db_name': self.db_name}\n\n def descriptor(self):\n return str(self.get_params())\n\n\nclass Sampling():\n \"\"\"\n Cache-able sampling of dataset folds\n \"\"\"\n\n def __init__(self,\n folding,\n sampler,\n sampler_parameters,\n scaler,\n random_state=None):\n \"\"\"\n Constructor of the sampling object\n\n Args:\n folding (obj): Folding object\n sampler (class): class of a sampler object\n sampler_parameters (dict): a parameter combination for the sampler\n object\n scaler (obj): scaler object\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n \"\"\"\n self.folding = folding\n self.db_name = folding.db_name\n self.sampler = sampler\n self.sampler_parameters = sampler_parameters\n self.sampler_parameters['random_state'] = random_state\n self.scaler = scaler\n self.cache_path = folding.cache_path\n self.filename = self.standardized_filename('sampling')\n self.random_state = random_state\n\n def standardized_filename(self,\n prefix,\n db_name=None,\n sampler=None,\n sampler_parameters=None):\n \"\"\"\n standardizes the filename\n\n Args:\n filename (str): filename\n\n Returns:\n str: standardized name\n \"\"\"\n import hashlib\n\n db_name = (db_name or self.db_name)\n\n sampler = (sampler or self.sampler)\n sampler = sampler.__name__\n sampler_parameters = sampler_parameters or self.sampler_parameters\n # _logger.info(str(sampler_parameters))\n from collections import OrderedDict\n sampler_parameters_ordered = OrderedDict()\n for k in sorted(list(sampler_parameters.keys())):\n sampler_parameters_ordered[k] = sampler_parameters[k]\n\n message = \" sampler parameter string \"\n message = message + str(sampler_parameters_ordered)\n # _logger.info(self.__class__.__name__ + message)\n sampler_parameter_str = hashlib.md5(\n str(sampler_parameters_ordered).encode('utf-8')).hexdigest()\n\n filename = '_'.join(\n [prefix, db_name, sampler, sampler_parameter_str]) + '.pickle'\n filename = re.sub('[\"\\\\,:(){}]', '', filename)\n filename = filename.replace(\"'\", '')\n filename = filename.replace(\": \", \"_\")\n filename = filename.replace(\" \", \"_\")\n filename = filename.replace(\"\\n\", \"_\")\n\n return filename\n\n def cache_sampling(self):\n try:\n import mkl\n mkl.set_num_threads(1)\n _logger.info(self.__class__.__name__ +\n (\" mkl thread number set to 1 successfully\"))\n except Exception as e:\n _logger.info(self.__class__.__name__ +\n (\" setting mkl thread number didn't succeed\"))\n _logger.info(str(e))\n\n if not os.path.isfile(os.path.join(self.cache_path, self.filename)):\n # if the sampled dataset does not exist\n sampler_categories = self.sampler.categories\n is_extensive = OverSampling.cat_extensive in sampler_categories\n has_proportion = 'proportion' in self.sampler_parameters\n higher_prop_sampling_avail = None\n\n if is_extensive and has_proportion:\n proportion = self.sampler_parameters['proportion']\n all_pc = self.sampler.parameter_combinations()\n all_proportions = np.unique([p['proportion'] for p in all_pc])\n all_proportions = all_proportions[all_proportions > proportion]\n\n for p in all_proportions:\n tmp_par = self.sampler_parameters.copy()\n tmp_par['proportion'] = p\n tmp_filename = self.standardized_filename(\n 'sampling', self.db_name, self.sampler, 
tmp_par)\n\n filename = os.path.join(self.cache_path, tmp_filename)\n if os.path.isfile(filename):\n higher_prop_sampling_avail = (p, tmp_filename)\n break\n\n if (not is_extensive or not has_proportion or\n (is_extensive and has_proportion and\n higher_prop_sampling_avail is None)):\n _logger.info(self.__class__.__name__ + \" doing sampling\")\n begin = time.time()\n sampling = []\n folds = self.folding.do_folding()\n for X_train, y_train, X_test, y_test in folds['folding']:\n s = self.sampler(**self.sampler_parameters)\n\n if self.scaler is not None:\n print(self.scaler.__class__.__name__)\n X_train = self.scaler.fit_transform(X_train, y_train)\n X_samp, y_samp = s.sample_with_timing(X_train, y_train)\n\n if hasattr(s, 'transform'):\n X_test_trans = s.preprocessing_transform(X_test)\n else:\n X_test_trans = X_test.copy()\n\n if self.scaler is not None:\n X_samp = self.scaler.inverse_transform(X_samp)\n\n sampling.append((X_samp, y_samp, X_test_trans, y_test))\n runtime = time.time() - begin\n else:\n higher_prop, higher_prop_filename = higher_prop_sampling_avail\n message = \" reading and resampling from file %s to %s\"\n message = message % (higher_prop_filename, self.filename)\n _logger.info(self.__class__.__name__ + message)\n filename = os.path.join(self.cache_path, higher_prop_filename)\n tmp_results = pickle.load(open(filename, 'rb'))\n tmp_sampling = tmp_results['sampling']\n tmp_runtime = tmp_results['runtime']\n\n sampling = []\n folds = self.folding.do_folding()\n nums = [len(X_train) for X_train, _, _, _ in folds['folding']]\n i = 0\n for X_train, y_train, X_test, y_test in tmp_sampling:\n new_num = (len(X_train) - nums[i])/higher_prop*proportion\n new_num = int(new_num)\n offset = nums[i] + new_num\n X_offset = X_train[:offset]\n y_offset = y_train[:offset]\n sampling.append((X_offset, y_offset, X_test, y_test))\n i = i + 1\n runtime = tmp_runtime/p*proportion\n\n results = {}\n results['sampling'] = sampling\n results['runtime'] = runtime\n results['db_size'] = folds['db_size']\n results['db_n_attr'] = folds['db_n_attr']\n results['imbalanced_ratio'] = folds['imbalanced_ratio']\n\n _logger.info(self.__class__.__name__ +\n (\" dumping to file %s\" % self.filename))\n\n random_filename = np.random.randint(1000000)\n random_filename = str(random_filename) + '.pickle'\n random_filename = os.path.join(self.cache_path, random_filename)\n pickle.dump(results, open(random_filename, \"wb\"))\n os.rename(random_filename, os.path.join(\n self.cache_path, self.filename))\n\n def do_sampling(self):\n self.cache_sampling()\n results = pickle.load(\n open(os.path.join(self.cache_path, self.filename), 'rb'))\n return results\n\n def get_params(self, deep=False):\n return {'folding': self.folding.get_params(),\n 'sampler_name': self.sampler.__name__,\n 'sampler_parameters': self.sampler_parameters}\n\n def descriptor(self):\n return str(self.get_params())\n\n\nclass Evaluation():\n \"\"\"\n Cache-able evaluation of classifier on sampling\n \"\"\"\n\n def __init__(self,\n sampling,\n classifiers,\n n_threads=None,\n random_state=None):\n \"\"\"\n Constructor of an Evaluation object\n\n Args:\n sampling (obj): Sampling object\n classifiers (list(obj)): classifier objects\n n_threads (int/None): number of threads\n random_state (int/np.random.RandomState/None): random state\n initializer\n \"\"\"\n self.sampling = sampling\n self.classifiers = classifiers\n self.n_threads = n_threads\n self.cache_path = sampling.cache_path\n self.filename = self.sampling.standardized_filename('eval')\n 
self.random_state = random_state\n\n self.labels = []\n for i in range(len(classifiers)):\n from collections import OrderedDict\n sampling_parameters = OrderedDict()\n sp = self.sampling.sampler_parameters\n for k in sorted(list(sp.keys())):\n sampling_parameters[k] = sp[k]\n cp = classifiers[i].get_params()\n classifier_parameters = OrderedDict()\n for k in sorted(list(cp.keys())):\n classifier_parameters[k] = cp[k]\n\n label = str((self.sampling.db_name, sampling_parameters,\n classifiers[i].__class__.__name__,\n classifier_parameters))\n self.labels.append(label)\n\n print(self.labels)\n\n def calculate_metrics(self, all_pred, all_test, all_folds):\n \"\"\"\n Calculates metrics of binary classifiction\n\n Args:\n all_pred (np.matrix): predicted probabilities\n all_test (np.matrix): true labels\n\n Returns:\n dict: all metrics of binary classification\n \"\"\"\n\n results = {}\n if all_pred is not None:\n all_pred_labels = np.apply_along_axis(\n lambda x: np.argmax(x), 1, all_pred)\n\n results['tp'] = np.sum(np.logical_and(\n np.equal(all_test, all_pred_labels), (all_test == 1)))\n results['tn'] = np.sum(np.logical_and(\n np.equal(all_test, all_pred_labels), (all_test == 0)))\n results['fp'] = np.sum(np.logical_and(np.logical_not(\n np.equal(all_test, all_pred_labels)), (all_test == 0)))\n results['fn'] = np.sum(np.logical_and(np.logical_not(\n np.equal(all_test, all_pred_labels)), (all_test == 1)))\n results['p'] = results['tp'] + results['fn']\n results['n'] = results['fp'] + results['tn']\n results['acc'] = (results['tp'] + results['tn']) / \\\n (results['p'] + results['n'])\n results['sens'] = results['tp']/results['p']\n results['spec'] = results['tn']/results['n']\n results['ppv'] = results['tp']/(results['tp'] + results['fp'])\n results['npv'] = results['tn']/(results['tn'] + results['fn'])\n results['fpr'] = 1.0 - results['spec']\n results['fdr'] = 1.0 - results['ppv']\n results['fnr'] = 1.0 - results['sens']\n results['bacc'] = (results['tp']/results['p'] +\n results['tn']/results['n'])/2.0\n results['gacc'] = np.sqrt(\n results['tp']/results['p']*results['tn']/results['n'])\n results['f1'] = 2*results['tp'] / \\\n (2*results['tp'] + results['fp'] + results['fn'])\n mcc_num = results['tp']*results['tn'] - results['fp']*results['fn']\n mcc_denom_0 = (results['tp'] + results['fp'])\n mcc_denom_1 = (results['tp'] + results['fn'])\n mcc_denom_2 = (results['tn'] + results['fp'])\n mcc_denom_3 = (results['tn'] + results['fn'])\n mcc_denom = mcc_denom_0 * mcc_denom_1 * mcc_denom_2*mcc_denom_3\n results['mcc'] = mcc_num/np.sqrt(mcc_denom)\n results['l'] = (results['p'] + results['n']) * \\\n np.log(results['p'] + results['n'])\n tp_fp = (results['tp'] + results['fp'])\n tp_fn = (results['tp'] + results['fn'])\n tn_fp = (results['fp'] + results['tn'])\n tn_fn = (results['fn'] + results['tn'])\n results['ltp'] = results['tp']*np.log(results['tp']/(tp_fp*tp_fn))\n results['lfp'] = results['fp']*np.log(results['fp']/(tp_fp*tn_fp))\n results['lfn'] = results['fn']*np.log(results['fn']/(tp_fn*tn_fn))\n results['ltn'] = results['tn']*np.log(results['tn']/(tn_fp*tn_fn))\n results['lp'] = results['p'] * \\\n np.log(results['p']/(results['p'] + results['n']))\n results['ln'] = results['n'] * \\\n np.log(results['n']/(results['p'] + results['n']))\n uc_num = (results['l'] + results['ltp'] + results['lfp'] +\n results['lfn'] + results['ltn'])\n uc_denom = (results['l'] + results['lp'] + results['ln'])\n results['uc'] = uc_num/uc_denom\n results['informedness'] = results['sens'] + results['spec'] - 
1.0\n results['markedness'] = results['ppv'] + results['npv'] - 1.0\n results['log_loss'] = log_loss(all_test, all_pred)\n results['auc'] = roc_auc_score(all_test, all_pred[:, 1])\n aucs = [roc_auc_score(all_test[all_folds == i],\n all_pred[all_folds == i, 1])\n for i in range(np.max(all_folds)+1)]\n results['auc_mean'] = np.mean(aucs)\n results['auc_std'] = np.std(aucs)\n test_labels, preds = zip(\n *sorted(zip(all_test, all_pred[:, 1]), key=lambda x: -x[1]))\n test_labels = np.array(test_labels)\n th = int(0.2*len(test_labels))\n results['p_top20'] = np.sum(test_labels[:th] == 1)/th\n results['brier'] = np.mean((all_pred[:, 1] - all_test)**2)\n else:\n results['tp'] = 0\n results['tn'] = 0\n results['fp'] = 0\n results['fn'] = 0\n results['p'] = 0\n results['n'] = 0\n results['acc'] = 0\n results['sens'] = 0\n results['spec'] = 0\n results['ppv'] = 0\n results['npv'] = 0\n results['fpr'] = 1\n results['fdr'] = 1\n results['fnr'] = 1\n results['bacc'] = 0\n results['gacc'] = 0\n results['f1'] = 0\n results['mcc'] = np.nan\n results['l'] = np.nan\n results['ltp'] = np.nan\n results['lfp'] = np.nan\n results['lfn'] = np.nan\n results['ltn'] = np.nan\n results['lp'] = np.nan\n results['ln'] = np.nan\n results['uc'] = np.nan\n results['informedness'] = 0\n results['markedness'] = 0\n results['log_loss'] = np.nan\n results['auc'] = 0\n results['auc_mean'] = 0\n results['auc_std'] = 0\n results['p_top20'] = 0\n results['brier'] = 1\n\n return results\n\n def do_evaluation(self):\n \"\"\"\n Does the evaluation or reads it from file\n\n Returns:\n dict: all metrics\n \"\"\"\n\n if self.n_threads is not None:\n try:\n import mkl\n mkl.set_num_threads(self.n_threads)\n message = \" mkl thread number set to %d successfully\"\n message = message % self.n_threads\n # _logger.info(self.__class__.__name__ + message)\n except Exception as e:\n message = \" setting mkl thread number didn't succeed\"\n # _logger.info(self.__class__.__name__ + message)\n\n evaluations = {}\n if os.path.isfile(os.path.join(self.cache_path, self.filename)):\n evaluations = pickle.load(\n open(os.path.join(self.cache_path, self.filename), 'rb'))\n\n already_evaluated = np.array([li in evaluations for li in self.labels])\n\n if not np.all(already_evaluated):\n samp = self.sampling.do_sampling()\n else:\n return list(evaluations.values())\n\n # setting random states\n for i in range(len(self.classifiers)):\n clf_params = self.classifiers[i].get_params()\n if 'random_state' in clf_params:\n clf_params['random_state'] = self.random_state\n self.classifiers[i] = self.classifiers[i].__class__(\n **clf_params)\n if isinstance(self.classifiers[i], CalibratedClassifierCV):\n clf_params = self.classifiers[i].base_estimator.get_params()\n clf_params['random_state'] = self.random_state\n class_inst = self.classifiers[i].base_estimator.__class__\n new_inst = class_inst(**clf_params)\n self.classifiers[i].base_estimator = new_inst\n\n for i in range(len(self.classifiers)):\n if not already_evaluated[i]:\n message = \" do the evaluation %s %s %s\"\n message = message % (self.sampling.db_name,\n self.sampling.sampler.__name__,\n self.classifiers[i].__class__.__name__)\n # _logger.info(self.__class__.__name__ + message)\n all_preds, all_tests, all_folds = [], [], []\n minority_class_label = None\n majority_class_label = None\n fold_idx = -1\n for X_train, y_train, X_test, y_test in samp['sampling']:\n fold_idx += 1\n\n # X_train[X_train == np.inf]= 0\n # X_train[X_train == -np.inf]= 0\n # X_test[X_test == np.inf]= 0\n # X_test[X_test == 
-np.inf]= 0\n\n class_labels = np.unique(y_train)\n min_class_size = np.min(\n [np.sum(y_train == c) for c in class_labels])\n\n ss = StandardScaler()\n X_train_trans = ss.fit_transform(X_train)\n nonzero_var_idx = np.where(ss.var_ > 1e-8)[0]\n X_test_trans = ss.transform(X_test)\n\n enough_minority_samples = min_class_size > 4\n y_train_big_enough = len(y_train) > 4\n two_classes = len(class_labels) > 1\n at_least_one_feature = (len(nonzero_var_idx) > 0)\n\n if not enough_minority_samples:\n message = \" not enough minority samples: %d\"\n message = message % min_class_size\n _logger.warning(\n self.__class__.__name__ + message)\n elif not y_train_big_enough:\n message = (\" number of minority training samples is \"\n \"not enough: %d\")\n message = message % len(y_train)\n _logger.warning(self.__class__.__name__ + message)\n elif not two_classes:\n message = \" there is only 1 class in training data\"\n _logger.warning(self.__class__.__name__ + message)\n elif not at_least_one_feature:\n _logger.warning(self.__class__.__name__ +\n (\" no information in features\"))\n else:\n all_tests.append(y_test)\n if (minority_class_label is None or\n majority_class_label is None):\n class_labels = np.unique(y_train)\n n_0 = sum(class_labels[0] == y_test)\n n_1 = sum(class_labels[1] == y_test)\n if n_0 < n_1:\n minority_class_label = int(class_labels[0])\n majority_class_label = int(class_labels[1])\n else:\n minority_class_label = int(class_labels[1])\n majority_class_label = int(class_labels[0])\n\n X_fit = X_train_trans[:, nonzero_var_idx]\n self.classifiers[i].fit(X_fit, y_train)\n clf = self.classifiers[i]\n X_pred = X_test_trans[:, nonzero_var_idx]\n pred = clf.predict_proba(X_pred)\n all_preds.append(pred)\n all_folds.append(\n np.repeat(fold_idx, len(all_preds[-1])))\n\n if len(all_tests) > 0:\n all_preds = np.vstack(all_preds)\n all_tests = np.hstack(all_tests)\n all_folds = np.hstack(all_folds)\n\n evaluations[self.labels[i]] = self.calculate_metrics(\n all_preds, all_tests, all_folds)\n else:\n evaluations[self.labels[i]] = self.calculate_metrics(\n None, None, None)\n\n evaluations[self.labels[i]]['runtime'] = samp['runtime']\n sampler_name = self.sampling.sampler.__name__\n evaluations[self.labels[i]]['sampler'] = sampler_name\n clf_name = self.classifiers[i].__class__.__name__\n evaluations[self.labels[i]]['classifier'] = clf_name\n sampler_parameters = self.sampling.sampler_parameters.copy()\n\n evaluations[self.labels[i]]['sampler_parameters'] = str(\n sampler_parameters)\n evaluations[self.labels[i]]['classifier_parameters'] = str(\n self.classifiers[i].get_params())\n evaluations[self.labels[i]]['sampler_categories'] = str(\n self.sampling.sampler.categories)\n evaluations[self.labels[i]\n ]['db_name'] = self.sampling.folding.db_name\n evaluations[self.labels[i]]['db_size'] = samp['db_size']\n evaluations[self.labels[i]]['db_n_attr'] = samp['db_n_attr']\n evaluations[self.labels[i]\n ]['imbalanced_ratio'] = samp['imbalanced_ratio']\n\n if not np.all(already_evaluated):\n _logger.info(self.__class__.__name__ +\n (\" dumping to file %s\" % self.filename))\n random_filename = os.path.join(self.cache_path, str(\n np.random.randint(1000000)) + '.pickle')\n pickle.dump(evaluations, open(random_filename, \"wb\"))\n os.rename(random_filename, os.path.join(\n self.cache_path, self.filename))\n\n return list(evaluations.values())\n\n\ndef trans(X):\n \"\"\"\n Transformation function used to aggregate the evaluation results.\n\n Args:\n X (pd.DataFrame): a grouping of a data frame containing 
evaluation\n results\n \"\"\"\n auc_std = X.iloc[np.argmax(X['auc_mean'].values)]['auc_std']\n cp_auc = X.sort_values('auc')['classifier_parameters'].iloc[-1]\n cp_acc = X.sort_values('acc')['classifier_parameters'].iloc[-1]\n cp_gacc = X.sort_values('gacc')['classifier_parameters'].iloc[-1]\n cp_f1 = X.sort_values('f1')['classifier_parameters'].iloc[-1]\n cp_p_top20 = X.sort_values('p_top20')['classifier_parameters'].iloc[-1]\n cp_brier = X.sort_values('brier')['classifier_parameters'].iloc[-1]\n sp_auc = X.sort_values('auc')['sampler_parameters'].iloc[-1]\n sp_acc = X.sort_values('acc')['sampler_parameters'].iloc[-1]\n sp_gacc = X.sort_values('gacc')['sampler_parameters'].iloc[-1]\n sp_f1 = X.sort_values('f1')['sampler_parameters'].iloc[-1]\n sp_p_top20 = X.sort_values('p_top20')['sampler_parameters'].iloc[-1]\n sp_brier = X.sort_values('p_top20')['sampler_parameters'].iloc[0]\n\n return pd.DataFrame({'auc': np.max(X['auc']),\n 'auc_mean': np.max(X['auc_mean']),\n 'auc_std': auc_std,\n 'brier': np.min(X['brier']),\n 'acc': np.max(X['acc']),\n 'f1': np.max(X['f1']),\n 'p_top20': np.max(X['p_top20']),\n 'gacc': np.max(X['gacc']),\n 'runtime': np.mean(X['runtime']),\n 'db_size': X['db_size'].iloc[0],\n 'db_n_attr': X['db_n_attr'].iloc[0],\n 'imbalanced_ratio': X['imbalanced_ratio'].iloc[0],\n 'sampler_categories': X['sampler_categories'].iloc[0],\n 'classifier_parameters_auc': cp_auc,\n 'classifier_parameters_acc': cp_acc,\n 'classifier_parameters_gacc': cp_gacc,\n 'classifier_parameters_f1': cp_f1,\n 'classifier_parameters_p_top20': cp_p_top20,\n 'classifier_parameters_brier': cp_brier,\n 'sampler_parameters_auc': sp_auc,\n 'sampler_parameters_acc': sp_acc,\n 'sampler_parameters_gacc': sp_gacc,\n 'sampler_parameters_f1': sp_f1,\n 'sampler_parameters_p_top20': sp_p_top20,\n 'sampler_parameters_brier': sp_brier,\n }, index=[0])\n\n\ndef _clone_classifiers(classifiers):\n \"\"\"\n Clones a set of classifiers\n\n Args:\n classifiers (list): a list of classifier objects\n \"\"\"\n results = []\n for c in classifiers:\n if isinstance(c, MLPClassifierWrapper):\n results.append(c.copy())\n else:\n results.append(clone(c))\n\n return results\n\n\ndef _cache_samplings(folding,\n samplers,\n scaler,\n max_n_sampler_par_comb=35,\n n_jobs=1,\n random_state=None):\n \"\"\"\n\n \"\"\"\n _logger.info(\"create sampling objects, random_state: %s\" %\n str(random_state or \"\"))\n sampling_objs = []\n\n random_state_init = random_state\n random_state = np.random.RandomState(random_state_init)\n\n _logger.info(\"samplers: %s\" % str(samplers))\n for s in samplers:\n sampling_par_comb = s.parameter_combinations()\n # following line prints all possible combinations\n# _logger.info(sampling_par_comb)\n \n domain = np.array(list(range(len(sampling_par_comb))))\n n_random = min([len(sampling_par_comb), max_n_sampler_par_comb])\n random_indices = random_state.choice(domain, n_random, replace=False)\n \n #following line prints indices chosem\n# _logger.info(\"random_indices: %s\" % random_indices)\n \n \n sampling_par_comb = [sampling_par_comb[i] for i in random_indices]\n \n # prints combinations selected\n# _logger.info(sampling_par_comb)\n\n for spc in sampling_par_comb:\n sampling_objs.append(Sampling(folding,\n s,\n spc,\n scaler,\n random_state_init))\n\n # sorting sampling objects to optimize execution\n def key(x):\n if (isinstance(x.sampler, ADG) or isinstance(x.sampler, AMSCO) or\n isinstance(x.sampler, DSRBF)):\n if 'proportion' in x.sampler_parameters:\n return 30 + x.sampler_parameters['proportion']\n 
else:\n return 30\n elif 'proportion' in x.sampler_parameters:\n return x.sampler_parameters['proportion']\n elif OverSampling.cat_memetic in x.sampler.categories:\n return 20\n else:\n return 10\n\n sampling_objs = list(reversed(sorted(sampling_objs, key=key)))\n\n # executing sampling in parallel\n _logger.info(\"executing %d sampling in parallel\" % len(sampling_objs))\n Parallel(n_jobs=n_jobs, batch_size=1)(delayed(s.cache_sampling)()\n for s in sampling_objs)\n\n return sampling_objs\n\n\ndef _cache_evaluations(sampling_objs,\n classifiers,\n n_jobs=1,\n random_state=None):\n # create evaluation objects\n _logger.info(\"create classifier jobs\")\n evaluation_objs = []\n\n num_threads = None if n_jobs is None or n_jobs == 1 else 1\n\n for s in sampling_objs:\n evaluation_objs.append(Evaluation(s, _clone_classifiers(\n classifiers), num_threads, random_state))\n\n _logger.info(\"executing %d evaluation jobs in parallel\" %\n (len(evaluation_objs)))\n # execute evaluation in parallel\n evals = Parallel(n_jobs=n_jobs, batch_size=1)(\n delayed(e.do_evaluation)() for e in evaluation_objs)\n\n return evals\n\n\ndef _read_db_results(cache_path_db):\n results = []\n evaluation_files = glob.glob(os.path.join(cache_path_db, 'eval*.pickle'))\n\n for f in evaluation_files:\n eval_results = pickle.load(open(f, 'rb'))\n results.append(list(eval_results.values()))\n\n return results\n\n\ndef read_oversampling_results(datasets, cache_path=None, all_results=False):\n \"\"\"\n Reads the results of the evaluation\n\n Args:\n datasets (list): list of datasets and/or dataset loaders - a dataset\n is a dict with 'data', 'target' and 'name' keys\n cache_path (str): path to a cache directory\n all_results (bool): True to return all results, False to return an\n aggregation\n\n Returns:\n pd.DataFrame: all results or the aggregated results if all_results is\n False\n \"\"\"\n\n results = []\n for dataset_spec in datasets:\n\n # loading dataset if needed and determining dataset name\n if not isinstance(dataset_spec, dict):\n dataset = dataset_spec()\n else:\n dataset = dataset_spec\n\n if 'name' in dataset:\n dataset_name = dataset['name']\n else:\n dataset_name = dataset_spec.__name__\n\n dataset['name'] = dataset_name\n\n # determining dataset specific cache path\n cache_path_db = os.path.join(cache_path, dataset_name)\n\n # reading the results\n res = _read_db_results(cache_path_db)\n\n # concatenating the results\n _logger.info(\"concatenating results\")\n db_res = [pd.DataFrame(r) for r in res]\n db_res = pd.concat(db_res).reset_index(drop=True)\n\n _logger.info(\"aggregating the results\")\n if all_results is False:\n db_res = db_res.groupby(by=['db_name', 'classifier', 'sampler'])\n db_res.apply(trans).reset_index().drop('level_3', axis=1)\n\n results.append(db_res)\n\n return pd.concat(results).reset_index(drop=True)\n\n\ndef evaluate_oversamplers(datasets,\n samplers,\n classifiers,\n cache_path,\n validator=RepeatedStratifiedKFold(\n n_splits=5, n_repeats=3),\n scaler=None,\n all_results=False,\n remove_cache=False,\n max_samp_par_comb=35,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Evaluates oversampling techniques using various classifiers on various\n datasets\n\n Args:\n datasets (list): list of datasets and/or dataset loaders - a dataset\n is a dict with 'data', 'target' and 'name' keys\n samplers (list): list of oversampling classes/objects\n classifiers (list): list of classifier objects\n cache_path (str): path to a cache directory\n validator (obj): validator object\n scaler (obj): scaler 
object\n all_results (bool): True to return all results, False to return an\n aggregation\n remove_cache (bool): True to remove sampling objects after\n evaluation\n max_samp_par_comb (int): maximum number of sampler parameter\n combinations to be tested\n n_jobs (int): number of parallel jobs\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n\n Returns:\n pd.DataFrame: all results or the aggregated results if all_results is\n False\n\n Example::\n\n import smote_variants as sv\n import imbalanced_datasets as imbd\n\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.neighbors import KNeighborsClassifier\n\n datasets= [imbd.load_glass2, imbd.load_ecoli4]\n oversamplers= [sv.SMOTE_ENN, sv.NEATER, sv.Lee]\n classifiers= [KNeighborsClassifier(n_neighbors= 3),\n KNeighborsClassifier(n_neighbors= 5),\n DecisionTreeClassifier()]\n\n cache_path= '/home/<user>/smote_validation/'\n\n results= evaluate_oversamplers(datasets,\n oversamplers,\n classifiers,\n cache_path)\n \"\"\"\n _logger.info(str(cache_path))\n if cache_path is None:\n raise ValueError('cache_path is not specified')\n\n results = []\n for dataset_spec in datasets:\n # loading dataset if needed and determining dataset name\n if not isinstance(dataset_spec, dict):\n dataset = dataset_spec()\n else:\n dataset = dataset_spec\n\n if 'name' in dataset:\n dataset_name = dataset['name']\n else:\n dataset_name = dataset_spec.__name__\n\n dataset['name'] = dataset_name\n\n dataset_original_target = dataset['target'].copy()\n class_labels = np.unique(dataset['target'])\n n_0 = sum(dataset['target'] == class_labels[0])\n n_1 = sum(dataset['target'] == class_labels[1])\n if n_0 < n_1:\n min_label = class_labels[0]\n maj_label = class_labels[1]\n else:\n min_label = class_labels[1]\n maj_label = class_labels[0]\n min_ind = np.where(dataset['target'] == min_label)[0]\n maj_ind = np.where(dataset['target'] == maj_label)[0]\n np.put(dataset['target'], min_ind, 1)\n np.put(dataset['target'], maj_ind, 0)\n\n cache_path_db = os.path.join(cache_path, dataset_name)\n if not os.path.isdir(cache_path_db):\n _logger.info(\"creating cache directory\")\n os.makedirs(cache_path_db)\n\n # checking of samplings and evaluations are available\n samplings_available = False\n evaluations_available = False\n\n samplings = glob.glob(os.path.join(cache_path_db, 'sampling*.pickle'))\n if len(samplings) > 0:\n samplings_available = True\n\n evaluations = glob.glob(os.path.join(cache_path_db, 'eval*.pickle'))\n if len(evaluations) > 0:\n evaluations_available = True\n\n message = (\"dataset: %s, samplings_available: %s, \"\n \"evaluations_available: %s\")\n message = message % (dataset_name, str(samplings_available),\n str(evaluations_available))\n _logger.info(message)\n\n if (remove_cache and evaluations_available and\n not samplings_available):\n # remove_cache is enabled and evaluations are available,\n # they are being read\n message = (\"reading result from cache, sampling and evaluation is\"\n \" not executed\")\n _logger.info(message)\n res = _read_db_results(cache_path_db)\n else:\n _logger.info(\"doing the folding\")\n folding = Folding(dataset, validator, cache_path_db, random_state)\n folding.do_folding()\n\n _logger.info(\"do the samplings\")\n sampling_objs = _cache_samplings(folding,\n samplers,\n scaler,\n max_samp_par_comb,\n n_jobs,\n random_state)\n\n _logger.info(\"do the evaluations\")\n res = _cache_evaluations(\n sampling_objs, classifiers, n_jobs, random_state)\n\n dataset['target'] = 
dataset_original_target\n\n # removing samplings once everything is done\n if remove_cache:\n filenames = glob.glob(os.path.join(cache_path_db, 'sampling*'))\n _logger.info(\"removing unnecessary sampling files\")\n if len(filenames) > 0:\n for f in filenames:\n os.remove(f)\n\n _logger.info(\"concatenating the results\")\n db_res = [pd.DataFrame(r) for r in res]\n db_res = pd.concat(db_res).reset_index(drop=True)\n\n random_filename = os.path.join(cache_path_db, str(\n np.random.randint(1000000)) + '.pickle')\n pickle.dump(db_res, open(random_filename, \"wb\"))\n os.rename(random_filename, os.path.join(\n cache_path_db, 'results.pickle'))\n\n _logger.info(\"aggregating the results\")\n if all_results is False:\n db_res = db_res.groupby(by=['db_name', 'classifier', 'sampler'])\n db_res = db_res.apply(trans).reset_index().drop('level_3', axis=1)\n\n results.append(db_res)\n\n return pd.concat(results).reset_index(drop=True)\n\n\ndef model_selection(dataset,\n samplers,\n classifiers,\n cache_path,\n score='auc',\n validator=RepeatedStratifiedKFold(n_splits=5, n_repeats=3),\n remove_cache=False,\n max_samp_par_comb=35,\n n_jobs=1,\n random_state=None):\n \"\"\"\n Evaluates oversampling techniques on various classifiers and a dataset\n and returns the oversampling and classifier objects giving the best\n performance\n\n Args:\n dataset (dict): a dataset is a dict with 'data', 'target' and 'name'\n keys\n samplers (list): list of oversampling classes/objects\n classifiers (list): list of classifier objects\n cache_path (str): path to a cache directory\n score (str): 'auc'/'acc'/'gacc'/'f1'/'brier'/'p_top20'\n validator (obj): validator object\n all_results (bool): True to return all results, False to return an\n aggregation\n remove_cache (bool): True to remove sampling objects after\n evaluation\n max_samp_par_comb (int): maximum number of sampler parameter\n combinations to be tested\n n_jobs (int): number of parallel jobs\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n\n Returns:\n obj, obj: the best performing sampler object and the best performing\n classifier object\n\n Example::\n\n import smote_variants as sv\n import imbalanced_datasets as imbd\n\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.neighbors import KNeighborsClassifier\n\n datasets= imbd.load_glass2()\n oversamplers= [sv.SMOTE_ENN, sv.NEATER, sv.Lee]\n classifiers= [KNeighborsClassifier(n_neighbors= 3),\n KNeighborsClassifier(n_neighbors= 5),\n DecisionTreeClassifier()]\n\n cache_path= '/home/<user>/smote_validation/'\n\n sampler, classifier= model_selection(dataset,\n oversamplers,\n classifiers,\n cache_path,\n 'auc')\n \"\"\"\n\n if score not in ['auc', 'acc', 'gacc', 'f1', 'brier', 'p_top20']:\n raise ValueError(\"score %s not supported\" % score)\n\n results = evaluate_oversamplers(datasets=[dataset],\n samplers=samplers,\n classifiers=classifiers,\n cache_path=cache_path,\n validator=validator,\n remove_cache=remove_cache,\n max_samp_par_comb=max_samp_par_comb,\n n_jobs=n_jobs,\n random_state=random_state)\n\n # extracting the best performing classifier and oversampler parameters\n # regarding AUC\n highest_score = results[score].idxmax()\n cl_par_name = 'classifier_parameters_' + score\n samp_par_name = 'sampler_parameters_' + score\n cl, cl_par, samp, samp_par = results.loc[highest_score][['classifier',\n cl_par_name,\n 'sampler',\n samp_par_name]]\n\n # instantiating the best performing oversampler and classifier objects\n samp_obj = 
eval(samp)(**eval(samp_par))\n cl_obj = eval(cl)(**eval(cl_par))\n\n return samp_obj, cl_obj\n\n\ndef cross_validate(dataset,\n sampler,\n classifier,\n validator=RepeatedStratifiedKFold(n_splits=5, n_repeats=3),\n scaler=StandardScaler(),\n random_state=None):\n \"\"\"\n Evaluates oversampling techniques on various classifiers and a dataset\n and returns the oversampling and classifier objects giving the best\n performance\n\n Args:\n dataset (dict): a dataset is a dict with 'data', 'target' and 'name'\n keys\n samplers (list): list of oversampling classes/objects\n classifiers (list): list of classifier objects\n validator (obj): validator object\n scaler (obj): scaler object\n random_state (int/np.random.RandomState/None): initializer of the\n random state\n\n Returns:\n pd.DataFrame: the cross-validation scores\n\n Example::\n\n import smote_variants as sv\n import imbalanced_datasets as imbd\n\n from sklearn.neighbors import KNeighborsClassifier\n\n dataset= imbd.load_glass2()\n sampler= sv.SMOTE_ENN\n classifier= KNeighborsClassifier(n_neighbors= 3)\n\n sampler, classifier= model_selection(dataset,\n oversampler,\n classifier)\n \"\"\"\n\n class_labels = np.unique(dataset['target'])\n binary_problem = (len(class_labels) == 2)\n\n dataset_orig_target = dataset['target'].copy()\n if binary_problem:\n _logger.info(\"The problem is binary\")\n n_0 = sum(dataset['target'] == class_labels[0])\n n_1 = sum(dataset['target'] == class_labels[1])\n if n_0 < n_1:\n min_label = class_labels[0]\n maj_label = class_labels[1]\n else:\n min_label = class_labels[0]\n maj_label = class_labels[1]\n\n min_ind = np.where(dataset['target'] == min_label)[0]\n maj_ind = np.where(dataset['target'] == maj_label)[0]\n np.put(dataset['target'], min_ind, 1)\n np.put(dataset['target'], maj_ind, 0)\n else:\n _logger.info(\"The problem is not binary\")\n label_indices = {}\n for c in class_labels:\n label_indices[c] = np.where(dataset['target'] == c)[0]\n mapping = {}\n for i, c in enumerate(class_labels):\n np.put(dataset['target'], label_indices[c], i)\n mapping[i] = c\n\n runtimes = []\n all_preds, all_tests = [], []\n\n for train, test in validator.split(dataset['data'], dataset['target']):\n _logger.info(\"Executing fold\")\n X_train, y_train = dataset['data'][train], dataset['target'][train]\n X_test, y_test = dataset['data'][test], dataset['target'][test]\n\n begin = time.time()\n X_samp, y_samp = sampler.sample(X_train, y_train)\n runtimes.append(time.time() - begin)\n\n X_samp_trans = scaler.fit_transform(X_samp)\n nonzero_var_idx = np.where(scaler.var_ > 1e-8)[0]\n X_test_trans = scaler.transform(X_test)\n\n all_tests.append(y_test)\n\n classifier.fit(X_samp_trans[:, nonzero_var_idx], y_samp)\n all_preds.append(classifier.predict_proba(\n X_test_trans[:, nonzero_var_idx]))\n\n if len(all_tests) > 0:\n all_preds = np.vstack(all_preds)\n all_tests = np.hstack(all_tests)\n\n dataset['target'] = dataset_orig_target\n\n _logger.info(\"Computing the results\")\n\n results = {}\n results['runtime'] = np.mean(runtimes)\n results['sampler'] = sampler.__class__.__name__\n results['classifier'] = classifier.__class__.__name__\n results['sampler_parameters'] = str(sampler.get_params())\n results['classifier_parameters'] = str(classifier.get_params())\n results['db_size'] = len(dataset['data'])\n results['db_n_attr'] = len(dataset['data'][0])\n results['db_n_classes'] = len(class_labels)\n\n if binary_problem:\n results['imbalance_ratio'] = sum(\n dataset['target'] == maj_label)/sum(dataset['target'] == min_label)\n 
all_pred_labels = np.apply_along_axis(\n lambda x: np.argmax(x), 1, all_preds)\n\n results['tp'] = np.sum(np.logical_and(\n np.equal(all_tests, all_pred_labels), (all_tests == 1)))\n results['tn'] = np.sum(np.logical_and(\n np.equal(all_tests, all_pred_labels), (all_tests == 0)))\n results['fp'] = np.sum(np.logical_and(np.logical_not(\n np.equal(all_tests, all_pred_labels)), (all_tests == 0)))\n results['fn'] = np.sum(np.logical_and(np.logical_not(\n np.equal(all_tests, all_pred_labels)), (all_tests == 1)))\n results['p'] = results['tp'] + results['fn']\n results['n'] = results['fp'] + results['tn']\n results['acc'] = (results['tp'] + results['tn']) / \\\n (results['p'] + results['n'])\n results['sens'] = results['tp']/results['p']\n results['spec'] = results['tn']/results['n']\n results['ppv'] = results['tp']/(results['tp'] + results['fp'])\n results['npv'] = results['tn']/(results['tn'] + results['fn'])\n results['fpr'] = 1.0 - results['spec']\n results['fdr'] = 1.0 - results['ppv']\n results['fnr'] = 1.0 - results['sens']\n results['bacc'] = (results['tp']/results['p'] +\n results['tn']/results['n'])/2.0\n results['gacc'] = np.sqrt(\n results['tp']/results['p']*results['tn']/results['n'])\n results['f1'] = 2*results['tp'] / \\\n (2*results['tp'] + results['fp'] + results['fn'])\n mcc_num = (results['tp']*results['tn'] - results['fp']*results['fn'])\n tp_fp = (results['tp'] + results['fp'])\n tp_fn = (results['tp'] + results['fn'])\n tn_fp = (results['tn'] + results['fp'])\n tn_fn = (results['tn'] + results['fn'])\n mcc_denom = np.sqrt(tp_fp * tp_fn * tn_fp * tn_fn)\n results['mcc'] = mcc_num/mcc_denom\n results['l'] = (results['p'] + results['n']) * \\\n np.log(results['p'] + results['n'])\n results['ltp'] = results['tp']*np.log(results['tp']/(\n (results['tp'] + results['fp'])*(results['tp'] + results['fn'])))\n results['lfp'] = results['fp']*np.log(results['fp']/(\n (results['fp'] + results['tp'])*(results['fp'] + results['tn'])))\n results['lfn'] = results['fn']*np.log(results['fn']/(\n (results['fn'] + results['tp'])*(results['fn'] + results['tn'])))\n results['ltn'] = results['tn']*np.log(results['tn']/(\n (results['tn'] + results['fp'])*(results['tn'] + results['fn'])))\n results['lp'] = results['p'] * \\\n np.log(results['p']/(results['p'] + results['n']))\n results['ln'] = results['n'] * \\\n np.log(results['n']/(results['p'] + results['n']))\n ucc_num = (results['l'] + results['ltp'] + results['lfp'] +\n results['lfn'] + results['ltn'])\n results['uc'] = ucc_num/(results['l'] + results['lp'] + results['ln'])\n results['informedness'] = results['sens'] + results['spec'] - 1.0\n results['markedness'] = results['ppv'] + results['npv'] - 1.0\n results['log_loss'] = log_loss(all_tests, all_preds)\n results['auc'] = roc_auc_score(all_tests, all_preds[:, 1])\n test_labels, preds = zip(\n *sorted(zip(all_tests, all_preds[:, 1]), key=lambda x: -x[1]))\n test_labels = np.array(test_labels)\n th = int(0.2*len(test_labels))\n results['p_top20'] = np.sum(test_labels[:th] == 1)/th\n results['brier'] = np.mean((all_preds[:, 1] - all_tests)**2)\n else:\n all_pred_labels = np.apply_along_axis(\n lambda x: np.argmax(x), 1, all_preds)\n\n results['acc'] = accuracy_score(all_tests, all_pred_labels)\n results['confusion_matrix'] = confusion_matrix(\n all_tests, all_pred_labels)\n sum_confusion = np.sum(results['confusion_matrix'], axis=0)\n results['gacc'] = gmean(np.diagonal(\n results['confusion_matrix'])/sum_confusion)\n results['class_label_mapping'] = mapping\n\n return pd.DataFrame({'value': 
list(results.values())},\n index=results.keys())\n" ]
[ [ "scipy.spatial.Voronoi", "numpy.linalg.matrix_rank", "numpy.sqrt", "sklearn.model_selection.KFold", "numpy.all", "sklearn.cluster.AgglomerativeClustering", "numpy.exp", "numpy.where", "sklearn.preprocessing.MinMaxScaler", "tensorflow.random.set_seed", "numpy.unique", "numpy.full", "numpy.block", "numpy.diff", "numpy.outer", "tensorflow.compat.v1.set_random_seed", "numpy.zeros", "numpy.log", "pandas.concat", "numpy.multiply", "numpy.median", "scipy.signal.find_peaks_cwt", "numpy.delete", "numpy.equal", "numpy.array", "numpy.sum", "numpy.inner", "numpy.isinf", "numpy.vstack", "sklearn.metrics.roc_auc_score", "sklearn.cluster.KMeans", "numpy.nan_to_num", "sklearn.cluster.DBSCAN", "numpy.concatenate", "numpy.int", "numpy.fill_diagonal", "numpy.var", "tensorflow.compat.v1.keras.backend.set_session", "numpy.linalg.cond", "numpy.std", "numpy.argmax", "scipy.stats.skew", "numpy.min", "sklearn.manifold.Isomap", "sklearn.manifold.LocallyLinearEmbedding", "numpy.arccos", "sklearn.metrics.log_loss", "tensorflow.set_random_seed", "sklearn.mixture.GaussianMixture", "numpy.not_equal", "numpy.random.RandomState", "tensorflow.compat.v1.get_default_graph", "numpy.linalg.solve", "numpy.maximum", "sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis", "sklearn.linear_model.LinearRegression", "numpy.empty", "sklearn.neural_network.MLPClassifier", "numpy.take", "sklearn.metrics.confusion_matrix", "pandas.DataFrame", "numpy.round", "sklearn.manifold.TSNE", "sklearn.base.clone", "numpy.mean", "numpy.argmin", "sklearn.discriminant_analysis.LinearDiscriminantAnalysis", "numpy.random.randint", "numpy.hstack", "numpy.clip", "sklearn.model_selection.StratifiedKFold", "numpy.linalg.det", "numpy.column_stack", "numpy.repeat", "numpy.logical_not", "sklearn.naive_bayes.GaussianNB", "numpy.linalg.inv", "numpy.logspace", "numpy.isnan", "numpy.rint", "numpy.logical_or", "numpy.cov", "sklearn.svm.SVC", "numpy.argsort", "sklearn.metrics.pairwise.pairwise_distances", "sklearn.decomposition.PCA", "numpy.diagonal", "tensorflow.compat.v1.ConfigProto", "sklearn.linear_model.LogisticRegression", "numpy.linalg.norm", "numpy.bincount", "numpy.dot", "numpy.max", "sklearn.tree.DecisionTreeClassifier", "scipy.optimize.linear_sum_assignment", "sklearn.ensemble.RandomForestClassifier", "numpy.linalg.eig", "numpy.arange", "sklearn.neighbors.KNeighborsClassifier", "sklearn.neighbors.NearestNeighbors", "scipy.special.erf", "numpy.linalg.cholesky", "numpy.logical_and", "sklearn.model_selection.cross_val_score", "numpy.abs", "numpy.random.seed", "numpy.put", "numpy.sort", "sklearn.model_selection.RepeatedStratifiedKFold", "numpy.prod", "sklearn.preprocessing.StandardScaler", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
Ingvarstep/spodernet
[ "b03d60e91588f234fc34fe59fe8a74a153f56b97" ]
[ "spodernet/preprocessing/processors.py" ]
[ "from __future__ import unicode_literals\nfrom os.path import join\nfrom spodernet.utils.util import Timer\nfrom spodernet.utils.util import get_data_path, save_data, make_dirs_if_not_exists, load_data, Timer\nfrom spodernet.interfaces import IAtBatchPreparedObservable\nfrom spodernet.utils.global_config import Config\nfrom past.builtins import basestring, long\n\nimport numpy as np\nimport os\nimport copy\nimport spacy\nimport nltk\nimport json\nimport pickle\n\nfrom spodernet.utils.logger import Logger\nlog = Logger('processors.py.txt')\n\nnlp = spacy.load('en')\ntimer = Timer()\n\nclass KeyToKeyMapper(IAtBatchPreparedObservable):\n def __init__(self, key2key):\n self.key2key = key2key\n\n def at_batch_prepared(self, batch_parts):\n str2var = batch_parts\n new_str2var = {}\n for key1, key2 in self.key2key.items():\n new_str2var[key2] = str2var[key1]\n\n return new_str2var\n\nclass DictConverter(IAtBatchPreparedObservable):\n def __init__(self, keys=['input', 'support', 'target']):\n self.keys = keys\n\n def at_batch_prepared(self, batch_parts):\n str2var = {}\n i = 0\n for key in self.keys:\n str2var[key] = batch_parts[i]\n i += 1\n if i == 2*len(self.keys): break\n str2var[key+'_length'] = batch_parts[i]\n i += 1\n\n str2var['index'] = batch_parts[-1]\n\n return str2var\n\n\nclass TargetIdx2MultiTarget(IAtBatchPreparedObservable):\n def __init__(self, num_labels, variable_name, new_variable_name, shape=None, stop_index=0):\n self.num_labels = num_labels\n self.variable_name = variable_name\n self.new_variable_name = new_variable_name\n self.shape = shape\n self.stop_index = stop_index\n\n\n def at_batch_prepared(self, str2var):\n t = str2var[self.variable_name]\n if self.shape:\n new_t = np.zeros(self.shape, dtype=np.int64)\n else:\n new_t = np.zeros((t.shape[0], self.num_labels), dtype=np.int64)\n is_packed_array = isinstance(t[0], np.ndarray)\n\n for i, row in enumerate(t):\n if (isinstance(t, list) or len(t.shape) == 1):\n if row == self.stop_index: continue\n new_t[i, row] = 1\n else:\n for col in row:\n if col == self.stop_index: break\n new_t[i, col] = 1\n\n str2var[self.new_variable_name] = new_t\n\n return str2var\n\nclass VariableLengthSorter(IAtBatchPreparedObservable):\n def __init__(self, variable_name, postfix):\n self.variable_name = variable_name\n self.postfix = postfix\n\n def at_batch_prepared(self, str2var):\n var_len = str2var[self.variable_name + '_length']\n argidx = np.argsort(var_len)[::-1]\n\n for key in str2var.keys():\n str2var[key+self.postfix] = str2var[key][argidx]\n if 'length' in key:\n str2var[key+self.postfix] = str2var[key][argidx].tolist()\n\n return str2var\n\nclass ListIndexRemapper(object):\n def __init__(self, list_of_new_idx):\n self.list_of_new_idx = list_of_new_idx\n\n def at_batch_prepared(self, line):\n new_line = []\n for idx in self.list_of_new_idx:\n new_line.append(line[idx])\n\n return new_line\n\nclass JsonLoaderProcessors(object):\n def process(self, line):\n return json.loads(line)\n\nclass RemoveLineOnJsonValueCondition(object):\n def __init__(self, key, func_condition):\n self.key = key\n self.func_condition = func_condition\n\n def process(self, json_dict):\n if self.func_condition(json_dict[self.key]):\n return None\n else:\n return json_dict\n\nclass DictKey2ListMapper(object):\n def __init__(self, ordered_keys_source):\n self.ordered_keys_source = ordered_keys_source\n\n def process(self, dict_object):\n list_of_ordered_values = []\n for key in self.ordered_keys_source:\n list_of_ordered_values.append(dict_object[key])\n 
return list_of_ordered_values\n\n\nclass AbstractProcessor(object):\n def __init__(self):\n self.state = None\n self.execution_state = set(['fit', 'transform'])\n self.sample_counter = 0\n self.timer = Timer(silent=True)\n\n def link_with_pipeline(self, state):\n self.state = state\n\n def abstract_process(self, inputs, inp_type, benchmark):\n benchmark=True\n if benchmark:\n self.sample_counter +=1\n self.timer.tick()\n result = self.process(inputs, inp_type)\n if benchmark:\n self.timer.tick()\n if self.sample_counter == 10000:\n log.info_once('Time taken for 10000 samples for input type {0} for processor {1}: '.format(inp_type, type(self).__name__) + '{0} seconds', round(self.timer.tock(), 2))\n return result\n\n def process(self, inputs, inp_type):\n raise NotImplementedError('Classes that inherit from AbstractProcessor need to implement the process method')\n\n\nclass AbstractLoopLevelTokenProcessor(AbstractProcessor):\n def __init__(self):\n super(AbstractLoopLevelTokenProcessor, self).__init__()\n self.successive_for_loops_to_tokens = None\n self.execution_state = set(['fit', 'transform'])\n\n def process_token(self, token, inp_type):\n raise NotImplementedError('Classes that inherit from AbstractLoopLevelTokenProcessor need to implement the process_token method ')\n\n def process(self, sample, inp_type):\n if self.successive_for_loops_to_tokens == None:\n i = 0\n level = sample\n while not ( isinstance(level, basestring)\n or isinstance(level, long)):\n level = level[0]\n i+=1\n self.successive_for_loops_to_tokens = i\n\n if self.successive_for_loops_to_tokens == 0:\n ret = self.process_token(sample, inp_type)\n\n elif self.successive_for_loops_to_tokens == 1:\n new_tokens = []\n for token in sample:\n new_tokens.append(self.process_token(token, inp_type))\n ret = new_tokens\n\n elif self.successive_for_loops_to_tokens == 2:\n new_sents = []\n for sent in sample:\n new_tokens = []\n for token in sent:\n new_tokens.append(self.process_token(token, inp_type))\n new_sents.append(new_tokens)\n ret = new_sents\n\n return ret\n\nclass AbstractLoopLevelListOfTokensProcessor(AbstractProcessor):\n def __init__(self):\n super(AbstractLoopLevelListOfTokensProcessor, self).__init__()\n self.successive_for_loops_to_list_of_tokens = None\n self.execution_state = set(['fit', 'transform'])\n\n def process_list_of_tokens(self, tokens, inp_type):\n raise NotImplementedError('Classes that inherit from AbstractLoopLevelListOfTokensProcessor need to implement the process_list_of_tokens method ')\n\n def process(self, sample, inp_type):\n if self.successive_for_loops_to_list_of_tokens == None:\n i = 0\n level = sample\n while not (isinstance(level, basestring)\n or isinstance(level, int)\n or isinstance(level, np.int32)\n or isinstance(level, np.float32)):\n level = level[0]\n i+=1\n self.successive_for_loops_to_list_of_tokens = i-1\n\n if self.successive_for_loops_to_list_of_tokens == 0:\n ret = self.process_list_of_tokens(sample, inp_type, samples_idx)\n\n elif self.successive_for_loops_to_list_of_tokens == 1:\n new_sents = []\n for sent in sample:\n new_sents.append(self.process_list_of_tokens(sent, inp_type))\n ret = new_sents\n\n return ret\n\nclass TfidfFitter(AbstractProcessor):\n def __init__(self):\n super(TfidfFitter, self).__init__()\n self.execution_state = set(['fit'])\n\n def link_with_pipeline(self, state):\n self.tfidf = state['tfidf']\n state['tfidf_data'] = {}\n self.data = state['tfidf_data']\n\n def process(self, data, inp_type):\n if inp_type not in self.data: self.data[inp_type] = 
[]\n self.data[inp_type].append(data)\n return data\n\nclass TfidfTransformer(AbstractLoopLevelListOfTokensProcessor):\n def __init__(self):\n super(TfidfTransformer, self).__init__()\n self.fitted = set()\n self.execution_state = set(['transform'])\n\n def link_with_pipeline(self, state):\n self.tfidf = state['tfidf']\n self.data = state['tfidf_data']\n\n def process(self, list_of_token, inp_type):\n if inp_type not in self.fitted:\n self.tfidf[inp_type].fit(self.data[inp_type])\n self.fitted.add(inp_type)\n doc = ' '.join(list_of_token)\n X = self.tfidf[inp_type].transform([doc])\n weights = []\n vocab = self.tfidf[inp_type].vocabulary_\n for token in list_of_token:\n if token in vocab:\n idx = self.tfidf[inp_type].vocabulary_[token]\n weights.append(X[0, idx])\n else:\n weights.append(0.0)\n\n return weights\n\nclass DeepSeqMap(AbstractLoopLevelListOfTokensProcessor):\n def __init__(self, func):\n super(DeepSeqMap, self).__init__()\n self.func = func\n\n def process_list_of_tokens(self, data, inp_type):\n return self.func(data)\n\nclass Tokenizer(AbstractProcessor):\n def __init__(self):\n super(Tokenizer, self).__init__()\n self.tokenizer = nltk.tokenize.WordPunctTokenizer()\n\n def process(self, sentence, inp_type):\n return self.tokenizer.tokenize(sentence)\n\nclass NERTokenizer(AbstractProcessor):\n def __init__(self):\n super(NERTokenizer, self).__init__()\n self.execution_state = set(['transform'])\n\n def process(self, sentence, inp_type):\n return [token.ent_type_ for token in nlp(sentence, disable=['parse'])]\n\nclass DependencyParser(AbstractProcessor):\n def __init__(self):\n super(DependencyParser, self).__init__()\n self.execution_state = set(['transform'])\n\n def process(self, sentence, inp_type):\n return [token.dep_ for token in nlp(sentence)]\n\nclass POSTokenizer(AbstractProcessor):\n def __init__(self):\n super(POSTokenizer, self).__init__()\n self.execution_state = set(['transform'])\n\n def process(self, sentence, inp_type):\n return [token.pos_ for token in nlp(sentence, disable=['parse, entity'])]\n\nclass SentTokenizer(AbstractProcessor):\n def __init__(self):\n super(SentTokenizer, self).__init__()\n\n def process(self, sentence, inp_type):\n return [sent.text.replace('\\n', '') for sent in nlp(sentence, disable=['entity']).sents]\n\nclass CustomTokenizer(AbstractProcessor):\n def __init__(self, tokenizer_method):\n super(CustomTokenizer, self).__init__()\n self.tokenize = tokenizer_method\n\n def process(self, sentence, inp_type):\n return self.tokenize(sentence)\n\nclass NaiveNCharTokenizer(AbstractProcessor):\n def __init__(self, N=3):\n super(NaiveNCharTokenizer, self).__init__()\n self.N = N\n\n def process(self, sentence, inp_type):\n return [sentence[i:i+self.N] for i in range(0, len(sentence), self.N)]\n\nclass AddToVocab(AbstractLoopLevelTokenProcessor):\n def __init__(self, general_vocab_keys=['input', 'support']):\n super(AddToVocab, self).__init__()\n self.general_vocab_keys = set(general_vocab_keys)\n self.execution_state = set(['fit'])\n\n def process_token(self, token, inp_type):\n if inp_type == 'target':\n self.state['vocab']['general'].add_label(token)\n log.statistical('Example vocab target token {0}', 0.01, token)\n if inp_type in self.general_vocab_keys:\n self.state['vocab']['general'].add_token(token)\n message = 'Example vocab {0} token'.format(inp_type)\n log.statistical(message + ': {0}', 0.01, token)\n self.state['vocab'][inp_type].add_token(token)\n return token\n\nclass ToLower(AbstractProcessor):\n def __init__(self, 
exclude_keys=None):\n super(ToLower, self).__init__()\n self.exclude_keys = exclude_keys\n\n def process(self, token, inp_type):\n if self.exclude_keys is not None:\n if inp_type in self.exclude_keys:\n return token\n\n return token.lower()\n\n\nclass ConvertTokenToIdx(AbstractLoopLevelTokenProcessor):\n def __init__(self, keys2keys=None):\n super(ConvertTokenToIdx, self).__init__()\n self.keys2keys = keys2keys #maps key to other key, for example encode inputs with support vocabulary\n self.execution_state = set(['transform'])\n\n def process_token(self, token, inp_type):\n if not self.keys2keys is None and inp_type in self.keys2keys:\n return self.state['vocab'][self.keys2keys[inp_type]].get_idx(token)\n else:\n if inp_type != 'target':\n log.statistical('a non-label token {0}', 0.00001, token)\n return self.state['vocab']['general'].get_idx(token)\n else:\n log.statistical('a token {0}', 0.00001, token)\n return self.state['vocab']['general'].get_idx_label(token)\n\nclass ApplyFunction(AbstractProcessor):\n def __init__(self, func):\n super(ApplyFunction, self).__init__()\n self.func = func\n self.execution_state =['fit', 'transform']\n\n def process(self, data, inp_type):\n return self.func(data)\n\nclass SaveStateToList(AbstractProcessor):\n def __init__(self, name):\n super(SaveStateToList, self).__init__()\n self.name = name\n self.execution_state = set(['transform'])\n\n def link_with_pipeline(self, state):\n self.state = state\n if self.name not in self.state['data']:\n self.state['data'][self.name] = {}\n self.data = self.state['data'][self.name]\n\n def process(self, data, inp_type):\n if inp_type not in self.data: self.data[inp_type] = []\n self.data[inp_type].append(data)\n return data\n\nclass SaveLengthsToState(AbstractLoopLevelListOfTokensProcessor):\n def __init__(self):\n super(SaveLengthsToState, self).__init__()\n self.execution_state = set(['fit'])\n\n def link_with_pipeline(self, state):\n self.state = state\n self.state['data']['lengths'] = {}\n self.data = self.state['data']['lengths']\n\n def process_list_of_tokens(self, tokens, inp_type):\n if inp_type not in self.data: self.data[inp_type] = []\n self.data[inp_type].append(int(len(tokens)))\n log.statistical('A list of tokens: {0}', 0.0001, tokens)\n log.debug_once('Pipeline {1}: A list of tokens: {0}', tokens, self.state['name'])\n return tokens\n\nclass Idx2MultiTargetConverter(AbstractLoopLevelListOfTokensProcessor):\n def __init__(self, num_labels, stop_index=0):\n super(Idx2MultiTargetConverter, self).__init__()\n self.num_labels = num_labels\n self.stop_index = stop_index\n self.execution_state = set(['transform'])\n\n def process_list_of_tokens(self, tokens, inp_type):\n out = [0]*self.num_labels\n for col in tokens:\n if col == self.stop_index: break\n out[col] = 1\n\n return out\n\nclass SaveMaxLengthsToState(AbstractLoopLevelListOfTokensProcessor):\n def __init__(self):\n super(SaveMaxLengthsToState, self).__init__()\n self.execution_state = set(['fit'])\n\n def link_with_pipeline(self, state):\n self.state = state\n self.state['data']['max_lengths'] = {}\n self.data = self.state['data']['max_lengths']\n\n def process_list_of_tokens(self, tokens, inp_type):\n if inp_type not in self.data: self.data[inp_type] = 0\n self.data[inp_type] = max(self.data[inp_type], len(tokens))\n return tokens\n\nclass StreamToHDF5(AbstractLoopLevelListOfTokensProcessor):\n def __init__(self, name, samples_per_file=50000, keys=['input', 'support', 'target']):\n super(StreamToHDF5, self).__init__()\n self.execution_state = 
set(['transform'])\n self.max_length = None\n self.samples_per_file = samples_per_file\n self.name = name\n self.keys = copy.deepcopy(keys)\n if 'index' not in self.keys:\n self.keys.append('index')\n self.shard_id = {}\n self.max_lengths = {}\n self.data = {}\n self.datatypes = {}\n self.lengths = {}\n self.current_sample = {}\n self.idx = {}\n for key in self.keys:\n self.shard_id[key] = 0\n self.max_lengths[key] = 0\n self.data[key] = []\n self.datatypes[key] = None\n self.current_sample[key] = 0\n self.idx[key] = 0\n\n self.num_samples = None\n self.config = {'paths' : [], 'sample_count' : []}\n self.checked_for_lengths = False\n self.paths = {}\n self.shuffle_idx = None\n self.current_X = {}\n\n def link_with_pipeline(self, state):\n self.state = state\n self.base_path = join(self.state['path'], self.name)\n make_dirs_if_not_exists(self.base_path)\n\n def init_and_checks(self):\n if 'lengths' not in self.state['data']:\n log.error('Do a first pass to produce lengths first, that is use the \"SaveLengths\" ' \\\n 'processor, execute, clean processors, then rerun the pipeline with hdf5 streaming.')\n if self.num_samples == None:\n self.num_samples = len(self.state['data']['lengths'][self.keys[0]])\n log.debug('Using type int32 for inputs and supports for now, but this may not be correct in the future')\n self.checked_for_lengths = True\n self.num_samples = len(self.state['data']['lengths'][self.keys[0]])\n log.debug('Number of samples as calcualted with the length data (SaveLengthsToState): {0}', self.num_samples)\n\n def process_list_of_tokens(self, tokens, inp_type):\n if not self.checked_for_lengths:\n self.init_and_checks()\n\n if self.datatypes[inp_type] is None:\n if isinstance(tokens[0], float):\n self.datatypes[inp_type] = np.float32\n elif isinstance(tokens[0], int):\n self.datatypes[inp_type] = np.int32\n else:\n raise ValueError('Unsupported type: {0} for key {1}'.format(type(tokens[0]), inp_type))\n\n if self.max_lengths[inp_type] == 0:\n if 'max_lengths' in self.state['data']:\n max_length = self.state['data']['max_lengths'][inp_type]\n else:\n max_length = np.max(self.state['data']['lengths'][inp_type])\n log.debug('Calculated max length for input type {0} to be {1}', inp_type, max_length)\n self.max_lengths[inp_type] = max_length\n log.statistical('max length of the dataset: {0}', 0.0001, max_length)\n if inp_type not in self.current_X:\n self.current_X[inp_type] = np.zeros((self.samples_per_file, self.max_lengths[inp_type]), dtype=self.datatypes[inp_type])\n self.current_sample[inp_type] = 0\n self.current_X[inp_type][self.current_sample[inp_type], :len(tokens)] = tokens\n self.current_sample[inp_type] += 1\n\n if inp_type == self.keys[-2]:\n self.data['index'].append(self.idx[inp_type])\n self.idx[inp_type] += 1\n\n if (self.current_sample[inp_type] % self.samples_per_file == 0\n or self.idx[inp_type] == self.num_samples):\n if self.current_sample[inp_type] > 0:\n self.save_to_hdf5(inp_type)\n\n\n if self.idx[inp_type] % 10000 == 0:\n if self.idx[inp_type] % 50000 == 0:\n log.info('Processed {0} samples so far...', self.idx[inp_type])\n else:\n log.debug('Processed {0} samples so far...', self.idx[inp_type])\n\n if self.idx[inp_type] == self.num_samples:\n counts = np.array(self.config['sample_count'])\n log.debug('Counts for each shard: {0}'.format(counts))\n fractions = counts / np.float32(np.sum(counts))\n self.config['fractions'] = fractions.tolist()\n self.config['counts'] = counts.tolist()\n self.config['paths'] = []\n self.config['max_lengths'] = 
self.max_lengths\n for i in range(fractions.size):\n self.config['paths'].append(self.paths[i])\n\n pickle.dump(self.config, open(join(self.base_path, 'hdf5_config.pkl'), 'wb'), pickle.HIGHEST_PROTOCOL)\n\n return tokens\n\n def save_to_hdf5(self, inp_type):\n idx = self.shard_id[inp_type]\n if self.current_sample[inp_type] >= self.samples_per_file -1:\n X = self.current_X[inp_type]\n else:\n X = self.current_X[inp_type][:self.current_sample[inp_type]]\n file_name = inp_type + '_' + str(idx+1) + '.hdf5'\n\n if inp_type == 'input':\n #self.shuffle_idx = np.arange(X.shape[0])\n log.debug_once('First row of input data with shape {1} written to hdf5: {0}', X[0], X.shape)\n #X = X[self.shuffle_idx]\n log.debug('Writing hdf5 file for input type {0} to disk. Using index {1} and path {2}', inp_type, idx, join(self.base_path, file_name))\n log.debug('Writing hdf5 data. One sample row: {0}, shape: {1}, type: {2}', X[0], X.shape, X.dtype)\n save_data(join(self.base_path, file_name), X)\n if idx not in self.paths: self.paths[idx] = []\n self.paths[idx].append(join(self.base_path, file_name))\n\n if inp_type == self.keys[0]:\n log.statistical('Count of shard {0}; should be {1} most of the time'.format(X.shape[0], self.samples_per_file), 0.1)\n self.config['sample_count'].append(X.shape[0])\n\n if inp_type != self.keys[-2]:\n start = idx*self.samples_per_file\n end = (idx+1)*self.samples_per_file\n X_len = np.array(self.state['data']['lengths'][inp_type][start:end], dtype=np.int32)\n file_name_len = inp_type + '_lengths_' + str(idx+1) + '.hdf5'\n #X_len = X_len[self.shuffle_idx]\n save_data(join(self.base_path, file_name_len), X_len)\n self.paths[idx].append(join(self.base_path, file_name_len))\n else:\n start = idx*self.samples_per_file\n end = (idx+1)*self.samples_per_file\n X_len = np.array(self.state['data']['lengths'][inp_type][start:end], dtype=np.int32)\n file_name_len = inp_type + '_lengths_' + str(idx+1) + '.hdf5'\n #X_len = X_len[self.shuffle_idx]\n save_data(join(self.base_path, file_name_len), X_len)\n self.paths[idx].append(join(self.base_path, file_name_len))\n\n file_name_index = 'index_' + str(idx+1) + '.hdf5'\n index = np.arange(self.idx[inp_type] - X.shape[0], self.idx[inp_type], dtype=np.int32)\n #index = index[self.shuffle_idx]\n save_data(join(self.base_path, file_name_index), index)\n self.paths[idx].append(join(self.base_path, file_name_index))\n\n self.shard_id[inp_type] += 1\n self.current_X.pop(inp_type, None)\n self.current_sample[inp_type] = 0\n\n\n\n\nclass StreamToBatch(AbstractLoopLevelListOfTokensProcessor):\n def __init__(self, keys=['input', 'support', 'target'], seed=234234):\n super(StreamToBatch, self).__init__()\n self.execution_state = set(['transform'])\n self.str2var = {}\n self.str2samples = {}\n self.rdm = np.random.RandomState(seed)\n for key in keys:\n self.str2samples[key] = []\n\n def process_list_of_tokens(self, tokens, inp_type):\n self.str2samples[inp_type].append(tokens)\n return tokens\n\n def shuffle(self):\n idx = None\n for key in self.str2samples.keys():\n if idx is None:\n variable = self.str2samples[key]\n idx = np.arange(variable.shape[0])\n self.rdm.shuffle(idx)\n\n self.str2samples[key] = variable[idx]\n\n def get_data(self):\n for key, variable in self.str2samples.items():\n n = len(variable)\n lengths = [len(tokens) for tokens in variable]\n max_length = np.max(lengths)\n x = np.zeros((n, max_length))\n for row, (l, sample) in enumerate(zip(lengths, variable)):\n x[row,:l] = sample\n\n self.str2var[key] = x\n self.str2var[key + '_length'] = 
np.array(lengths)\n return self.str2var\n" ]
[ [ "numpy.arange", "numpy.max", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.sum", "numpy.random.RandomState" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dgketchum/etrm
[ "f74f5771fbc6ba5750a790e384eac422b598325a" ]
[ "zobs/orecharge/ETRM_distributed/ETRM_savAnMo_22APR16s.py" ]
[ "# ETRM - Evapotranspiration and Recharge Model, Point version, DISTRIBUTED\n# ETRM - Evapotranspiration and Recharge Model, Point version, DISTRIBUTED\n# David Ketchum, April 2016\nimport datetime\nimport calendar\nimport os\nfrom dateutil import rrule\nfrom osgeo import gdal\nimport numpy as np\n\nnp.set_printoptions(linewidth=700, precision=2)\n\nstartTime = datetime.datetime.now()\nprint(startTime)\ndef cells(array):\n window = array[480:510, 940:970]\n return window\n\n# Set start datetime object\nstart, end = datetime.datetime(2000, 1, 1), datetime.datetime(2013, 12, 31)\n# Define winter and summer for SNOW algorithm\nsWin, eWin = datetime.datetime(start.year, 11, 1), datetime.datetime(end.year, 3, 30)\n# Define monsoon for Ksat, presumed storm intensity\nsMon, eMon = datetime.datetime(start.year, 6, 1), datetime.datetime(start.year, 10, 1)\n\n# Read in static data as arrays\npath = 'C:\\\\Recharge_GIS\\\\OSG_Data\\\\current_use'\nraster = 'aws_mod_4_21_10_0'\naws_open = gdal.Open('{a}\\\\{b}.tif'.format(a=path, b=raster))\ntaw = np.array(aws_open.GetRasterBand(1).ReadAsArray(), dtype=float)\ndataset = aws_open\n# taw = aws[480:520, 940:980]\n# initialize ones and zeros arrays for use later\nmin_val = np.ones(taw.shape) * 0.001\ntaw = np.maximum(taw, min_val)\naws_open = []\n\nraster = 'nlcd_root_dpth_15apr'\nnlcd_rt_z_open = gdal.Open('{a}\\\\{b}.tif'.format(a=path, b=raster))\nnlcd_rt_z = np.array(nlcd_rt_z_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n# nlcd_rt_z = nlcd_rt_z[480:520, 940:980]\nnlcd_rt_z = np.maximum(nlcd_rt_z, min_val)\nnlcd_rt_z_open = []\n\nraster = 'nlcd_plnt_hgt1_250_m_degraded1'\nnlcd_plt_hgt_open = gdal.Open('{a}\\\\{b}.tif'.format(a=path, b=raster))\nnlcd_plt_hgt = np.array(nlcd_plt_hgt_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n# nlcd_plt_hgt = nlcd_plt_hgt[480:520, 940:980]\nnlcd_plt_hgt = np.maximum(nlcd_plt_hgt, min_val)\nnlcd_plt_hgt_open = []\n\nraster = 'Soil_Ksat_15apr' # convert from micrometer/sec to mm/day\nksat_open = gdal.Open('{a}\\\\{b}.tif'.format(a=path, b=raster))\nksat = np.array(ksat_open.GetRasterBand(1).ReadAsArray(), dtype=float) * 86.4\n# ksat = ksat[480:520, 940:980]\nksat1 = np.maximum(ksat, min_val)\nksat_open = []\n\nraster = 'tew_250_15apr'\ntew_open = gdal.Open('{a}\\\\{b}.tif'.format(a=path, b=raster))\ntew = np.array(tew_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n# tew = tew[480:520, 940:980]\ntew = np.maximum(tew, min_val)\ntew_open = []\n\npath = 'C:\\\\Recharge_GIS\\\\Array_Results\\\\initialize'\nraster = 'de_4_19_23_11'\nde_open = gdal.Open('{a}\\\\{b}.tif'.format(a=path, b=raster))\nde1 = np.array(de_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n# de = de[480:520, 940:980]\nde1 = np.where(np.isnan(de1) == True, np.zeros(taw.shape), de1)\nde_open = []\n\nraster = 'dr_4_19_23_11'\ndr_open = gdal.Open('{a}\\\\{b}.tif'.format(a=path, b=raster))\ndr1 = np.array(dr_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n# dr = dr[480:520, 940:980]\ndr1 = np.where(np.isnan(dr1) == True, np.zeros(taw.shape), dr1)\ndr_open = []\n\nraster = 'drew_4_19_23_11'\ndrew_open = gdal.Open('{a}\\\\{b}.tif'.format(a=path, b=raster))\ndrew1 = np.array(drew_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n# dr = dr[480:520, 940:980]\ndrew1 = np.where(np.isnan(drew1) == True, np.zeros(taw.shape), drew1)\ndrew_open = []\n\n# Create indices to plot point time series, these are empty lists that will\n# be filled as the simulation progresses\npltRain = []\npltEta = []\npltSnow_fall = []\npltRo = []\npltDr = []\npltPdr = 
[]\npltDe = []\npltDrew = []\npltTemp = []\npltTempM = []\npltDp_r = []\npltKs = []\npltEtrs = []\npltKcb = []\npltKe = []\npltMlt = []\npltSwe = []\npltDay = []\npltFs1 = []\npltPpt = []\npltKr = []\npltMass = []\n\np_mo_Et = np.zeros(taw.shape)\np_mo_Precip = np.zeros(taw.shape)\np_mo_Ro = np.zeros(taw.shape)\np_mo_deps = dr1 + de1 + drew1\np_mo_Infil = np.zeros(taw.shape)\np_mo_Etrs = np.zeros(taw.shape)\n\np_yr_Et = np.zeros(taw.shape)\np_yr_Precip = np.zeros(taw.shape)\np_yr_Ro = np.zeros(taw.shape)\np_yr_deps = dr1 + de1 + drew1\np_yr_Infil = np.zeros(taw.shape)\np_yr_Etrs = np.zeros(taw.shape)\n\ndp_r_mo = []\nref_et_mo = []\net_mo = []\nprecip_mo = []\nrunoff_mo = []\nsnow_ras_mo = []\ndelta_s_mo = []\n\ndp_r_yr = []\nref_et_yr = []\net_yr = []\nprecip_yr = []\nrunoff_yr = []\nsnow_ras_yr = []\ndelta_s_yr = []\n\n# Define user-controlled constants, these are constants to start with day one, replace\n# with spin-up data when multiple years are covered\nze = np.ones(taw.shape) * 40\np = np.ones(taw.shape) * 0.4\nkc_min = np.ones(taw.shape) * 0.15\ninfil = np.zeros(taw.shape)\nprecip = np.zeros(taw.shape)\nref_et = np.zeros(taw.shape)\nkr = np.zeros(taw.shape)\nks = np.zeros(taw.shape)\npKcb = np.zeros(taw.shape)\npKr = np.zeros(taw.shape)\npKs = np.zeros(taw.shape)\net = np.zeros(taw.shape)\nrunoff = np.zeros(taw.shape)\nppt_tom = np.zeros(taw.shape)\nfb = np.ones(taw.shape) * 0.25\nswe = np.zeros(taw.shape)\nke_max = 1.0\ntot_mass = np.zeros(taw.shape)\ncum_mass = np.zeros(taw.shape)\ntot_transp = np.zeros(taw.shape)\ntot_evap = np.zeros(taw.shape)\na_min = np.ones(taw.shape) * 0.45\na_max = np.ones(taw.shape) * 0.90\na = a_max\npA = a_min\n\nfor dday in rrule.rrule(rrule.DAILY, dtstart=start, until=end):\n if dday != start:\n pKcb = kcb\n doy = dday.timetuple().tm_yday\n print(\"Time : {a} day {b}_{c}\".format(a=str(datetime.datetime.now() - startTime), b=doy, c=dday.year))\n # NDVI to kcb\n if dday.year == 2000:\n path = 'F:\\\\NDVI\\\\NDVI_std_all'\n ras_list = os.listdir('F:\\\\NDVI\\\\NDVI_std_all')\n obj = [1, 49, 81, 113, 145, 177, 209, 241, 273, 305, 337]\n if doy < 49:\n strt = 1\n band = doy\n nd = 48\n raster = '{a}\\\\T{b}_{c}_2000_etrf_subset_001_048_ndvi_daily.tif'.format(a=path,\n b=str(strt).rjust(3, '0'),\n c=str(nd).rjust(3, '0'))\n ndvi_open = gdal.Open(raster)\n ndvi = np.array(ndvi_open.GetRasterBand(band).ReadAsArray(), dtype=float)\n ndvi_open = []\n kcb = ndvi * 1.25\n else:\n for num in obj[1:]:\n diff = doy - num\n if 0 <= diff <= 31:\n pos = obj.index(num)\n strt = obj[pos]\n band = diff + 1\n if num == 337:\n nd = num + 29\n else:\n nd = num + 31\n raster = '{a}\\\\T{b}_{c}_2000_etrf_subset_001_048_ndvi_daily.tif'.format(a=path,\n b=str(strt).rjust(3, '0'),\n c=str(nd).rjust(3, '0'))\n ndvi_open = gdal.Open(raster)\n ndvi = np.array(ndvi_open.GetRasterBand(band).ReadAsArray(), dtype=float)\n ndvi_open = []\n kcb = ndvi * 1.25\n\n elif dday.year == 2001:\n path = \"F:\\\\NDVI\\\\NDVI_std_all\"\n obj = [1, 17, 33, 49, 65, 81, 97, 113, 129, 145, 161, 177, 193, 209,\n 225, 241, 257, 273, 289, 305, 321, 337, 353]\n for num in obj:\n diff = doy - num\n if 0 <= diff <= 15:\n pos = obj.index(num)\n strt = obj[pos]\n band = diff + 1\n if num == 353:\n nd = num + 12\n else:\n nd = num + 15\n raster = '{a}\\\\{b}_{c}_{d}.tif'.format(a=path, b=dday.year, c=strt, d=nd)\n ndvi_open = gdal.Open(raster)\n ndvi = np.array(ndvi_open.GetRasterBand(band).ReadAsArray(), dtype=float)\n ndvi_open = []\n kcb = ndvi * 1.25\n\n else:\n path = \"F:\\\\NDVI\\\\NDVI_std_all\"\n obj = 
[1, 17, 33, 49, 65, 81, 97, 113, 129, 145, 161, 177, 193, 209,\n 225, 241, 257, 273, 289, 305, 321, 337, 353]\n for num in obj:\n diff = doy - num\n if 0 <= diff <= 15:\n pos = obj.index(num)\n strt = obj[pos]\n band = diff + 1\n if num == 353:\n nd = num + 12\n else:\n nd = num + 15\n raster = '{a}\\\\{b}_{c}.tif'.format(a=path, b=dday.year, c=pos+1, d=nd)\n ndvi_open = gdal.Open(raster)\n ndvi = np.array(ndvi_open.GetRasterBand(band).ReadAsArray(), dtype=float)\n ndvi_open = []\n kcb = ndvi * 1.25\n # kcb = kcb[480:520, 940:980]\n kcb = np.maximum(kcb, min_val)\n kcb = np.where(np.isnan(kcb) == True, pKcb, kcb)\n\n # PRISM to ppt\n # Remember to use the new PRISM!\n path = 'F:\\\\PRISM\\Precip\\\\800m_std_all'\n raster = '{a}\\\\PRISMD2_NMHW2mi_{b}{c}{d}.tif'.format(a=path, b=dday.year,\n c=str(dday.month).rjust(2, '0'),\n d=str(dday.day).rjust(2, '0'))\n ppt_open = gdal.Open(raster)\n ppt = np.array(ppt_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n\n dday_tom = dday + datetime.timedelta(days=1)\n raster_tom = '{a}\\\\PRISMD2_NMHW2mi_{b}{c}{d}.tif'.format(a=path, b=dday.year,\n c=str(dday_tom.month).rjust(2, '0'),\n d=str(dday_tom.day).rjust(2, '0'))\n ppt_tom_open = gdal.Open(raster_tom)\n ppt_tom = np.array(ppt_tom_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n ppt_open = []\n ppt_tom_open = []\n # ppt = ppt[480:520, 940:980]\n # ppt_tom = ppt_tom[480:520, 940:980]\n ppt = np.maximum(ppt, np.zeros(taw.shape))\n ppt_tom = np.maximum(ppt_tom, np.zeros(taw.shape))\n\n # PRISM to mintemp, maxtemp, temp\n if dday.year in [2000, 2001, 2003, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013]:\n path = \"F:\\\\PRISM\\\\Temp\\\\Minimum_standard\"\n raster = '{a}\\\\cai_tmin_us_us_30s_{b}{c}{d}.tif'.format(a=path, b=dday.year,\n c=str(dday.month).rjust(2, '0'),\n d=str(dday.day).rjust(2, '0'))\n minTemp_open = gdal.Open(raster)\n min_temp = np.array(minTemp_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n minTemp_open = []\n else:\n path = \"F:\\\\PRISM\\\\Temp\\\\Minimum_standard\"\n raster = '{a}\\\\TempMin_NMHW2Buff_{b}{c}{d}.tif'.format(a=path, b=dday.year,\n c=str(dday.month).rjust(2, '0'),\n d=str(dday.day).rjust(2, '0'))\n minTemp_open = gdal.Open(raster)\n min_temp = np.array(minTemp_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n minTemp_open = []\n path = \"F:\\\\PRISM\\\\Temp\\\\Maximum_standard\"\n raster = '{a}\\\\TempMax_NMHW2Buff_{b}{c}{d}.tif'.format(a=path, b=dday.year,\n c=str(dday.month).rjust(2, '0'),\n d=str(dday.day).rjust(2, '0'))\n maxTemp_open = gdal.Open(raster)\n max_temp = np.array(maxTemp_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n maxTemp_open = []\n\n temp = (min_temp + max_temp)/2\n\n # min_temp = min_temp[480:520, 940:980]\n # max_temp = max_temp[480:520, 940:980]\n # temp = temp[480:520, 940:980]\n\n # PM data to etrs\n path = \"F:\\\\PM_RAD\"\n raster = '{a}\\\\PM{d}\\\\PM_NM_{b}_{c}.tif'.format(a=path, b=dday.year, c=str(doy).rjust(3, '0'), d=dday.year)\n etrs_open = gdal.Open(raster)\n etrs = np.array(etrs_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n etrs_open = []\n # etrs = etrs[480:520, 940:980]\n etrs = np.maximum(etrs, min_val)\n\n # Net Longwave Radiation Data is with the PM data\n path = \"F:\\\\PM_RAD\"\n raster = '{a}\\\\PM{d}\\\\RLIN_NM_{b}_{c}.tif'.format(a=path, b=dday.year, c=str(doy).rjust(3, '0'), d=dday.year)\n rlin_open = gdal.Open(raster)\n rlin = np.array(rlin_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n rlin_open = []\n # rlin = rlin[480:520, 940:980]\n rlin = np.maximum(rlin, np.zeros(taw.shape))\n\n # 
Net Shortwave Radiation Data\n path = \"F:\\\\PM_RAD\"\n raster = '{a}\\\\rad{d}\\\\RTOT_{b}_{c}.tif'.format(a=path, b=dday.year, c=str(doy).rjust(3, '0'), d=dday.year)\n rg_open = gdal.Open(raster)\n rg = np.array(rg_open.GetRasterBand(1).ReadAsArray(), dtype=float)\n rg_open = []\n # rg = rg[480:520, 940:980]\n rg = np.maximum(rg, np.zeros(taw.shape))\n\n # ETRM Daily Run #######################################################################\n\n day_of_year = dday.timetuple().tm_yday\n if dday == start:\n # Total evaporable water is depth of water in the evaporable\n # soil layer, i.e., the water available to both stage 1 and 2 evaporation\n\n rew = np.minimum((2+(tew/3.)), 0.8 * tew)\n # del tew1, tew2\n\n # you should have all these from previous model runs\n pDr = dr1\n pDe = de1\n pDrew = drew1\n dr = dr1\n de = de1\n drew = drew1\n\n if sMon.timetuple().tm_yday <= dday.timetuple().tm_yday <= eMon.timetuple().tm_yday:\n ksat = ksat1 * 2/24.\n else:\n ksat = ksat1 * 6/24.\n\n kc_max_1 = kcb + 0.0001\n min_val = np.ones(taw.shape) * 0.0001\n kc_max = np.maximum(min_val, kc_max_1)\n # del kc_max_1\n\n nlcd_plt_hgt = nlcd_plt_hgt * 0.5 + 1\n numr = np.maximum(kcb - kc_min, min_val * 10)\n denom = np.maximum((kc_max - kc_min), min_val * 10)\n fcov_ref = (numr / denom) ** nlcd_plt_hgt\n fcov_min = np.minimum(fcov_ref, np.ones(taw.shape))\n fcov = np.maximum(fcov_min, min_val * 10)\n few = np.maximum(1 - fcov, 0.01) # exposed ground\n # del numr, denom, fcov_ref, fcov_min\n\n pKr = kr\n kr = np.minimum(((tew - de) / (tew - rew)), np.ones(taw.shape))\n kr = np.where(np.isnan(kr) == True, pKr, kr)\n\n pKs = ks\n ks_ref = np.where(((taw - pDr) / (0.6 * taw)) < np.zeros(taw.shape), np.ones(taw.shape) * 0.001,\n ((taw - pDr) / (0.6 * taw)))\n ks_ref = np.where(np.isnan(ks) == True, pKs, ks_ref)\n ks = np.minimum(ks_ref, np.ones(taw.shape))\n\n # Ke evaporation reduction coefficient; stage 1 evaporation\n fsa = np.where(np.isnan((rew - drew) / (ke_max * etrs)) == True, np.zeros(taw.shape), (rew - drew) / (ke_max * etrs))\n fsb = np.minimum(fsa, np.ones(taw.shape))\n fs1 = np.maximum(fsb, np.zeros(taw.shape))\n ke = np.where(drew < rew, np.minimum((fs1 + (1 - fs1) * kr) * (kc_max - ks * kcb), few * kc_max), np.zeros(taw.shape))\n\n transp = (ks * kcb) * etrs\n et_init = (ks * kcb + ke) * etrs\n eta = np.maximum(et_init, np.zeros(taw.shape))\n evap_init = ke * etrs\n evap_min = np.maximum(evap_init, np.zeros(taw.shape))\n evap = np.minimum(evap_min, kc_max)\n\n # Load temp, find swe, melt, and precipitation, load Ksat\n # Use SNOTEL data for precip and temps:\n # df_snow : (stel_date, stel_snow, stel_precip, stel_tobs, stel_tmax, stel_tmin, stel_tavg, stel_snwd)\n\n snow_fall = np.where(temp <= 0.0, ppt, np.zeros(taw.shape))\n rain = np.where(temp >= 0.0, ppt, np.zeros(taw.shape))\n\n pA = a\n a = np.where(snow_fall > 3.0, np.ones(taw.shape) * a_max, a)\n a = np.where(snow_fall <= 3.0, a_min + (pA - a_min) * np.exp(-0.12), a)\n a = np.where(snow_fall == 0.0, a_min + (pA - a_min) * np.exp(-0.05), a)\n a = np.where(a < a_min, a_min, a)\n\n swe += snow_fall\n\n mlt_init = np.maximum(((1 - a) * rg * 0.2) + (temp - 1.8) * 11.0, np.zeros(taw.shape)) # use calibrate coefficients\n mlt = np.minimum(swe, mlt_init)\n\n swe -= mlt\n\n # Find depletions\n pDr = dr\n pDe = de\n pDrew = drew\n watr = rain + mlt\n deps = dr + de + drew\n # print cells(watr)\n # print cells(deps)\n\n ro = np.zeros(taw.shape)\n ro = np.where(watr > ksat + deps, watr - ksat - deps, ro)\n ro = np.maximum(ro, np.zeros(taw.shape))\n # 
print cells(ro)\n\n dp_r = np.zeros(taw.shape)\n id1 = np.where(watr > deps, np.ones(taw.shape), np.zeros(taw.shape))\n id2 = np.where(ksat > watr - deps, np.ones(taw.shape), np.zeros(taw.shape))\n dp_r = np.where(id1 + id2 > 1.99, np.maximum(watr - deps, np.zeros(taw.shape)), dp_r)\n # print cells(dp_r)\n dp_r = np.where(watr > ksat + deps, ksat, dp_r)\n dp_r = np.maximum(dp_r, np.zeros(taw.shape))\n # print cells(dp_r)\n\n drew_1 = np.minimum((pDrew + ro + (evap - (rain + mlt))), rew)\n drew = np.maximum(drew_1, np.zeros(taw.shape))\n diff = np.maximum(pDrew - drew, np.zeros(taw.shape))\n\n de_1 = np.minimum((pDe + (evap - (rain + mlt - diff))), tew)\n de = np.maximum(de_1, np.zeros(taw.shape))\n diff = np.maximum(((pDrew - drew) + (pDe - de)), np.zeros(taw.shape))\n\n dr_1 = np.minimum((pDr + ((transp + dp_r) - (rain + mlt - diff))), taw)\n dr = np.maximum(dr_1, np.zeros(taw.shape))\n # dr = (pDr + dr_2) / 2.\n\n # Create cumulative rasters to show net over entire run\n\n infil += dp_r\n infil = np.maximum(infil, np.zeros(taw.shape))\n\n prev_et = et\n ref_et += etrs\n et = et + evap + transp\n et_ind = et / ref_et\n et = np.where(np.isnan(et) == True, prev_et, et)\n et = np.where(et > ref_et, ref_et / 2., et)\n et = np.maximum(et, np.ones(taw.shape) * 0.001)\n\n precip = precip + rain + snow_fall\n precip = np.maximum(precip, np.zeros(taw.shape))\n\n runoff += ro\n runoff = np.maximum(runoff, np.zeros(taw.shape))\n\n snow_ras = swe + snow_fall - mlt\n snow_ras = np.maximum(snow_ras, np.zeros(taw.shape))\n\n if dday.month == 5 and dday.day == 10 and dday.year == 2009:\n outputs = [dp_r, eta, etrs, ppt, ro, swe, deps]\n output_names = ['infil', 'et', 'etrs', 'precip', 'runoff', 'swe', 'deps']\n\n x = 0\n now = datetime.datetime.now()\n tag = 'saved_on_{}_{}'.format(now.month, now.day)\n for element in outputs:\n name = output_names[x]\n print(\"Saving {}_{}_{}_{}\".format(name, dday.day, dday.month, dday.year))\n driver = gdal.GetDriverByName('GTiff')\n filename = 'C:\\\\Recharge_GIS\\\\Array_Results\\\\forPeter_3MAY\\\\{a}_{b}_{c}.tif'.format(a=name, b=dday.month, c=dday.year)\n cols = dataset.RasterXSize\n rows = dataset.RasterYSize\n bands = dataset.RasterCount\n band = dataset.GetRasterBand(1)\n datatype = band.DataType\n outDataset = driver.Create(filename, cols, rows, bands, datatype)\n geoTransform = dataset.GetGeoTransform()\n outDataset.SetGeoTransform(geoTransform)\n proj = dataset.GetProjection()\n outDataset.SetProjection(proj)\n outBand = outDataset.GetRasterBand(1)\n outBand.WriteArray(element, 0, 0)\n x += 1\n if dday.month == 7 and dday.day == 29 and dday.year == 2009:\n outputs = [dp_r, eta, etrs, ppt, ro, swe, deps]\n output_names = ['infil', 'et', 'etrs', 'precip', 'runoff', 'swe', 'deps']\n\n x = 0\n now = datetime.datetime.now()\n tag = 'saved_on_{}_{}'.format(now.month, now.day)\n for element in outputs:\n name = output_names[x]\n print(\"Saving {}_{}_{}_{}\".format(name, dday.day, dday.month, dday.year))\n driver = gdal.GetDriverByName('GTiff')\n filename = 'C:\\\\Recharge_GIS\\\\Array_Results\\\\forPeter_3MAY\\\\{a}_{b}_{c}.tif'.format(a=name, b=dday.month, c=dday.year)\n cols = dataset.RasterXSize\n rows = dataset.RasterYSize\n bands = dataset.RasterCount\n band = dataset.GetRasterBand(1)\n datatype = band.DataType\n outDataset = driver.Create(filename, cols, rows, bands, datatype)\n geoTransform = dataset.GetGeoTransform()\n outDataset.SetGeoTransform(geoTransform)\n proj = dataset.GetProjection()\n outDataset.SetProjection(proj)\n outBand = 
outDataset.GetRasterBand(1)\n outBand.WriteArray(element, 0, 0)\n x += 1\n\n if dday.month == 5 and dday.day == 13 and dday.year == 2010:\n outputs = [dp_r, eta, etrs, ppt, ro, swe, deps]\n output_names = ['infil', 'et', 'etrs', 'precip', 'runoff', 'swe', 'deps']\n\n x = 0\n now = datetime.datetime.now()\n tag = 'saved_on_{}_{}'.format(now.month, now.day)\n for element in outputs:\n name = output_names[x]\n print(\"Saving {}_{}_{}_{}\".format(name, dday.day, dday.month, dday.year))\n driver = gdal.GetDriverByName('GTiff')\n filename = 'C:\\\\Recharge_GIS\\\\Array_Results\\\\forPeter_3MAY\\\\{a}_{b}_{c}.tif'.format(a=name, b=dday.month, c=dday.year)\n cols = dataset.RasterXSize\n rows = dataset.RasterYSize\n bands = dataset.RasterCount\n band = dataset.GetRasterBand(1)\n datatype = band.DataType\n outDataset = driver.Create(filename, cols, rows, bands, datatype)\n geoTransform = dataset.GetGeoTransform()\n outDataset.SetGeoTransform(geoTransform)\n proj = dataset.GetProjection()\n outDataset.SetProjection(proj)\n outBand = outDataset.GetRasterBand(1)\n outBand.WriteArray(element, 0, 0)\n x += 1\n # use monthrange check to find last day of each month and save rasters\n mo_date = calendar.monthrange(dday.year, dday.month)\n if dday.day == mo_date[1]:\n infil_mo = infil - p_mo_Infil\n infil_mo = np.maximum(infil_mo, np.zeros(taw.shape))\n\n ref_et_mo = etrs - p_mo_Etrs\n et_mo = et - p_mo_Et\n et_mo = np.where(np.isnan(et_mo) == True, p_mo_Et, et_mo)\n et_mo = np.where(et_mo > ref_et, ref_et / 2., et_mo)\n et_mo = np.maximum(et_mo, np.ones(taw.shape) * 0.001)\n\n precip_mo = precip - p_mo_Precip\n precip_mo = np.maximum(precip_mo, np.zeros(taw.shape))\n\n runoff_mo = ro - p_mo_Ro\n runoff_mo = np.maximum(runoff_mo, np.zeros(taw.shape))\n\n snow_ras_mo = swe\n snow_ras_mo = np.maximum(snow_ras_mo, np.zeros(taw.shape))\n\n mo_deps = drew + de + dr\n delta_s_mo = p_mo_deps - mo_deps\n\n outputs = [infil_mo, et_mo, precip_mo, runoff_mo, snow_ras_mo, delta_s_mo, mo_deps]\n output_names = ['infil', 'et', 'precip', 'runoff', 'snow_ras', 'delta_s_mo', 'mo_deps']\n\n x = 0\n now = datetime.datetime.now()\n tag = 'saved_on_{}_{}'.format(now.month, now.day)\n for element in outputs:\n name = output_names[x]\n print(\"Saving {a}_{b}_{c}\".format(a=name, b=dday.month, c=dday.year))\n driver = gdal.GetDriverByName('GTiff')\n filename = 'F:\\\\Monthly_results\\\\{a}_{b}_{c}.tif'.format(a=name, b=dday.month, c=dday.year)\n cols = dataset.RasterXSize\n rows = dataset.RasterYSize\n bands = dataset.RasterCount\n band = dataset.GetRasterBand(1)\n datatype = band.DataType\n outDataset = driver.Create(filename, cols, rows, bands, datatype)\n geoTransform = dataset.GetGeoTransform()\n outDataset.SetGeoTransform(geoTransform)\n proj = dataset.GetProjection()\n outDataset.SetProjection(proj)\n outBand = outDataset.GetRasterBand(1)\n outBand.WriteArray(element, 0, 0)\n x += 1\n\n p_mo_Et = et\n p_mo_Precip = precip\n p_mo_Ro = ro\n p_mo_deps = mo_deps\n p_mo_Infil = infil\n p_mo_Etrs = etrs\n\n if dday.day == 31 and dday.month == 12:\n infil_yr = infil - p_yr_Infil\n infil_yr = np.maximum(infil_yr, np.zeros(taw.shape))\n\n ref_et_yr = etrs - p_yr_Etrs\n et_yr = et - p_yr_Et\n et_yr = np.where(np.isnan(et_yr) == True, p_yr_Et, et_yr)\n et_yr = np.where(et_yr > ref_et, ref_et / 2., et_yr)\n et_yr = np.maximum(et_yr, np.ones(taw.shape) * 0.001)\n\n precip_yr = precip - p_yr_Precip\n precip_yr = np.maximum(precip_yr, np.zeros(taw.shape))\n\n runoff_yr = ro - p_yr_Ro\n runoff_yr = np.maximum(runoff_yr, 
np.zeros(taw.shape))\n\n snow_ras_yr = swe\n snow_ras_yr = np.maximum(snow_ras_yr, np.zeros(taw.shape))\n\n yr_deps = drew + de + dr\n delta_s_yr = p_yr_deps - yr_deps\n\n outputs = [infil_yr, et_yr, precip_yr, runoff_yr, snow_ras_yr, delta_s_yr, yr_deps]\n output_names = ['infil', 'et', 'precip', 'runoff', 'snow_ras', 'delta_s_yr', 'yr_deps']\n\n x = 0\n for element in outputs:\n name = output_names[x]\n print(\"Saving {a}_{c}\".format(a=name, c=dday.year))\n driver = gdal.GetDriverByName('GTiff')\n filename = 'F:\\\\Annual_results\\\\{a}_{c}.tif'.format(a=name, c=dday.year)\n cols = dataset.RasterXSize\n rows = dataset.RasterYSize\n bands = dataset.RasterCount\n band = dataset.GetRasterBand(1)\n datatype = band.DataType\n outDataset = driver.Create(filename, cols, rows, bands, datatype)\n geoTransform = dataset.GetGeoTransform()\n outDataset.SetGeoTransform(geoTransform)\n proj = dataset.GetProjection()\n outDataset.SetProjection(proj)\n outBand = outDataset.GetRasterBand(1)\n outBand.WriteArray(element, 0, 0)\n x += 1\n\n p_yr_Et = et\n p_yr_Precip = precip\n p_yr_Ro = ro\n p_mo_deps = yr_deps\n p_yr_Infil = infil\n p_yr_Etrs = etrs\n\n # Check MASS BALANCE for the love of WATER!!!\n mass = rain + mlt - (ro + transp + evap + dp_r + ((pDr - dr) + (pDe - de) + (pDrew - drew)))\n tot_mass += abs(mass)\n cum_mass += mass\n print(mass[480, 940])\n print(tot_mass[480, 940])\n\n pltDay.append(dday)\n pltRain.append(rain[480, 940])\n pltEta.append(eta[480, 940])\n pltSnow_fall.append(snow_fall[480, 940])\n pltRo.append(ro[480, 940])\n pltDr.append(dr[480, 940])\n pltDe.append(de[480, 940])\n pltDrew.append(drew[480, 940])\n pltTemp.append(temp[480, 940])\n pltDp_r.append(dp_r[480, 940])\n pltKs.append(ks[480, 940])\n pltPdr.append(pDr[480, 940])\n pltEtrs.append(etrs[480, 940])\n pltKcb.append(kcb[480, 940])\n pltPpt.append(ppt[480, 940])\n pltKe.append(ke[480, 940])\n pltKr.append(kr[480, 940])\n pltMlt.append(mlt[480, 940])\n pltSwe.append(swe[480, 940])\n pltTempM.append(max_temp[480, 940])\n pltFs1.append(fs1[480, 940])\n pltMass.append(mass[480, 940])\n\n\n# fdata = np.column_stack((pltSnow_fall, pltRain, pltMlt, pltEta, pltRo, pltDp_r, pltDr, pltDe, pltDrew, pltMass))\n# np.savetxt('C:\\\\Recharge_GIS\\\\Array_Results\\\\array_records\\\\10apr16_ETRM_mass.csv',\n# fdata, fmt=['%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f', '%1.3f'],\n# delimiter=',')\n\noutputs = [infil, et, precip, runoff, snow_ras, tot_mass, cum_mass, dr, de, drew, taw, tew, rew]\noutput_names = ['infil', 'et', 'precip', 'runoff', 'snow_ras', 'tot_mass', 'cum_mass', 'dr', 'de', 'drew', 'taw', 'tew',\n 'rew']\nx = 0\nnow = datetime.datetime.now()\ntag = '{}_{}_{}_{}'.format(now.month, now.day, now.hour, now.minute)\nfor element in outputs:\n name = output_names[x]\n print(\"Saving {a}\".format(a=name))\n driver = gdal.GetDriverByName('GTiff')\n filename = 'F:\\\\ETRM_14yr_results\\\\{a}.tif'.format(a=name)\n cols = dataset.RasterXSize\n rows = dataset.RasterYSize\n bands = dataset.RasterCount\n band = dataset.GetRasterBand(1)\n datatype = band.DataType\n outDataset = driver.Create(filename, cols, rows, bands, datatype)\n geoTransform = dataset.GetGeoTransform()\n outDataset.SetGeoTransform(geoTransform)\n proj = dataset.GetProjection()\n outDataset.SetProjection(proj)\n outBand = outDataset.GetRasterBand(1)\n outBand.WriteArray(element, 0, 0)\n x += 1\n\n\n\n" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.isnan", "numpy.set_printoptions", "numpy.ones", "numpy.exp", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
annis/cosmosis
[ "55efc1bc2260ca39298c584ae809fa2a8e72a38e" ]
[ "cosmosis/samplers/emcee/emcee_sampler.py" ]
[ "from .. import ParallelSampler, sample_ellipsoid\nimport numpy as np\nimport sys\n\n\ndef log_probability_function(p):\n r = emcee_pipeline.run_results(p)\n return r.post, (r.prior, r.extra)\n\n\nclass EmceeSampler(ParallelSampler):\n parallel_output = False\n supports_resume = True\n sampler_outputs = [(\"prior\", float), (\"post\", float)]\n\n def config(self):\n global emcee_pipeline\n emcee_pipeline = self.pipeline\n\n if self.is_master():\n import emcee\n self.emcee = emcee\n\n self.emcee_version = int(self.emcee.__version__[0])\n\n\n # Parameters of the emcee sampler\n self.nwalkers = self.read_ini(\"walkers\", int, 2)\n self.samples = self.read_ini(\"samples\", int, 1000)\n self.nsteps = self.read_ini(\"nsteps\", int, 100)\n\n assert self.nsteps>0, \"You specified nsteps<=0 in the ini file - please set a positive integer\"\n assert self.samples>0, \"You specified samples<=0 in the ini file - please set a positive integer\"\n\n random_start = self.read_ini(\"random_start\", bool, False)\n start_file = self.read_ini(\"start_points\", str, \"\")\n covmat_file = self.read_ini(\"covmat\", str, \"\")\n self.ndim = len(self.pipeline.varied_params)\n\n #Starting positions and values for the chain\n self.num_samples = 0\n self.prob0 = None\n self.blob0 = None\n\n if start_file:\n self.p0 = self.load_start(start_file)\n self.output.log_info(\"Loaded starting position from %s\", start_file)\n elif self.distribution_hints.has_cov():\n center = self.start_estimate()\n cov = self.distribution_hints.get_cov()\n self.p0 = sample_ellipsoid(center, cov, size=self.nwalkers)\n self.output.log_info(\"Generating starting positions from covmat from earlier in pipeline\")\n elif covmat_file:\n center = self.start_estimate()\n cov = self.load_covmat(covmat_file)\n self.output.log_info(\"Generating starting position from covmat in %s\", covmat_file)\n iterations_limit = 100000\n n=0\n p0 = []\n for i in range(iterations_limit):\n p = self.emcee.utils.sample_ellipsoid(center, cov)[0]\n if np.isfinite(self.pipeline.prior(p)):\n p0.append(p)\n if len(p0)==self.nwalkers:\n break\n else:\n raise ValueError(\"The covmat you used could not generate points inside the prior\")\n self.p0 = np.array(p0)\n elif random_start:\n self.p0 = [self.pipeline.randomized_start()\n for i in range(self.nwalkers)]\n self.output.log_info(\"Generating random starting positions from within prior\")\n else:\n center_norm = self.pipeline.normalize_vector(self.start_estimate())\n sigma_norm=np.repeat(1e-3, center_norm.size)\n p0_norm = self.emcee.utils.sample_ball(center_norm, sigma_norm, size=self.nwalkers)\n p0_norm[p0_norm<=0] = 0.001\n p0_norm[p0_norm>=1] = 0.999\n self.p0 = [self.pipeline.denormalize_vector(p0_norm_i) for p0_norm_i in p0_norm]\n self.output.log_info(\"Generating starting positions in small ball around starting point\")\n\n #Finally we can create the sampler\n self.ensemble = self.emcee.EnsembleSampler(self.nwalkers, self.ndim,\n log_probability_function,\n pool=self.pool)\n\n def resume(self):\n if self.output.resumed:\n data = np.genfromtxt(self.output._filename, invalid_raise=False)[:, :self.ndim]\n num_samples = len(data) // self.nwalkers\n self.p0 = data[-self.nwalkers:]\n self.num_samples += num_samples\n if self.num_samples >= self.samples:\n print(\"You told me to resume the chain - it has already completed (with {} samples), so sampling will end.\".format(len(data)))\n print(\"Increase the 'samples' parameter to keep going.\")\n else:\n print(\"Continuing emcee from existing chain - have {} samples 
already\".format(len(data)))\n\n def load_start(self, filename):\n #Load the data and cut to the bits we need.\n #This means you can either just use a test file with\n #starting points, or an emcee output file.\n data = np.genfromtxt(filename, invalid_raise=False)[-self.nwalkers:, :self.ndim]\n if data.shape != (self.nwalkers, self.ndim):\n raise RuntimeError(\"There are not enough lines or columns \"\n \"in the starting point file %s\" % filename)\n return list(data)\n\n\n def load_covmat(self, covmat_file):\n covmat = np.loadtxt(covmat_file)\n\n if covmat.ndim == 0:\n covmat = covmat.reshape((1, 1))\n elif covmat.ndim == 1:\n covmat = np.diag(covmat ** 2)\n\n nparams = len(self.pipeline.varied_params)\n if covmat.shape != (nparams, nparams):\n raise ValueError(\"The covariance matrix was shape (%d x %d), \"\n \"but there are %d varied parameters.\" %\n (covmat.shape[0], covmat.shape[1], nparams))\n return covmat\n\n\n def output_samples(self, pos, prob, extra_info):\n for params, post, extra in zip(pos,prob,extra_info):\n prior, extra = extra \n self.output.parameters(params, extra, prior, post)\n\n def execute(self):\n #Run the emcee sampler.\n if self.num_samples == 0:\n print(\"Begun sampling\")\n outputs = []\n if self.emcee_version < 3:\n kwargs = dict(lnprob0=self.prob0, blobs0=self.blob0, \n iterations=self.nsteps, storechain=False)\n else:\n # In emcee3 we have to enable storing the chain because\n # we want the acceptance fraction. Also the name of one\n # of the parameters has changed.\n kwargs = dict(log_prob0=self.prob0, blobs0=self.blob0, \n iterations=self.nsteps, store=True)\n\n for (pos, prob, rstate, extra_info) in self.ensemble.sample(self.p0, **kwargs):\n outputs.append((pos.copy(), prob.copy(), np.copy(extra_info)))\n \n for (pos, prob, extra_info) in outputs:\n self.output_samples(pos, prob, extra_info)\n\n #Set the starting positions for the next chunk of samples\n #to the last ones for this chunk\n self.p0 = pos\n self.prob0 = prob\n self.blob0 = extra_info\n self.num_samples += self.nsteps\n acceptance_fraction = self.ensemble.acceptance_fraction.mean()\n print(\"Done {} iterations of emcee. Acceptance fraction {:.3f}\".format(\n self.num_samples, acceptance_fraction))\n sys.stdout.flush()\n self.output.final(\"mean_acceptance_fraction\", acceptance_fraction)\n\n def is_converged(self):\n return self.num_samples >= self.samples\n" ]
[ [ "numpy.diag", "numpy.genfromtxt", "numpy.copy", "numpy.repeat", "numpy.array", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kventinel/Practical_DL
[ "a2e427ce207e9869260daadfe758e2926a007f3f" ]
[ "_under_construction/week7/pretrained_lenet.py" ]
[ "from lasagne.layers import InputLayer\nfrom lasagne.layers import DenseLayer\nfrom lasagne.layers import ConcatLayer\nfrom lasagne.layers import NonlinearityLayer\nfrom lasagne.layers import GlobalPoolLayer\nfrom lasagne.layers import Conv2DLayer as ConvLayer\nfrom lasagne.layers import MaxPool2DLayer as PoolLayerDNN\nfrom lasagne.layers import MaxPool2DLayer as PoolLayer\nfrom lasagne.layers import LocalResponseNormalization2DLayer as LRNLayer\nfrom lasagne.nonlinearities import softmax, linear\n\n\ndef build_inception_module(name, input_layer, nfilters):\n # nfilters: (pool_proj, 1x1, 3x3_reduce, 3x3, 5x5_reduce, 5x5)\n net = {}\n net['pool'] = PoolLayerDNN(input_layer, pool_size=3, stride=1, pad=1)\n net['pool_proj'] = ConvLayer(net['pool'], nfilters[0], 1)\n\n net['1x1'] = ConvLayer(input_layer, nfilters[1], 1)\n\n net['3x3_reduce'] = ConvLayer(input_layer, nfilters[2], 1)\n net['3x3'] = ConvLayer(net['3x3_reduce'], nfilters[3], 3, pad=1)\n\n net['5x5_reduce'] = ConvLayer(input_layer, nfilters[4], 1)\n net['5x5'] = ConvLayer(net['5x5_reduce'], nfilters[5], 5, pad=2)\n\n net['output'] = ConcatLayer([\n net['1x1'],\n net['3x3'],\n net['5x5'],\n net['pool_proj'],\n ])\n\n return {'{}/{}'.format(name, k): v for k, v in net.items()}\n\n\ndef build_model():\n net = {}\n net['input'] = InputLayer((None, 3, None, None))\n net['conv1/7x7_s2'] = ConvLayer(net['input'], 64, 7, stride=2, pad=3)\n net['pool1/3x3_s2'] = PoolLayer(net['conv1/7x7_s2'],\n pool_size=3,\n stride=2,\n ignore_border=False)\n net['pool1/norm1'] = LRNLayer(net['pool1/3x3_s2'], alpha=0.00002, k=1)\n net['conv2/3x3_reduce'] = ConvLayer(net['pool1/norm1'], 64, 1)\n net['conv2/3x3'] = ConvLayer(net['conv2/3x3_reduce'], 192, 3, pad=1)\n net['conv2/norm2'] = LRNLayer(net['conv2/3x3'], alpha=0.00002, k=1)\n net['pool2/3x3_s2'] = PoolLayer(net['conv2/norm2'], pool_size=3, stride=2)\n\n net.update(build_inception_module('inception_3a',\n net['pool2/3x3_s2'],\n [32, 64, 96, 128, 16, 32]))\n net.update(build_inception_module('inception_3b',\n net['inception_3a/output'],\n [64, 128, 128, 192, 32, 96]))\n net['pool3/3x3_s2'] = PoolLayer(net['inception_3b/output'],\n pool_size=3, stride=2)\n\n net.update(build_inception_module('inception_4a',\n net['pool3/3x3_s2'],\n [64, 192, 96, 208, 16, 48]))\n net.update(build_inception_module('inception_4b',\n net['inception_4a/output'],\n [64, 160, 112, 224, 24, 64]))\n net.update(build_inception_module('inception_4c',\n net['inception_4b/output'],\n [64, 128, 128, 256, 24, 64]))\n net.update(build_inception_module('inception_4d',\n net['inception_4c/output'],\n [64, 112, 144, 288, 32, 64]))\n net.update(build_inception_module('inception_4e',\n net['inception_4d/output'],\n [128, 256, 160, 320, 32, 128]))\n net['pool4/3x3_s2'] = PoolLayer(net['inception_4e/output'],\n pool_size=3, stride=2)\n\n net.update(build_inception_module('inception_5a',\n net['pool4/3x3_s2'],\n [128, 256, 160, 320, 32, 128]))\n net.update(build_inception_module('inception_5b',\n net['inception_5a/output'],\n [128, 384, 192, 384, 48, 128]))\n\n net['pool5/7x7_s1'] = GlobalPoolLayer(net['inception_5b/output'])\n net['loss3/classifier'] = DenseLayer(net['pool5/7x7_s1'],\n num_units=1000,\n nonlinearity=linear)\n net['prob'] = NonlinearityLayer(net['loss3/classifier'],\n nonlinearity=softmax)\n return net\n\n\nimport skimage.transform\nimport numpy as np\nMEAN_VALUES = np.array([104, 117, 123]).reshape((3,1,1))\ndef preprocess(im):\n if len(im.shape) == 2:\n im = im[:, :, np.newaxis]\n im = np.repeat(im, 3, axis=2)\n # Resize 
so smallest dim = 224, preserving aspect ratio\n h, w, _ = im.shape\n if h < w:\n im = skimage.transform.resize(im, (224, w*224//h), preserve_range=True)\n else:\n im = skimage.transform.resize(im, (h*224//w, 224), preserve_range=True)\n\n # Central crop to 224x224\n h, w, _ = im.shape\n im = im[h//2-112:h//2+112, w//2-112:w//2+112]\n \n rawim = np.copy(im).astype('uint8')\n \n # Shuffle axes to c01\n im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)\n \n # Convert to BGR\n im = im[::-1, :, :]\n\n im = im - MEAN_VALUES\n return im[np.newaxis].astype('float32')\n" ]
[ [ "numpy.copy", "numpy.repeat", "numpy.array", "numpy.swapaxes" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Freyr-Wings/cse252c_hw3-release
[ "8299b52ffa39195edacddb682a03f8a4b548fbac" ]
[ "Segmentation/dataLoader.py" ]
[ "import torch\nimport numpy as np\nimport os.path as osp\nimport random\nfrom torch.utils.data import Dataset\nfrom PIL import Image\nimport cv2\n\n\nclass BatchLoader(Dataset):\n def __init__(self, imageRoot, labelRoot, fileList, imWidth=None, imHeight=None, numClasses=21):\n super(BatchLoader, self).__init__()\n\n self.imageRoot = imageRoot\n self.labelRoot = labelRoot\n self.fileList = fileList\n\n with open(fileList, 'r') as fIn:\n imgNames = fIn.readlines()\n imgNames = [x.strip() for x in imgNames]\n imgNames = sorted(imgNames)\n\n self.imgNames = [osp.join(imageRoot, x + '.jpg') for x in imgNames]\n self.labelNames = [osp.join(labelRoot, x + '.png') for x in imgNames]\n\n self.count = len(self.imgNames)\n self.perm = list(range(self.count))\n random.shuffle(self.perm)\n print('Image Num: %d' % self.count)\n\n # If image height and width are None\n # do not do any cropping\n self.imHeight = imHeight\n self.imWidth = imWidth\n\n # MEAN and std of image\n self.imMean = np.array([0.485, 0.456, 0.406], dtype=np.float32)\n self.imStd = np.array([0.229, 0.224, 0.225], dtype=np.float32)\n\n self.imMean = self.imMean.reshape([1, 1, 3])\n self.imStd = self.imStd.reshape([1, 1, 3])\n self.numClasses = numClasses\n\n self.iterCount = 0\n\n def __len__(self):\n return self.count\n\n def __getitem__(self, ind):\n\n imName = self.imgNames[self.perm[ind]]\n labelName = self.labelNames[self.perm[ind]]\n\n im = self.loadImage(imName)\n label, labelIndex, mask = self.loadLabel(labelName)\n\n # If image size is given, randomly crop the images\n if not (self.imHeight is None or self.imWidth is None):\n nrows, ncols = im.shape[1], im.shape[2]\n gapH = (nrows - self.imHeight)\n gapW = (ncols - self.imWidth)\n rs = int(np.round(np.random.random() * gapH))\n cs = int(np.round(np.random.random() * gapW))\n\n im = im[:, rs:rs + self.imHeight, cs:cs + self.imWidth]\n label = label[:, rs:rs + self.imHeight, cs:cs + self.imWidth]\n labelIndex = labelIndex[:, rs:rs + self.imHeight, cs:cs + self.imWidth]\n mask = mask[:, rs:rs + self.imHeight, cs:cs + self.imWidth]\n\n ## Load data\n # im: input immage batch, Nx3ximHeightximWidth\n # label: binary label of 21 classe, Nx21ximHeightximWidth\n # labelIndex: label of 21 classes, Nx1ximHeightximWidth\n # mask: mask of valid region, Nx1ximHeightximWidth\n\n batchDict = {\n 'im': im,\n 'label': label,\n 'labelIndex': labelIndex,\n 'mask': mask\n }\n return batchDict\n\n def loadImage(self, imName):\n # Load inpute image\n\n im = Image.open(imName)\n im = np.asarray(im)\n\n nrows, ncols = im.shape[0], im.shape[1]\n if not (self.imHeight is None or self.imWidth is None):\n if nrows < self.imHeight or ncols < self.imWidth:\n scaleRow = float(nrows) / float(self.imHeight)\n scaleCol = float(ncols) / float(self.imWidth)\n if scaleRow > scaleCol:\n ncols = int(np.ceil(ncols / scaleCol))\n nrows = int(np.ceil(nrows / scaleCol))\n else:\n ncols = int(np.ceil(ncols / scaleRow))\n nrows = int(np.ceil(nrows / scaleRow))\n im = cv2.resize(im, (ncols, nrows), interpolation=cv2.INTER_LINEAR)\n\n if len(im.shape) == 2:\n print('Warning: load a gray image')\n im = im[:, :, np.newaxis]\n im = np.concatenate([im, im, im], axis=2)\n im = im.astype(np.float32) / 255.0\n\n im = (im - self.imMean) / self.imStd\n im = im.transpose([2, 0, 1])\n return im\n\n def loadLabel(self, labelName):\n # Load ground-truth label\n\n labelIndex = Image.open(labelName)\n labelIndex = np.array(labelIndex)\n assert (len(labelIndex.shape) == 2)\n\n nrows, ncols = labelIndex.shape[0], labelIndex.shape[1]\n if 
not (self.imHeight is None or self.imWidth is None):\n if nrows < self.imHeight or ncols < self.imWidth:\n scaleRow = float(nrows) / float(self.imHeight)\n scaleCol = float(ncols) / float(self.imWidth)\n if scaleRow > scaleCol:\n ncols = int(np.ceil(ncols / scaleCol))\n nrows = int(np.ceil(nrows / scaleCol))\n else:\n ncols = int(np.ceil(ncols / scaleRow))\n nrows = int(np.ceil(nrows / scaleRow))\n\n labelIndex = cv2.resize(labelIndex, (ncols, nrows), interpolation=cv2.INTER_NEAREST)\n\n labelIndex = labelIndex.astype(np.int64)\n\n nrows, ncols = labelIndex.shape[0], labelIndex.shape[1]\n xIndex, yIndex = np.meshgrid(np.arange(0, ncols), np.arange(0, nrows))\n xIndex, yIndex, labelIndex = \\\n xIndex.astype(np.int32), yIndex.astype(np.int32), labelIndex.astype(np.int32)\n\n mask = (labelIndex != 255).astype(np.float32)[np.newaxis, :, :]\n labelIndex[labelIndex == 255] = 0\n labelIndex = labelIndex[np.newaxis, :, :]\n\n label = np.zeros([self.numClasses, nrows, ncols], dtype=np.float32)\n label[labelIndex.flatten(), yIndex.flatten(), xIndex.flatten()] = 1.0\n label = label * mask\n\n return label, labelIndex, mask\n" ]
[ [ "numpy.random.random", "numpy.asarray", "numpy.arange", "numpy.concatenate", "numpy.ceil", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maksimt/distr-nmf
[ "e7e3bb3cb619cf1b75a01d0948921bd7d1102901" ]
[ "tests/test_tasks.py" ]
[ "import pytest\nimport numpy as np\nimport os\nfrom distr_nmf.src import tasks_nmf\nfrom distr_nmf.src.exec_config import log_mpc_filename\nimport luigi\nfrom matrixops.transform import normalize, tfidf\nfrom rri_nmf import nmf\n\n\ndef _gen_random_mat(n, d, density, random_seed=0, nnz_per_row=1):\n np.random.seed(random_seed)\n X = np.zeros((n, d))\n for i in range(n):\n J = np.random.choice(d, size=(1, nnz_per_row), replace=False)\n X[i, J] = 1\n # X[np.indices(I_nz.shape)[0], I_nz] = 1\n\n density -= float(nnz_per_row) / d\n if density > 0:\n X = X + _mask(np.random.rand(n, d), density)\n return X\n\n\ndef _mask(X, density):\n M = np.random.rand(*X.shape)\n X[M >= density] = 0\n return X\n\n\n\n\n\ndef test_correct_hash(n=100, d=25, seed=0, M=1, n_iter=2):\n \"\"\"This test may break if new parameters are added to tasks but they are\n not added to the parameter dicts used for testing; this is mostly useful\n to make sure the other tests can still run\"\"\"\n X = _gen_random_mat(n, d, 0.1, random_seed=seed)\n idf = True\n tasks_nmf.remove_intermediate = False\n\n X_fn = '/tmp/X.npy'\n np.save('/tmp/X.npy', X)\n\n\n K = 2\n w_row_sum = 1\n\n nmf_params = {\n \"reg_w_l1\": 0.0, \"project_W_each_iter\": True, \"random_seed\": 0,\n \"reg_w_l2\": 0.0, \"reg_t_l2\": 0.0, \"k\": K, \"reg_t_l1\": 0.0,\n \"project_T_each_iter\": True, \"agg\": \"double_precision_nonprivate\",\n \"init\": \"random\", \"t_row_sum\": 1.0, \"idf\": idf,\n \"reset_topic_method\": \"random\",\n \"w_row_sum\": w_row_sum\n }\n dataset_params = {\n \"M\": M, \"d\": d, \"dataset_name\": X_fn, \"n\": n,\n 'execution_mode': 'local'\n }\n\n Ws = tasks_nmf.GetWeights(dataset_params=dataset_params,\n nmf_params=nmf_params,\n n_iter=1,\n topic_num=K - 1,\n group_id=0\n )\n\n assert Ws.output().path.split('/')[-1] == 'GetWeights__417225404975075603'\n\n\n# TODO: add fixtures and break down into multiple tests\[email protected](('n', 'd', 'seed', 'M', 'n_iter', 'mode'),\n [\n (100, 25, 0, 1, 2, 'local'),\n (100, 25, 0, 1, 2, 'mock_distr_MPC'),\n (20, 25, 0, 3, 2, 'local'),\n (21, 26, 1, 5, 2, 'local')\n ])\ndef test_distr_matches_centralized(n, d, seed, M, n_iter, mode):\n X = _gen_random_mat(n, d, 0.1, random_seed=seed)\n idf = True\n tasks_nmf.remove_intermediate = False\n\n X_fn = '/tmp/X.npy'\n np.save('/tmp/X.npy', X)\n\n for fn in os.listdir('/tmp/'):\n if fn.startswith('Gen') or fn.startswith('Get') or fn.startswith('Agg'):\n try:\n os.remove('/tmp/' + fn)\n except OSError as e:\n raise e\n K = 2\n w_row_sum = 1\n compare_W = True\n\n if idf:\n X = tfidf(X)\n X = normalize(X)\n\n LocalNMFTaks = tasks_nmf.MultiWorkerNMF(dataset_name=X_fn,\n k=K,\n n_iter=n_iter,\n idf=idf,\n M=M,\n execution_mode=mode)\n luigi.build([LocalNMFTaks], local_scheduler=True)\n\n nmf_params = {\n \"reg_w_l1\": 0.0, \"project_W_each_iter\": True, \"random_seed\": 0,\n \"reg_w_l2\": 0.0, \"reg_t_l2\": 0.0, \"k\": K, \"reg_t_l1\": 0.0,\n \"project_T_each_iter\": True, \"agg\": \"double_precision_nonprivate\",\n \"init\": \"random\", \"t_row_sum\": 1.0, \"idf\": idf,\n \"reset_topic_method\": \"random\",\n \"w_row_sum\": w_row_sum\n }\n dataset_params = {\n \"M\": M, \"d\": d, \"dataset_name\": X_fn, \"n\": n,\n 'execution_mode': mode\n }\n\n for it in range(1, n_iter):\n GT = tasks_nmf.GetTopics(nmf_params=nmf_params,\n dataset_params=dataset_params,\n n_iter=it, topic_num=K - 1)\n if compare_W:\n if it >= 0:\n W = np.zeros((n, K))\n for m in range(dataset_params['M']):\n Ws = tasks_nmf.GetWeights(dataset_params=dataset_params,\n 
nmf_params=nmf_params,\n n_iter=it,\n topic_num=K - 1,\n group_id=m\n )\n with Ws.output().open() as f:\n W_I = np.load(f)\n I = range(m, n, dataset_params['M'])\n W[I, :] = W_I\n\n with GT.output().open() as f:\n T = np.load(f)\n base_nmf_soln = nmf.nmf(X, K, max_iter=it, init='random',\n random_state=0, debug=0,\n reset_topic_method='random',\n fix_reset_seed=True,\n negative_denom_correction=False,\n project_W_each_iter=True,\n w_row_sum=w_row_sum) # ,\n if compare_W:\n assert np.allclose(W, base_nmf_soln['W'])\n assert np.allclose(T, base_nmf_soln['T'])\n\ndef test_MPC_logging():\n #TODO: this depends on being run after test_distr_matches_centralized();\n #TODO: we should use fixtures instead\n with open('/var/log/largenmf_MPC.log') as f:\n s = f.read()\n assert s.count('sending') + s.count('receiving') == 16\n" ]
[ [ "numpy.allclose", "numpy.random.seed", "numpy.random.choice", "numpy.save", "numpy.random.rand", "numpy.load", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
veugene/data_tools
[ "6b590bee65e69ea1e88b92cb67b360f3ec3c3c85" ]
[ "data_tools/wrap.py" ]
[ "import warnings\nimport numpy as np\n\n\nclass delayed_view(object):\n \"\"\"\n Given an array, create a view into that array without preloading the viewed\n data into memory. Data is loaded as needed when indexing into the\n delayed_view.\n \n Indexing is numpy-style, using any combination of integers, slices, index\n lists, ellipsis (only one, as with numpy), and boolean arrays but not \n non-boolean multi-dimensional arrays. Note that the indexing style is also\n used on the underlying data sources so those data sources must support the\n style of indexing used with a multi_source_array object; use simple\n indexing with integers and slices (eg. obj[0,3:10]) when unsure.\n \n Adding dimensions to the output just by indexing is not supported. This\n means that unlike with numpy, indexing cannot be done with `None` or\n `numpy.newaxis`; also, for example, an array A with shape (4,5) can be\n indexed as A[[0,1]] and A[[[0,1]]] (these are equivalent) but not as\n A[[[[0,1]]]] for which numpy would add a dimension to the output.\n \n arr : the source array\n shuffle : randomize data access order within the view\n idx_min : the view into arr starts at this index\n idx_max : the view into arr ends before this index\n rng : numpy random number generator\n \"\"\"\n \n def __init__(self, arr, shuffle=False, idx_min=None, idx_max=None,\n rng=None):\n self.arr = arr\n self.shuffle = shuffle\n self.idx_min = idx_min\n if idx_min is None:\n self.idx_min = 0\n self.idx_max = idx_max\n if idx_max is None:\n self.idx_max = len(self.arr)\n if rng is None:\n rng = np.random.RandomState()\n self.rng = rng\n self.num_items = min(self.idx_max, len(arr))-self.idx_min\n assert(self.num_items >= 0)\n self.dtype = self.arr.dtype\n try:\n self.shape = arr.shape\n except AttributeError:\n self.shape = (len(arr),)+np.shape(arr[0])\n self.ndim = len(self.shape)\n \n # Create index list\n self.arr_indices = np.arange(self.idx_min, min(self.idx_max, len(arr)))\n if self.shuffle:\n self.rng.shuffle(self.arr_indices)\n \n def re_shuffle(self, random_seed=None):\n rng = self.rng\n if random_seed is not None:\n rng = np.random.RandomState(random_seed)\n rng.shuffle(self.arr_indices)\n \n def __iter__(self):\n for idx in self.arr_indices:\n idx = int(idx) # Some libraries don't like np.integer\n yield self.arr[idx]\n \n def _get_element(self, int_key, key_remainder=None):\n if not isinstance(int_key, (int, np.integer)):\n raise IndexError(\"cannot index with {}\".format(type(int_key)))\n idx = self.arr_indices[int_key]\n if key_remainder is not None:\n idx = (idx,)+key_remainder\n idx = int(idx) # Some libraries don't like np.integer\n return self.arr[idx]\n \n def _get_block(self, values, key_remainder=None):\n item_block = None\n for i, v in enumerate(values):\n # Lists in the aggregate key index in tandem;\n # so, index into those lists (the first list is `values`)\n v_key_remainder = key_remainder\n if isinstance(values, tuple) or isinstance(values, list):\n if key_remainder is not None:\n broadcasted_key_remainder = ()\n for k in key_remainder:\n if hasattr(k, '__len__') and len(k)==np.size(k):\n broadcasted_key_remainder += (k[i],)\n else:\n broadcasted_key_remainder += (k,)\n v_key_remainder = broadcasted_key_remainder\n \n # Make a single read at an integer index of axis 0\n elem = self._get_element(v, v_key_remainder)\n if item_block is None:\n item_block = np.zeros((len(values),)+elem.shape,\n self.dtype)\n item_block[i] = elem\n return item_block\n \n def __getitem__(self, key):\n item = None\n key_remainder 
= None\n \n # Grab the key for the first dimension, store the remainder\n if hasattr(key, '__len__'):\n if isinstance(key, np.ndarray):\n if key.dtype == np.bool:\n if key.ndim != self.ndim:\n raise IndexError(\"not enough indices, given a boolean \"\n \"index array with shape \"\n \"{}\".format(np.shape(key)))\n key = key.nonzero()\n elif key.ndim > 1:\n raise IndexError(\"indexing by non-boolean multidimensional\"\n \" arrays not supported\")\n \n # If there are lists in the key, make sure they have the same shape\n key_shapes = []\n for k in key:\n if hasattr(k, '__len__'):\n key_shapes.append(np.shape(k))\n for s in key_shapes:\n if s!=key_shapes[0]:\n raise IndexError(\"shape mismatch: indexing arrays could \"\n \"not be broadcast together with shapes \"\n \"\"+\" \".join([str(s) for s in key_shapes]))\n if len(key_shapes) > self.ndim:\n # More sublists/subtuples than dimensions in the array\n raise IndexError(\"too many indices for array\")\n \n # If there are iterables in the key, or if the key is a tuple, then\n # each key index corresponds to a separate data dimension (as per\n # Numpy). Otherwise, such as when the key is a list of integers,\n # each index corresponds only to the first data dimension.\n key_remainder = None\n if len(key_shapes) or isinstance(key, tuple):\n key_remainder = tuple(key[1:])\n key = key[0]\n \n # Handle ellipsis\n if key is Ellipsis:\n key = slice(0, self.num_items)\n if key_remainder is not None and len(key_remainder) < self.ndim-1:\n key_remainder = (Ellipsis,)+key_remainder\n \n # At this point the `key` is only for the first dimension and any keys\n # for other dimensions that may have been passed are in key_remainder\n if isinstance(key, (int, np.integer)):\n item = self._get_element(key, key_remainder)\n elif isinstance(key, slice):\n start = key.start if key.start is not None else 0\n stop = key.stop if key.stop is not None else self.num_items\n stop = min(stop, self.num_items)\n step = key.step if key.step is not None else 1\n item = self._get_block(range(start, stop, step), key_remainder)\n elif hasattr(key, '__len__'):\n item = self._get_block(key, key_remainder)\n else:\n raise IndexError(\"cannot index with {}\".format(type(key)))\n \n return item\n \n def __len__(self):\n return self.num_items\n\n\nclass multi_source_array(delayed_view):\n \"\"\"\n Given a list of sources, create an array-like interface that combines the\n sources. This object allows slicing and iterating over the elements. 
Data\n access automatically spans all data sources.\n \n Indexing is numpy-style with the exeption of indexing using non-boolean\n multi-dimensional arrays, as detailed in wrap.delayed_view.\n \n source_list : list of sources to combine into one source\n class_list : specifies class number for each source; same length as\n source_list\n shuffle : randomize data access order within and across all sources\n maxlen : the maximum number of elements to take from each source; if\n shuffle is False, a source is accessed as source[0:maxlen] and if\n shuffle is True, a source is accessed as shuffle(source)[0:maxlen]\n rng : numpy random number generator\n \"\"\"\n \n def __init__(self, source_list, class_list=None, shuffle=False,\n maxlen=None, rng=None):\n self.source_list = source_list\n self.class_list = class_list\n self.shuffle = shuffle\n self.maxlen = maxlen\n if self.maxlen == None:\n self.maxlen = np.inf\n self.num_items = 0\n for source in source_list:\n self.num_items += min(len(source), self.maxlen)\n \n # Ensure that all the data sources contain elements of the same shape\n # and data type\n self.dtype = self.source_list[0].dtype\n self.shape = None\n for i, source in enumerate(source_list):\n try:\n shape = source.shape\n except AttributeError:\n shape = len(source)+np.shape(source[0])\n if self.shape is None:\n self.shape = (self.num_items,)+shape[1:]\n if self.shape[1:]!=shape[1:]:\n # In order, match all dimensions with the same shape, until\n # a match is not found.\n new_shape = self.shape\n for i in range(1, max(min(len(self.shape), len(shape)), 1)):\n if self.shape[1:i]==shape[1:i]:\n new_shape = self.shape[:i]\n self.shape = new_shape\n if source.dtype != self.dtype:\n self.dtype = None # Cannot determine dtype.\n self.ndim = len(self.shape)\n if rng is None:\n rng = np.random.RandomState()\n self.rng = rng\n \n # Index the data sources\n self.index_pairs = []\n for i, source in enumerate(self.source_list):\n source_indices = np.arange(len(source))\n if self.shuffle:\n self.rng.shuffle(source_indices)\n source_indices = source_indices[:min(len(source), self.maxlen)]\n for j in source_indices:\n self.index_pairs.append((i, j))\n if self.shuffle==True:\n self.rng.shuffle(self.index_pairs)\n \n def re_shuffle(self, random_seed=None):\n rng = self.rng\n if random_seed is not None:\n rng = np.random.RandomState(random_seed)\n rng.shuffle(self.index_pairs)\n \n def get_labels(self):\n labels = []\n for p in self.index_pairs:\n if not self.class_list:\n labels.append(p[0])\n else:\n labels.append(self.class_list[ p[0] ])\n return labels\n \n def __iter__(self):\n for source_num, idx in self.index_pairs:\n yield self.source_list[source_num][idx]\n \n def _get_element(self, int_key, key_remainder=None):\n if not isinstance(int_key, (int, np.integer)):\n raise IndexError(\"cannot index with {}\".format(type(int_key)))\n source_num, idx = self.index_pairs[int_key]\n if key_remainder is not None:\n idx = (idx,)+key_remainder\n idx = int(idx) # Some libraries don't like np.integer\n return self.source_list[source_num][idx]\n" ]
[ [ "numpy.size", "numpy.random.RandomState", "numpy.shape" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
damiclem/freeda_network
[ "5f4067da5cd5e6129417e586414b7a20af8cc129" ]
[ "modules/dataset.py" ]
[ "# Dependencies\nimport numpy as np\nimport pandas as pd\n\n# Load words dataset table\ndef load_words(path):\n return pd.read_csv(path, dtype={\n 'tweet': np.unicode_,\n 'index': np.int,\n 'text': np.unicode_,\n 'pos': np.unicode_,\n 'conf': np.float \n })\n \n# Load tweets dataset\ndef load_tweets(path):\n return pd.read_csv(\n path, \n parse_dates=['created_at'], \n dtype={\n 'id_str': np.unicode_,\n 'text': np.unicode_\n })" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
cjhaitman/bert_seq2seq
[ "110d5afd76944cd0ec74d72c7a16a68e90032793" ]
[ "bert_seq2seq/model/roberta_model.py" ]
[ "import logging\nimport math\nimport os\n\nimport torch\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\ndef swish(x):\n return x * torch.sigmoid(x)\n\ndef gelu(x):\n \"\"\" \n \"\"\"\n return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))\n\ndef mish(x):\n return x * torch.tanh(nn.functional.softplus(x))\n\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish, \"mish\": mish}\n\nclass BertConfig(object):\n \n def __init__(\n self,\n vocab_size,\n hidden_size=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=2,\n initializer_range=0.02,\n layer_norm_eps=1e-12,\n ):\n\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.hidden_act = hidden_act\n self.intermediate_size = intermediate_size\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n\n\nclass BertLayerNorm(nn.Module):\n \"\"\"LayerNorm层, 见Transformer(一), 讲编码器(encoder)的第3部分\"\"\"\n def __init__(self, hidden_size, eps=1e-12):\n \"\"\"Construct a layernorm module in the TF style (epsilon inside the square root).\n \"\"\"\n super(BertLayerNorm, self).__init__()\n self.weight = nn.Parameter(torch.ones(hidden_size))\n self.bias = nn.Parameter(torch.zeros(hidden_size))\n self.variance_epsilon = eps\n\n def forward(self, x):\n u = x.mean(-1, keepdim=True)\n s = (x - u).pow(2).mean(-1, keepdim=True)\n x = (x - u) / torch.sqrt(s + self.variance_epsilon)\n return self.weight * x + self.bias\n\n\n\nclass BertEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)\n self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)\n\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, input_ids=None, token_type_ids=None, position_ids=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = inputs_embeds.size()[:-1]\n\n seq_length = input_shape[1]\n device = input_ids.device\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n inputs_embeds = self.word_embeddings(input_ids)\n position_embeddings = self.position_embeddings(position_ids)\n token_type_embeddings = self.token_type_embeddings(token_type_ids)\n\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings\n embeddings = self.LayerNorm(embeddings)\n embeddings = self.dropout(embeddings)\n return embeddings\n\n\nclass BertSelfAttention(nn.Module):\n def __init__(self, config: BertConfig):\n super().__init__()\n if config.hidden_size % 
config.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size (%d) is not a multiple of the number of attention \"\n \"heads (%d)\" % (config.hidden_size, config.num_attention_heads)\n )\n \n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = int(config.hidden_size / config.num_attention_heads)\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n\n self.query = nn.Linear(config.hidden_size, self.all_head_size)\n self.key = nn.Linear(config.hidden_size, self.all_head_size)\n self.value = nn.Linear(config.hidden_size, self.all_head_size)\n\n self.dropout = nn.Dropout(config.attention_probs_dropout_prob)\n\n def transpose_for_scores(self, x):\n new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)\n x = x.view(*new_x_shape)\n\n ## 最后xshape (batch_size, num_attention_heads, seq_len, head_size)\n return x.permute(0, 2, 1, 3)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_attentions=False\n ):\n mixed_query_layer = self.query(hidden_states)\n mixed_key_layer = self.key(hidden_states)\n mixed_value_layer = self.value(hidden_states)\n\n query_layer = self.transpose_for_scores(mixed_query_layer)\n key_layer = self.transpose_for_scores(mixed_key_layer)\n value_layer = self.transpose_for_scores(mixed_value_layer)\n\n # Take the dot product between \"query\" and \"key\" to get the raw attention scores.\n attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))\n attention_scores = attention_scores / math.sqrt(self.attention_head_size)\n \n # Apply the attention mask is (precomputed for all layers in BertModel forward() function)\n attention_scores = attention_scores + attention_mask\n\n # Normalize the attention scores to probabilities.\n attention_probs = nn.Softmax(dim=-1)(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.dropout(attention_probs)\n\n # 注意力加权\n context_layer = torch.matmul(attention_probs, value_layer)\n # 把加权后的V reshape, 得到[batch_size, length, embedding_dimension]\n context_layer = context_layer.permute(0, 2, 1, 3).contiguous()\n new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)\n context_layer = context_layer.view(*new_context_layer_shape)\n\n # 得到输出\n if output_attentions:\n return context_layer, attention_probs\n return context_layer, None\n\n\nclass BertSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.self = BertSelfAttention(config)\n self.output = BertSelfOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_attentions=False\n ):\n self_outputs, attention_metrix = self.self(hidden_states, attention_mask, output_attentions=output_attentions)\n attention_output = self.output(self_outputs, hidden_states)\n \n return attention_output, attention_metrix\n\n\nclass BertIntermediate(nn.Module):\n def __init__(self, 
config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.intermediate_size)\n self.intermediate_act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.intermediate_act_fn(hidden_states)\n return hidden_states\n\nclass BertOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.intermediate_size, config.hidden_size)\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n\n def forward(self, hidden_states, input_tensor):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.LayerNorm(hidden_states + input_tensor)\n return hidden_states\n\n\nclass BertLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.attention = BertAttention(config)\n self.intermediate = BertIntermediate(config)\n self.output = BertOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_attentions=False\n ):\n attention_output, attention_matrix = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)\n intermediate_output = self.intermediate(attention_output)\n layer_output = self.output(intermediate_output, attention_output)\n return layer_output, attention_matrix\n\n\nclass BertEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])\n\n def forward(\n self,\n hidden_states,\n attention_mask,\n output_all_encoded_layers=True,\n output_attentions=False\n ):\n all_encoder_layers = []\n all_attention_matrices = []\n for i, layer_module in enumerate(self.layer):\n \n layer_output, attention_matrix = layer_module(\n hidden_states, attention_mask, output_attentions=output_attentions\n )\n hidden_states = layer_output\n if output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n all_attention_matrices.append(attention_matrix)\n if not output_all_encoded_layers:\n all_encoder_layers.append(hidden_states)\n all_attention_matrices.append(attention_matrix)\n \n return all_encoder_layers, all_attention_matrices\n\n\nclass BertPooler(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n # We \"pool\" the model by simply taking the hidden state corresponding\n # to the first token.\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\n\nclass BertPredictionHeadTransform(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n self.transform_act_fn = ACT2FN[config.hidden_act]\n self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = self.transform_act_fn(hidden_states)\n hidden_states = self.LayerNorm(hidden_states)\n return hidden_states\n\n\nclass BertLMPredictionHead(nn.Module):\n def __init__(self, config, bert_model_embedding_weights):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an 
output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.decoder.weight = bert_model_embedding_weights\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\nclass BertOnlyMLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BertLMPredictionHead(config)\n\n def forward(self, sequence_output):\n prediction_scores = self.predictions(sequence_output)\n return prediction_scores\n\n\nclass BertOnlyNSPHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, pooled_output):\n seq_relationship_score = self.seq_relationship(pooled_output)\n return seq_relationship_score\n\n\nclass BertPreTrainingHeads(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.predictions = BertLMPredictionHead(config)\n self.seq_relationship = nn.Linear(config.hidden_size, 2)\n\n def forward(self, sequence_output, pooled_output):\n prediction_scores = self.predictions(sequence_output)\n seq_relationship_score = self.seq_relationship(pooled_output)\n return prediction_scores, seq_relationship_score\n\n\nclass BertPreTrainedModel(nn.Module):\n \"\"\" An abstract class to handle weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n def __init__(self, config, *inputs, **kwargs):\n super(BertPreTrainedModel, self).__init__()\n if not isinstance(config, BertConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `BertConfig`. \"\n \"To create a model from a Google pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n ))\n self.config = config\n\n def init_bert_weights(self, module):\n \"\"\" Initialize the weights.\n \"\"\"\n if isinstance(module, (nn.Linear)):\n # 初始线性映射层的参数为正态分布\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, BertLayerNorm):\n # 初始化LayerNorm中的alpha为全1, beta为全0\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n # 初始化偏置为0\n module.bias.data.zero_()\n\n\nclass BertModel(BertPreTrainedModel):\n \"\"\"\n The model can behave as an encoder (with only self-attention) as well\n as a decoder, in which case a layer of cross-attention is added between\n the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,\n Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.\n To behave as an decoder the model needs to be initialized with the\n :obj:`is_decoder` argument of the configuration set to :obj:`True`; an\n :obj:`encoder_hidden_states` is expected as an input to the forward pass.\n .. 
_`Attention is all you need`:\n https://arxiv.org/abs/1706.03762\n \"\"\"\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n\n self.embeddings = BertEmbeddings(config)\n self.encoder = BertEncoder(config)\n self.pooler = BertPooler(config)\n\n self.apply(self.init_bert_weights)\n\n def forward(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n output_all_encoded_layers=True,\n output_attentions=False\n ): \n \n \n extended_attention_mask = (input_ids > 0).float()\n # 注意力矩阵mask: [batch_size, 1, 1, seq_length]\n extended_attention_mask = extended_attention_mask.unsqueeze(1).unsqueeze(2)\n if attention_mask is not None :\n ## 如果传进来的注意力mask不是null,那就直接用传进来的注意力mask 乘 原始mask\n # 注意 原始mask是extended_attention_mask,这个是用来把pad部分置为0,去掉pad部分影响\n extended_attention_mask = attention_mask * extended_attention_mask\n\n if token_type_ids is None:\n token_type_ids = torch.zeros_like(input_ids)\n\n # extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n\n embedding_output = self.embeddings(\n input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids\n )\n encoder_layers, all_attention_matrices = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n output_all_encoded_layers=output_all_encoded_layers,\n output_attentions=output_attentions\n )\n sequence_output = encoder_layers[-1]\n pooled_output = self.pooler(sequence_output)\n\n if output_attentions:\n return all_attention_matrices\n if not output_all_encoded_layers:\n # 如果不用输出所有encoder层\n encoder_layers = encoder_layers[-1]\n return encoder_layers, pooled_output\n" ]
[ [ "torch.nn.Softmax", "torch.nn.Dropout", "torch.sigmoid", "torch.ones", "torch.zeros", "torch.sqrt", "torch.zeros_like", "torch.nn.Embedding", "torch.nn.Tanh", "torch.nn.Linear", "torch.matmul", "torch.arange", "torch.nn.functional.softplus" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pankajdarak-xlnx/pyxir
[ "a93b785a04b6602418c4f07a0f29c809202d35bd" ]
[ "python/pyxir/quantization/quant_ops.py" ]
[ "# Copyright 2020 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nxfDNN module for quantization of neural network graphs in xfDNN intermediate \nrepresentation (IR)\n\n\n\"\"\"\n\nimport abc\nimport numpy as np\nimport logging\n\nfrom pyxir.shared.quant_param_factory import LayerParams\nfrom .util import ThresholdLayerInputs, ThresholdLayerOutputs, ThresholdWeights\n\nlogger = logging.getLogger(\"pyxir\")\n\n\nclass BaseQuantOp(object):\n\n __abcmeta__ = abc.ABCMeta\n\n \"\"\"\n Attributes\n ----------\n qp_factory: QuantParamFactory\n the quantization parameter factory\n quant_layers: List[tuple]\n the layers to be quantized using the quantization parameter factory\n \"\"\"\n\n def __init__(self, qp_factory, quant_layers, bitwidth):\n self._quant_param = qp_factory\n self._quant_layers = quant_layers\n self._bitwidth = bitwidth\n\n def __call__(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray) -> None\n self.quantize(layer, inpts, outpt, qkey)\n\n @abc.abstractmethod\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n The quantization method is responsible for quantizing a specific\n layer, given the inputs and output\n\n TODO: Can we integrate this better with the specified layer instead\n of passing layer objects to this method??\n \"\"\"\n raise NotImplementedError(\"\")\n\n '''\n def _find_input_nodes_with_th_set(self, node_name):\n # (str) -> str\n # Some layers (e.g. pooling) are stored under a quant util name\n node_name = node_name + '_QUANT_UTIL' if (node_name + '_QUANT_UTIL')\\\n in self._quant_param.th_layer_out else node_name \n if (node_name in self._quant_param.bw_layer_out and \\\n node_name in self._quant_param.th_layer_out):\n return node_name\n\n for inpt_name in self.runtime.get_input_node_names(node_name):\n d = self._find_input_nodes_with_th_set(inpt_name)\n if d is not None:\n return d\n\n return None\n '''\n\n def _internal_name(self, layer_name):\n # type: (str) -> str\n return layer_name + '_QUANT_UTIL' if (layer_name + '_QUANT_UTIL')\\\n in self._quant_param.th_layer_out else layer_name \n\n\nclass SkipQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n Skip quantization for the given operation\n \"\"\"\n pass\n\n\nclass DefaultQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n Default quantization for the given operation\n \"\"\"\n op_name = layer.name\n\n assert(len(layer.inputs) == len(inpts))\n if len(layer.inputs) != 1:\n # TODO\n logger.warn(\"[INTERNAL WARNING] DefaultQuant operation executed on\"\n \" layer: {} with type: {} has zero or multiple inputs.\"\n \" Please check if this is correct.\"\n .format(layer.name, layer.type))\n\n if not self._internal_name(op_name) in self._quant_param.th_layer_out:\n # ! 
check because Relu layer should not adjust its parent layer\n # quantization params \n #\t(this happens for Relu layer after eltwise layer) -> TODO\n # List is used to make the thresholds point to the same objects\n input_name = self._internal_name(layer.inputs[0])\n th_in_lst = self._quant_param.th_layer_out[input_name]\n\n self._quant_param.bw_layer_in[op_name] = self._bitwidth\n self._quant_param.th_layer_in[op_name] = th_in_lst\n self._quant_param.bw_layer_out[op_name] = self._bitwidth\n self._quant_param.th_layer_out[op_name] = th_in_lst\n\n# INPUT\n\n\nclass InputQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n\n logger.info(\"\\nQuantize input layer: {}\".format(op_name))\n\n self._quant_layers[qkey].append((op_name, op_type, None))\n\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n \n # TODO Using ThresholdLayerOutputs might return invalid outputs\n # likely because of invalid Kulback-Leibler divergence parameters\n # (zeros). This happens for PyTorch (encountered with AlexNet) \n # input values.\n #threshold = ThresholdLayerOutputs(outpt, self._bitwidth)\n threshold = ThresholdLayerInputs(outpt, self._bitwidth)\n\n self._quant_param.bw_layer_in[op_name] = self._bitwidth\n self._quant_param.th_layer_in[op_name] = [threshold] # list to make mutable\n self._quant_param.bw_layer_out[op_name] = self._bitwidth\n self._quant_param.th_layer_out[op_name] = [threshold] # list to make mutable\n\n logger.debug(\"Output (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\"\n .format(outpt.shape, outpt_min, outpt_max, outpt_std))\n\n# CONVOLUTION\n\n\nclass ConvQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n\n logger.info(\"\\nQuantize convolution layer: {}\".format(op_name))\n\n # TODO: adding x to _quant_layers list as layers param, is this correct?\n # TODO: Adding x here -> numpy array not hashable -> add None for now\n self._quant_layers[qkey].append((op_name, op_type, None))\n\n\n ## INPUT\n assert(len(inpts) <= 3)\n # TODO: we are now getting the threshold for input layer from a previous layer.\n # Note that there should exist a previous layer with a threshold as thresholds\n # are computed on input layers\n\n # input_names = self.runtime.get_input_node_names(op_name)\n input_name = layer.inputs[0]\n logger.debug(\"Found input names: {}\".format(input_name))\n #assert(len(input_name) == 1)\n #input_node_with_th = self._find_input_nodes_with_th_set(op_name)\n #threshold = self._quant_param.th_layer_out[input_node_with_th]\n int_input_name = self._internal_name(input_name)\n th_in_lst = self._quant_param.th_layer_out[int_input_name]\n\n self._quant_param.bw_layer_in[op_name] = self._bitwidth\n self._quant_param.th_layer_in[op_name] = th_in_lst\n\n inpt = inpts[0]\n inpt_min, inpt_max, inpt_std = np.min(inpt), np.max(inpt), np.std(inpt)\n #threshold = ThresholdLayerOutputs(inpt, self._bitwidth) [OLD]\n logger.debug(\"Input (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\".format(inpt.shape, inpt_min, inpt_max, inpt_std))\n \n\n ## PARAMS\n if len(inpts) > 0 and params['W'] is not None:\n raise ValueError(\"Convolution kernel should be passed either as an input\"\\\n \" or a parameter but not both\")\n elif 
len(inpts) > 1:\n weights = inpts[1]\n else:\n weights = params[\"W\"]\n weights = np.transpose(weights, (3,2,0,1))\n weights_min, weights_max, weights_std = np.min(weights), np.max(weights), np.std(weights)\n \n # weights should have format OIHW, which they should already be??\n # TODO: pass data_layout??\n threshold = ThresholdWeights(weights, self._bitwidth)\n\n self._quant_param.bw_params[op_name] = self._bitwidth\n self._quant_param.th_params[op_name] = threshold\n\n logger.debug(\"Weights (outchan,inchan,h,w) = {}, Min: {}, Max: {}, Stdev: {}\".format(weights.shape, weights_min, weights_max, weights_std))\n\n\n ## OUTPUTS\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n threshold = ThresholdLayerOutputs(outpt, self._bitwidth)\n logger.debug(threshold)\n\n self._quant_param.bw_layer_out[op_name] = self._bitwidth\n self._quant_param.th_layer_out[op_name] = [threshold]\n \n logger.debug(\"Output (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\".format(outpt.shape, outpt_min, outpt_max, outpt_std))\n\n## SCALE\n\nclass ScaleQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n\n logger.info(\"\\nQuantize scaling layer: {}\".format(op_name))\n\n ## INPUT\n\n # input_names = self.runtime.get_input_node_names(op_name)\n # logger.debug(\"Found input names: {}\".format(input_names))\n # assert(len(input_names) == 1)\n # input_node_with_th = self._find_input_nodes_with_th_set(op_name)\n # threshold = self._quant_param.th_layer_out[input_node_with_th]\n\n input_name = layer.inputs[0]\n logger.debug(\"Found input names: {}\".format(input_name))\n int_input_name = self._internal_name(input_name)\n th_in_lst = self._quant_param.th_layer_out[int_input_name]\n\n self._quant_param.bw_layer_in[op_name] = self._bitwidth\n self._quant_param.th_layer_in[op_name] = th_in_lst\n\n inpt = inpts[0]\n inpt_min, inpt_max, inpt_std = np.min(inpt), np.max(inpt), np.std(inpt)\n #threshold = ThresholdLayerOutputs(inpt, self._bitwidth) [OLD]\n logger.debug(\"Input (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\".format(inpt.shape, inpt_min, inpt_max, inpt_std))\n \n ## PARAMS\n\n if len(inpts) > 0 and params['gamma'] is not None:\n raise ValueError(\"scaling parameter should be passed either as an \"\n \"input or a parameter but not both\")\n elif len(inpts) > 1:\n gamma = inpts[1]\n else:\n gamma = params[\"gamma\"]\n #weights = np.transpose(weights, (3,2,0,1))\n\n # weights should have format IOHW, which they should already be??\n # TODO: pass data_layout??\n th_params = ThresholdWeights(gamma, self._bitwidth)\n\n #logger.debug(\"th_params: {}\".format(th_params))\n\n self._quant_param.bw_params[op_name] = self._bitwidth\n self._quant_param.th_params[op_name] = th_params\n\n gamma_min, gamma_max, gamma_std = \\\n np.min(gamma), np.max(gamma), np.std(gamma)\n\n logger.debug(\"Weights (channels) = {}, Min: {}, Max: {}, Stdev: {}\"\n .format(gamma.shape, gamma_min, gamma_max, gamma_std))\n\n ## OUTPUT\n\n # Only calculate threshold on outputs\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n threshold = ThresholdLayerOutputs(outpt, self._bitwidth)\n\n self._quant_param.bw_layer_out[op_name] = self._bitwidth\n self._quant_param.th_layer_out[op_name] = [threshold]\n\n # TODO: do Float2Fixed2Float??\n\n logger.debug(\"Output (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\"\n 
.format(outpt.shape, outpt_min, outpt_max, outpt_std))\n\n # sf_params = th_params / (np.power(2.0, self._bitwidth - 1) - 1)\n # TODO: Adding th_params / 2**(bitwidth-1) -1 = sf_params as layer params\n # to make sure that the 'clip' part in calculating the multiplier in \n # quantize_base.py becomes equal to 1\n # TODO: why is this clip part there?????\n\n # !! scaling is done by an elementwise addition folowwed by scale and shift \n # from quantization parameters, instead of an explicit scale\n self._quant_layers[qkey].append(\n [op_name, op_type, [LayerParams(gamma)]])\n\n## BATCHNORM\n\nclass BatchNormQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n raise NotImplementedError(\"\")\n\n## CONCAT\n\nclass ConcatQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n\n # TODO: Overlap with quantize_tf.py\n\n logger.info(\"Quantize concat layer: {}\".format(op_name))\n self._quant_layers[qkey].append((op_name, op_type, None))\n\n # Only calculate threshold on outputs\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n threshold = ThresholdLayerOutputs(outpt, self._bitwidth)\n\n for input_name in layer.inputs:\n int_input_name = self._internal_name(input_name)\n self._quant_param.th_layer_out[int_input_name][0] = threshold\n\n input_name = self._internal_name(layer.inputs[0])\n th_lst = self._quant_param.th_layer_out[input_name]\n\n self._quant_param.th_layer_in[op_name] = th_lst \n self._quant_param.th_layer_out[op_name] = th_lst[:] # TODO for DenseNet-like architectures\n self._quant_param.bw_layer_in[op_name] = self._bitwidth\n self._quant_param.bw_layer_out[op_name] = self._bitwidth\n\n # for inpt_name in self.runtime.get_input_node_names(op_name):\n # d = self._find_input_nodes_with_th_set(inpt_name)\n # logger.debug(\"Inpt name: {}, find: {}\".format(inpt_name, d))\n # if d is not None:\n # self._quant_param.bw_layer_out[d] = self._bitwidth\n # self._quant_param.th_layer_out[d] = threshold\n\n # TODO How to handle quantization for concat layers after concat layers?\n # See DenseNet kind of architectures.\n\n input_names = layer.inputs\n for input_name in input_names:\n input_name = self._internal_name(input_name)\n self._quant_param.th_layer_out[input_name][0] = th_lst[0]\n\n logger.debug(\"Output (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\"\n .format(outpt.shape, outpt_min, outpt_max, outpt_std))\n\nclass ConcatQuantWithScale(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n\n # TODO: Overlap with quantize_tf.py\n\n logger.info(\"Quantize concat layer: {}\".format(op_name))\n self._quant_layers[qkey].append((op_name, op_type, None))\n\n # Only calculate threshold on outputs\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n threshold = ThresholdLayerOutputs(outpt, self._bitwidth)\n\n self._quant_param.th_layer_in[op_name] = [threshold]\n self._quant_param.th_layer_out[op_name] = [threshold] # TODO for DenseNet-like architectures\n self._quant_param.bw_layer_in[op_name] = self._bitwidth\n self._quant_param.bw_layer_out[op_name] = self._bitwidth\n\n # TODO How to 
handle quantization for concat layers after concat layers?\n # See DenseNet kind of architectures.\n\n logger.debug(\"Output (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\"\n .format(outpt.shape, outpt_min, outpt_max, outpt_std))\n\n\n## ELTWISE\n\nclass EltwiseQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n\n logger.info(\"Quantize elemtwise layer: {}\".format(op_name))\n\n self._quant_layers[qkey].append((op_name, op_type, None))\n \n ## INPUT\n\n # TODO: First add inputs and then compute new threshold and set thresholds of \n # inputs to this new threshold??\n inpt_min, inpt_max, inpt_std = np.min(np.array(inpts))\\\n , np.max(np.array(inpts)), np.std(np.array(inpts))\n # # Get thresholds from inputs th_out\n # input_names = self.runtime.get_input_node_names(op_name)\n # logger.debug(\"Found input names: {}\".format(input_names))\n # assert(len(input_names) == 2)\n\n # input_nodes_with_th = \\\n # [self._find_input_nodes_with_th_set(in_name) for in_name in input_names]\n # threshold = self._quant_param.th_layer_out[max(input_nodes_with_th, \n # key=lambda in_name: self._quant_param.th_layer_out[in_name])]\n\n input_names = layer.inputs\n th_in_lst = self._quant_param.th_layer_out[\n self._internal_name(max(input_names, key=lambda in_name: \n self._quant_param.th_layer_out[self._internal_name(in_name)]))]\n\n # threshold = ThresholdLayerOutputs(inpts[0], self._bitwidth)\n # for i in range(1, len(inpts)):\n # threshold = max(threshold, ThresholdLayerOutputs(inpts[i], self._bitwidth)) \n #threshold = np.maximum(threshold_left, threshold_right)\n logger.debug(\"Threshold in: {}\".format(th_in_lst))\n\n # Set the output thresholds of the inputs with thresholds set to make sure\n # that all inputs are on the same scale before being added\n # TODO\n # for inpt_name in self.runtime.get_input_node_names(op_name):\n # d = self._find_input_nodes_with_th_set(inpt_name)\n # logger.debug(\"Set threshold for ancestor: {}\".format(d))\n # if d is None:\n # raise ValueError(\"No input ancestor to elementwise operation: {} \"\\\n # \"with threshold set. 
This is required.\")\n # self._quant_param.bw_layer_out[d] = self._bitwidth\n # self._quant_param.th_layer_out[d] = threshold\n\n for input_name in input_names:\n input_name = self._internal_name(input_name)\n self._quant_param.th_layer_out[input_name][0] = th_in_lst[0]\n\n self._quant_param.bw_layer_in[op_name] = self._bitwidth\n self._quant_param.th_layer_in[op_name] = th_in_lst\n\n logger.debug(\"Input left shape: {}, right shape:{}, Min: {}, Max: {}, \"\\\n \"Stdev: {}\".format(inpts[0].shape, inpts[1].shape, inpt_min, \n inpt_max, inpt_std))\n\n # Only calculate threshold on outputs\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n threshold_out = ThresholdLayerOutputs(outpt, self._bitwidth)\n\n logger.debug(\"Threshold out: {}\".format(threshold_out))\n\n self._quant_param.bw_layer_out[op_name] = self._bitwidth\n self._quant_param.th_layer_out[op_name] = [threshold_out]\n\n logger.debug(\"Output (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\"\n .format(outpt.shape, outpt_min, outpt_max, outpt_std))\n\n\nclass EltwiseQuantWithScale(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n\n logger.info(\"Quantize elemtwise layer: {}\".format(op_name))\n\n self._quant_layers[qkey].append((op_name, op_type, None))\n \n ## INPUT\n\n # TODO: First add inputs and then compute new threshold and set thresholds of \n # inputs to this new threshold??\n inpt_min, inpt_max, inpt_std = np.min(np.array(inpts))\\\n , np.max(np.array(inpts)), np.std(np.array(inpts))\n # Get thresholds from inputs th_out\n # input_names = self.runtime.get_input_node_names(op_name)\n # logger.debug(\"Found input names: {}\".format(input_names))\n # assert(len(input_names) == 2)\n\n # input_nodes_with_th = \\\n # [self._find_input_nodes_with_th_set(in_name) for in_name in input_names]\n # threshold = self._quant_param.th_layer_out[max(input_nodes_with_th, \n # key=lambda in_name: self._quant_param.th_layer_out[in_name])]\n input_names = layer.inputs\n th_in_lst = self._quant_param.th_layer_out[\n self._internal_name(max(input_names, key=lambda in_name: \n self._quant_param.th_layer_out[self._internal_name(in_name)]))]\n\n # threshold = ThresholdLayerOutputs(inpts[0], self._bitwidth)\n # for i in range(1, len(inpts)):\n # threshold = max(threshold, ThresholdLayerOutputs(inpts[i], self._bitwidth)) \n #threshold = np.maximum(threshold_left, threshold_right)\n logger.debug(\"Threshold in: {}\".format(th_in_lst))\n\n # Set the output thresholds of the inputs with thresholds set to make sure\n # that all inputs are on the same scale before being added\n # TODO\n # for inpt_name in self.runtime.get_input_node_names(op_name):\n # d = self._find_input_nodes_with_th_set(inpt_name)\n # logger.debug(\"Set threshold for ancestor: {}\".format(d))\n # if d is None:\n # raise ValueError(\"No input ancestor to elementwise operation: {} \"\\\n # \"with threshold set. 
This is required.\")\n # self._quant_param.bw_layer_out[d] = self._bitwidth\n # self._quant_param.th_layer_out[d] = threshold\n\n self._quant_param.bw_layer_in[op_name] = self._bitwidth\n self._quant_param.th_layer_in[op_name] = th_in_lst\n\n logger.debug(\"Input left shape: {}, right shape:{}, Min: {}, Max: {}, \"\\\n \"Stdev: {}\".format(inpts[0].shape, inpts[1].shape, inpt_min, \n inpt_max, inpt_std))\n\n # Only calculate threshold on outputs\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n threshold_out = ThresholdLayerOutputs(outpt, self._bitwidth)\n\n logger.debug(\"Threshold out: {}\".format(threshold_out))\n\n self._quant_param.bw_layer_out[op_name] = self._bitwidth\n self._quant_param.th_layer_out[op_name] = [threshold_out]\n\n logger.debug(\"Output (n,c,h,w) = {}, Min: {}, Max: {}, Stdev: {}\"\n .format(outpt.shape, outpt_min, outpt_max, outpt_std))\n\n\n##POOLING\n\nclass PoolQuant(BaseQuantOp):\n\n def quantize(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO\n \"\"\"\n if layer.op == 'Max':\n return self._quantize_max(layer, inpts, outpt, qkey)\n elif layer.op =='Avg':\n return self._quantize_avg(layer, inpts, outpt, qkey)\n else:\n raise ValueError(\"Unsupported pooling operation: {}. Only `Max` \"\\\n \" and `Avg` are valid operations.\")\n \n def _quantize_max(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray, str) -> None\n \"\"\"\n TODO Describe pool quantization in high level here\n \"\"\"\n assert(layer.op == 'Max')\n \n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n\n logger.info(\"Quantize max pooling layer: {}\".format(op_name))\n\n # TODO Change name of max pool because FPGA does wrong things if \n # the corresponding quantization parameters are provided\n quant_name = op_name + '_QUANT_UTIL'\n\n # NOTE: Avg pool computes sum instead of average on FPGA, division is left to\n # the quantization parameters so the pool_divisor should be included \n # for average pooling. 
For maxpool this divisor is not necessary and\n # we set it to 1\n pool_divisor = [1]\n self._quant_layers[qkey].append(\n (quant_name, op_type, LayerParams(pool_divisor)))\n\n\n ## INPUT\n inpt = inpts[0]\n inpt_min, inpt_max, inpt_std = np.min(inpt), np.max(inpt), np.std(inpt)\n # th_in = ThresholdLayerOutputs(inpt, self._bitwidth)\n # input_node_with_th = self._find_input_nodes_with_th_set(op_name)\n assert(len(layer.inputs) == 1)\n input_name = self._internal_name(layer.inputs[0])\n th_in_lst = self._quant_param.th_layer_out[input_name]\n\n self._quant_param.bw_layer_in[quant_name] = self._bitwidth\n self._quant_param.th_layer_in[quant_name] = th_in_lst\n\n logger.debug(\"Input (n,c,h,w) = ({}), Min: {}, Max: {}, Stdev: {}\"\n .format(inpt.shape, inpt_min, inpt_max, inpt_std))\n \n ## NO PARAMS\n\n ## OUTPUTS\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n # th_out = ThresholdLayerOutputs(outpt, self._bitwidth)\n\n self._quant_param.bw_layer_out[quant_name] = self._bitwidth\n self._quant_param.th_layer_out[quant_name] = th_in_lst\n \n logger.debug(\"Output (n,c,h,w) = ({}), Min: {}, Max: {}, Stdev: {}\"\n .format(outpt.shape, outpt_min, outpt_max, outpt_std))\n \n def _quantize_avg(self, layer, inpts, outpt, qkey):\n # (RtLayer, List[numpy.ndarray], numpy.ndarray) -> None\n \"\"\"\n TODO Describe pool quantization in high level here\n \"\"\"\n assert(layer.op == 'Avg')\n \n op_name, op_type, params = layer.name, layer.type, layer.get_params()\n \n logger.info(\"Quantize average pool layer: {}\".format(op_name))\n\n logger.debug(\"Kernel product = {}\".format(np.prod(layer.ksize)))\n quant_name = op_name\n # NOTE: Avg pool computes sum instead of average on FPGA, division is left to\n # the quantization parameters so the pool_divisor should be included \n # here for average pooling\n pool_divisor = [np.prod(layer.ksize)]\n self._quant_layers[qkey].append(\n (op_name, op_type, LayerParams(pool_divisor)))\n\n\n ## INPUT\n inpt = inpts[0]\n inpt_min, inpt_max, inpt_std = np.min(inpt), np.max(inpt), np.std(inpt)\n \n # th_in = ThresholdLayerOutputs(inpt, self._bitwidth)\n # input_node_with_th = self._find_input_nodes_with_th_set(op_name)\n # th_in = self._quant_param.th_layer_out[input_node_with_th]\n assert(len(layer.inputs) == 1)\n input_name = self._internal_name(layer.inputs[0])\n th_in_lst = self._quant_param.th_layer_out[input_name]\n\n self._quant_param.bw_layer_in[quant_name] = self._bitwidth\n self._quant_param.th_layer_in[quant_name] = th_in_lst\n\n logger.debug(\"Input (n,c,h,w) = ({}), Min: {}, Max: {}, Stdev: {}\"\n .format(inpt.shape, inpt_min, inpt_max, inpt_std))\n \n ## NO PARAMS\n\n ## OUTPUTS\n outpt_min, outpt_max, outpt_std = np.min(outpt), np.max(outpt), np.std(outpt)\n th_out = ThresholdLayerOutputs(outpt, self._bitwidth)\n\n self._quant_param.bw_layer_out[quant_name] = self._bitwidth\n self._quant_param.th_layer_out[quant_name] = [th_out]\n \n logger.debug(\"Output (n,c,h,w) = ({}), Min: {}, Max: {}, Stdev: {}\"\n .format(outpt.shape, outpt_min, outpt_max, outpt_std))\n" ]
[ [ "numpy.min", "numpy.max", "numpy.std", "numpy.prod", "numpy.transpose", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
flabowski/POD-UQNN
[ "1c81be432e69d24ae894828f42918fbc1fe54bc1" ]
[ "poduqnn/pod.py" ]
[ "\"\"\"Module to handle Proper Orthogonal Decomposition tasks.\"\"\"\nimport numpy as np\nfrom numba import njit\n\n\n@njit(parallel=False)\ndef perform_pod(U, eps=0., n_L=0, verbose=True):\n \"\"\"POD algorithmm.\"\"\"\n # Number of DOFs\n n_h = U.shape[0]\n\n # Number of snapshots n_s x Number of time steps\n n_st = U.shape[1]\n\n # SVD algoritm call\n _, D, ZT = np.linalg.svd(U, full_matrices=False)\n\n # Getting MATLAB-like orientation\n Z = ZT.T\n \n # Storing eigenvalues and their sum\n lambdas = D**2\n sum_lambdas = np.sum(lambdas)\n \n # Finding n_L\n if n_L == 0:\n sum_lambdas_trunc = 0.\n for i in range(n_st):\n sum_lambdas_trunc += lambdas[i]\n n_L += 1\n if sum_lambdas_trunc/sum_lambdas >= (1 - eps):\n break\n \n # Truncating according to n_L\n lambdas_trunc = lambdas[0:n_L]\n \n if verbose:\n print(\"Contructing the reduced bases V\")\n\n U = np.ascontiguousarray(U)\n\n V = np.zeros((n_h, n_L))\n for i in range(n_L):\n Z_i = np.ascontiguousarray(Z[:, i])\n V[:, i] = U.dot(Z_i) / np.sqrt(lambdas_trunc[i])\n\n return np.ascontiguousarray(V)\n\n\n# @njit(parallel=False)\ndef perform_fast_pod(U, eps, eps_init):\n \"\"\"Two-step version of POD algorithm.\"\"\"\n print(\"Performing initial time-trajectory POD\")\n # Number of snapshots n_s x Number of space nodes (n_x * n_y * ...)\n n_s = U.shape[-1]\n\n T_list = []\n for k in range(n_s):\n U_k = U[:, :, k]\n # Retrieving each time-trajectory\n T_k = perform_pod(U_k, eps=eps_init, n_L=0, verbose=False)\n T_list.append(T_k)\n\n # Reshaping the 3d-mat into a 2d-mat\n U_f = np.concatenate(T_list, axis=1)\n\n print(\"Performing SVD\")\n return perform_pod(U_f, eps=eps, n_L=0, verbose=True)\n" ]
[ [ "numpy.linalg.svd", "numpy.sqrt", "numpy.ascontiguousarray", "numpy.concatenate", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alexcwsmith/imageProcessing
[ "266afd661a299d236e5b7d34a1b867bf29277bf9" ]
[ "clearMapSubregionParser_old.py" ]
[ "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 15 17:30:50 2019\n\n@author: smith\n\"\"\"\n\nimport ClearMap.IO.IO as io\nimport numpy as np\nimport pandas as pd\nimport os\n\n\nsamples = ['IA1_RT', 'IA1_RB', 'IA1_LT', 'IA1_LB', \n 'IA2_NP', 'IA2_RT', 'IA2_RB', 'IA2_LT', 'IA2_LB']\n\n\ndataList = []\nfor mouse in samples:\n sampleName = mouse\n baseDirectory = '/d2/studies/ClearMap/IA_iDISCO/' + sampleName\n\n\n#sampleName = 'IA2_LT'\n#baseDirectory = '/d2/studies/ClearMap/IA_iDISCO/' + sampleName\n#sampleName = mouse\n##IMPORT PREVIOUSLY PRE-PROCESSED POINTS DATA\n hemisphere = '_left'\n data = io.readData(os.path.join(baseDirectory, sampleName + '_Caudoputamen' + '_isolated_points' + hemisphere + '.tif'))\n points = np.nonzero(data)[:3]\n dfPoints = pd.DataFrame(points, index=['x', 'y', 'z']).T\n dfPoints.rename(columns={0: \"x\", 1: \"y\", 2: \"z\"})\n \n ###View Range:\n x_range=dfPoints.x.max() - dfPoints.x.min()\n y_range=dfPoints.y.max() - dfPoints.y.min()\n z_range=dfPoints.z.max() - dfPoints.z.min()\n print(x_range, y_range, z_range)\n \n #Bin Y axis\n dfPoints['y_bins']=pd.cut(dfPoints['y'], bins=2)\n dfPoints['y_bins'].value_counts()\n dfPoints_counts_y = dfPoints['y_bins'].value_counts()\n dfPoints_sorted_y = dfPoints_counts_y.sort_index()\n dfPoints_sorted_y.to_excel(os.path.join(baseDirectory, 'y_binned_striatum_Jan11' + hemisphere + '.xlsx'))\n \n #figY = dfPoints_sorted_y.plot.bar(figsize=(20,10))\n print(dfPoints_sorted_y)\n \n #2nd Iteration - Splits anterior half of striatum into 3 subregions\n firstHalf = dfPoints_sorted_y[0]\n dfPoints_ant = dfPoints.sort_values(by=['y'])[:firstHalf]\n dfPoints_ant['y_iter2'] = pd.cut(dfPoints_ant['y'], bins=3)\n dfPoints_ant['y_iter2'].value_counts()\n dfPoints_ant.sort_values('y_iter2')\n dfPoints_ant_count = dfPoints_ant['y_iter2'].value_counts()\n dfPoints_ant_count.sort_index()\n \n ant0 = dfPoints_ant_count.sort_index()[0]\n ant1 = dfPoints_ant_count.sort_index()[1]\n ant2 = dfPoints_ant_count.sort_index()[2:]\n ant2 = ant2[0]\n \n #Check that bins were split correctly: \n if firstHalf != ant0+ant1+ant2:\n raise ValueError('Variables Not Equal!')\n else: print('Values Are Equal')\n \n #Split each of the 3 subregions into medial/lateral and count cells: \n dfPoints_ant0 = dfPoints_ant.sort_values(by=['y_iter2'])[:ant0]\n dfPoints_ant0['x_bins0'] = pd.cut(dfPoints_ant0['x'], bins=2)\n dfPoints_ant0_count = dfPoints_ant0['x_bins0'].value_counts()\n dfPoints_ant0_count.sort_index()\n \n dfPoints_ant1 = dfPoints_ant.sort_values(by=['y_iter2'])[ant0:ant0+ant1]\n dfPoints_ant1['x_bins1'] = pd.cut(dfPoints_ant1['x'], bins=2)\n dfPoints_ant1_count = dfPoints_ant1['x_bins1'].value_counts()\n dfPoints_ant1_count.sort_index()\n \n dfPoints_ant2 = dfPoints_ant.sort_values(by=['y_iter2'])[ant0+ant1:]\n dfPoints_ant2['x_bins2'] = pd.cut(dfPoints_ant2['x'], bins=2)\n dfPoints_ant2_count = dfPoints_ant2['x_bins2'].value_counts()\n dfPoints_ant2_count.sort_index()\n \n \n if hemisphere == '_left':\n aDLS_combined = dfPoints_ant0_count[0]\n mDLS_combined = dfPoints_ant1_count[0]\n pDLS_combined = dfPoints_ant2_count[0]\n aDMS_combined = dfPoints_ant0_count[1]\n mDMS_combined = dfPoints_ant1_count[1]\n pDMS_combined = dfPoints_ant2_count[1]\n else:\n aDLS_combined = dfPoints_ant0_count[1]\n mDLS_combined = dfPoints_ant1_count[1]\n pDLS_combined = dfPoints_ant2_count[1]\n aDMS_combined = dfPoints_ant0_count[0]\n mDMS_combined = dfPoints_ant1_count[0]\n pDMS_combined = dfPoints_ant2_count[0]\n # \n mouse_data = 
[sampleName, hemisphere, aDLS_combined, mDLS_combined, pDLS_combined, aDMS_combined, mDMS_combined, pDMS_combined]\n\n dataList.append(mouse_data)\n\nallData = pd.DataFrame(data=dataList, columns=['mouse', 'hemisphere', 'aDLS', 'mDLS', 'pDLS', 'aDMS', 'mDMS', 'pDMS']) \nallData.to_excel('/d2/studies/ClearMap/IA_iDISCO/Striatum_Subregion_Counts' + hemisphere + '_3bins_Jan11.xlsx')\n\n\n" ]
[ [ "numpy.nonzero", "pandas.cut", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
TamasKormendi/tensorflow-music-generator
[ "c01b41fd7e498c06f746df731aab7f7eab74697c" ]
[ "dataloader_progressive.py" ]
[ "import tensorflow as tf\nimport numpy as np\nimport utils\nimport glob\nimport os\n\n# Positive and negative range of a 16-bit signed int\n# with this we can scale the data to [-1, 1] inclusive range\nBIT_RANGE = 32767\n\nclass Dataloader(object):\n\n def __init__(self, window_length, batch_size, filepath, num_channels, augmentation_level=0):\n \"\"\"\n :param window_length: the amount of samples passed to the 1st conv layer\n :param batch_size: the amount of desired batches\n :param filepath: directory path to the directory where training data is stored\n \"\"\"\n self.num_channels = num_channels\n self.window_length = window_length\n self.batch_size = batch_size\n\n self.sampling_rate, self.all_sliced_samples = self.process_directory(filepath, augmentation_level=augmentation_level)\n\n # Adapted from https://stackoverflow.com/questions/14822184/is-there-a-ceiling-equivalent-of-operator-in-python/17511341#17511341\n # Basically math.ceil() but with support for big ints\n self.num_batches = -(-len(self.all_sliced_samples) // batch_size)\n\n def process_file(self, filepath):\n \"\"\"\n Load a 16-bit PCM wav file and preprocess it:\n Scale values to [-1, 1] - inclusive\n :return: Sampling rate and a list containing the data\n \"\"\"\n\n sampling_rate, raw_samples = utils.read_wav_file(filepath)\n\n scaled_samples = np.array(raw_samples)\n\n scaled_samples.reshape(-1, self.num_channels)\n scaled_samples.shape = (len(scaled_samples), self.num_channels)\n\n # Probably can be made float16\n scaled_samples = scaled_samples.astype(np.float32) / BIT_RANGE\n\n print(\"File at {} loaded\".format(filepath))\n\n return sampling_rate, scaled_samples\n\n def process_directory(self, directory_path, augmentation_level=0):\n \"\"\"\n Load all the wav files in a directory, pad them to be divisible by window_length, \\n\n slice them up into window_length chunks and return them as a numpy array\n :param directory_path: the path to the directory where the WAV files are\n :param augmentation_level: specify the amount of data augmentation. 
Only use with very small datasets\n :return: the sampling rate and all the data as a sliced (window_length chunks) numpy array\n \"\"\"\n all_samples = np.array([])\n all_samples.shape = (0, self.num_channels)\n\n sliced_samples = []\n sampling_rate = 0\n\n # All the wav files should have the same sampling rate\n for filename in glob.glob(os.path.join(directory_path, \"*.wav\")):\n sampling_rate, current_samples = self.process_file(filename)\n\n all_samples = np.concatenate((all_samples, current_samples))\n\n assert (len(all_samples) != 0), \"No training data provided\"\n\n # Data augmentation: offsets the start of samples by win_length // 10 and appends it to the\n # end of the all_samples list - can be very memory expensive so use it only for small datasets\n if augmentation_level > 0:\n assert (len(all_samples) > self.window_length), \"Data augmentation is switched on but there are fewer samples than the window length\"\n assert augmentation_level < 10, \"Data augmentation level should be below 10, it was {}\".format(augmentation_level)\n\n augmented_data_start = self.window_length // 10\n augmented_data_end = len(all_samples)\n\n for i in range(augmentation_level):\n all_samples = np.concatenate((all_samples, all_samples[augmented_data_start:augmented_data_end]))\n\n augmented_data_start += self.window_length // 10\n\n # Pad our all_samples array so it is divisible by window_length\n if len(all_samples) % self.window_length != 0:\n remainder = len(all_samples) % self.window_length\n\n padding_length = self.window_length - remainder\n if self.num_channels == 1:\n pad_list = [0] * padding_length\n pad_list = np.array(pad_list)\n pad_list.shape = (padding_length, 1)\n all_samples = np.concatenate((all_samples, pad_list))\n else:\n all_samples = np.concatenate((all_samples, [[0, 0]] * padding_length))\n\n # Slice all the data into window_length chunks so they can be batched later\n index = 0\n\n prev_slice_length = 0\n current_slice_length = 0\n counter = 0\n mismatch = False\n\n while index < len(all_samples):\n if mismatch:\n print(\"Not the last value\")\n\n current_slice = all_samples[index:index + self.window_length]\n\n if counter == 0:\n prev_slice_length = len(current_slice)\n current_slice_length = len(current_slice)\n counter += 1\n else:\n current_slice_length = len(current_slice)\n\n if current_slice_length != prev_slice_length:\n print(\"Slice length mismatch, previous: {}, current: {}\".format(prev_slice_length, current_slice_length))\n mismatch = True\n\n current_slice_reshaped = np.asarray(current_slice, dtype=np.float32)\n # Second argument is the channel amount\n current_slice_reshaped.shape = (self.window_length, self.num_channels)\n\n sliced_samples.append(current_slice_reshaped)\n\n index += self.window_length\n\n prev_slice_length = len(current_slice)\n\n print(\"All files loaded\")\n\n return sampling_rate, np.asarray(sliced_samples, dtype=np.float32)\n\n # This function is vaguely based on parts from a similar function from https://github.com/chrisdonahue/wavegan/blob/v1/loader.py\n # It is more true for the same function in \"dataloader.py\" but since the one here and there share a few similarities\n # I found it safer to mention it here too\n def get_next(self):\n \"\"\"\n Get the next window_size samples and return them to be used in an input feed_dict (for now)\n In the future might move to a batched implementation\n :return: Return the next window_size samples\n \"\"\"\n\n data_placeholder = tf.placeholder(self.all_sliced_samples.dtype, 
self.all_sliced_samples.shape, name=\"data\")\n\n # Create a dataset, shuffle it and and batch it\n\n dataset = tf.data.Dataset.from_tensor_slices(data_placeholder)\n dataset = dataset.shuffle(buffer_size=4096)\n\n # If (self.batch_size, True) the last batch gets dropped if size < normal batch_size\n # Current implementation is way too reliant on fixed batch sizes so the remainder is dropped\n dataset = dataset.batch(self.batch_size, True)\n\n dataset = dataset.repeat()\n iterator = dataset.make_initializable_iterator()\n\n return iterator" ]
[ [ "numpy.asarray", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.placeholder", "numpy.concatenate", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
53X/TextAttack
[ "e6a7969abc1e28a2a8a7e2ace709b78eb9dc94be" ]
[ "textattack/models/helpers/lstm_for_classification.py" ]
[ "import torch\nimport torch.nn as nn\n\nimport textattack\nfrom textattack.models.helpers import GloveEmbeddingLayer\nfrom textattack.models.helpers.utils import load_cached_state_dict\nfrom textattack.shared import utils\n\n\nclass LSTMForClassification(nn.Module):\n \"\"\" A long short-term memory neural network for text classification. \n \n We use different versions of this network to pretrain models for text \n classification.\n \"\"\"\n\n def __init__(\n self,\n hidden_size=150,\n depth=1,\n dropout=0.3,\n num_labels=2,\n max_seq_length=128,\n model_path=None,\n ):\n super().__init__()\n if depth <= 1:\n # Fix error where we ask for non-zero dropout with only 1 layer.\n # nn.module.RNN won't add dropout for the last recurrent layer,\n # so if that's all we have, this will display a warning.\n dropout = 0\n self.drop = nn.Dropout(dropout)\n self.emb_layer = GloveEmbeddingLayer()\n self.word2id = self.emb_layer.word2id\n self.encoder = nn.LSTM(\n input_size=self.emb_layer.n_d,\n hidden_size=hidden_size // 2,\n num_layers=depth,\n dropout=dropout,\n bidirectional=True,\n )\n d_out = hidden_size\n self.out = nn.Linear(d_out, num_labels)\n self.tokenizer = textattack.models.tokenizers.SpacyTokenizer(\n self.word2id, self.emb_layer.oovid, self.emb_layer.padid, max_seq_length\n )\n\n if model_path is not None:\n self.load_from_disk(model_path)\n\n def load_from_disk(self, model_path):\n self.load_state_dict(load_cached_state_dict(model_path))\n self.word_embeddings = self.emb_layer.embedding\n self.lookup_table = self.emb_layer.embedding.weight.data\n self.to(utils.device)\n self.eval()\n\n def forward(self, _input):\n # ensure RNN module weights are part of single contiguous chunk of memory\n self.encoder.flatten_parameters()\n\n emb = self.emb_layer(_input.t())\n emb = self.drop(emb)\n\n output, hidden = self.encoder(emb)\n output = torch.max(output, dim=0)[0]\n\n output = self.drop(output)\n pred = self.out(output)\n return nn.functional.softmax(pred, dim=-1)\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.max", "torch.nn.LSTM", "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
asmundkk/Robotics
[ "fd801b1ff35640fee99948762de720866e88e13f" ]
[ "Exersice6/problem3.py" ]
[ "from numpy import array, pi, sqrt\nfrom modern_robotics import IKinSpace\n\n\"\"\"IKinSpace:\nComputes inverse kinematics in the space frame for an open chain robot\n\n:param Slist: The joint screw axes in the space frame when the\n manipulator is at the home position, in the format of a\n matrix with axes as the columns\n:param M: The home configuration of the end-effector\n:param T: The desired end-effector configuration Tsd\n:param thetalist0: An initial guess of joint angles that are close to\n satisfying Tsd\n:param eomg: A small positive tolerance on the end-effector orientation\n error. The returned joint angles must give an end-effector\n orientation error less than eomg\n:param ev: A small positive tolerance on the end-effector linear position\n error. The returned joint angles must give an end-effector\n position error less than ev\n:return thetalist: Joint angles that achieve T within the specified\n tolerances,\n:return success: A logical value where TRUE means that the function found\n a solution and FALSE means that it ran through the set\n number of maximum iterations without finding a solution\n within the tolerances eomg and ev.\nUses an iterative Newton-Raphson root-finding method.\nThe maximum number of iterations before the algorithm is terminated has\nbeen hardcoded in as a variable called maxiterations. It is set to 20 at\nthe start of the function, but can be changed if needed.\"\"\"\n\nev = 0.0001\neomg = 0.001\ntheat_list_0 = array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\nT = array([[0, 1, 0, -0.5],\n [0, 0, -1, 0.1],\n [-1, 0, 0, 0.1],\n [0, 0, 0, 1 ]])\nw1 = 109 / 1000\nw2 = 82 / 1000\nl1 = 425 / 1000\nl2 = 392 / 1000\nh1 = 89 / 1000\nh2 = 95 / 1000\n\n# checking if T is outside the workspace:\nif sqrt(w1**2 + w2**2 + l1**2 + l2**2 + h1**2 + h2**2) < sqrt(0.5**2 + 2 * 0.1**2):\n print(\"T is out of range of the end effector\")\n exit()\n\n\nM = array([[-1, 0, 0, l1+l2],\n [0, 0, 1, w1+w2],\n [0, 1, 0, h1-h2],\n [0, 0, 0, 1]])\n\nSlist = array([[0, 0, 1, 0, 0, 0],\n [0, 1, 0, -h1, 0, 0],\n [0, 1, 0, -h1, 0, l1],\n [0, 1, 0, -h1, 0, l1+l2],\n [0, 0, -1, -w1, l1+l2, 0],\n [0, 1, 0, h2-h1, 0, l1+l2]]).T\n\ntuple1 = IKinSpace(Slist, M, T, theat_list_0, eomg, ev)\ntheta = tuple1[0]\nfound_solution = tuple1[1]\n\nprint(\"the result in deg\")\nfor item in theta:\n print((item % 2*pi) * 180 / pi)\n\nprint(\"\\nthe result in rad\")\nfor item in theta:\n print((item % 2*pi))\n\nprint()\nprint(\"was a solution found: \", found_solution)\n" ]
[ [ "numpy.array", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
davidguzmanr/siamese-neural-networks
[ "f5ab87c92cddba760b8dbd2e8e33fc7a04cfb1de" ]
[ "siamese/train_lightning.py" ]
[ "import torch\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, random_split\nfrom torchmetrics import Accuracy\n\nfrom torchvision import transforms\nfrom torchvision.datasets import Omniglot\n\nfrom pytorch_lightning import LightningModule\nfrom pytorch_lightning.utilities.cli import LightningCLI\n\nfrom dataset.dataset_pairs import OmniglotPairs\nfrom model.network import SiameseNetwork\n\nfrom typing import Any\n\nclass SiameseModel(LightningModule):\n def __init__(\n self,\n batch_size: int = 16,\n lr: float = 1e-3,\n weight_decay: float = 1e-3,\n num_workers: int = 4\n ) -> None:\n super().__init__()\n\n self.save_hyperparameters()\n self.model = SiameseNetwork()\n self.train_acc = Accuracy()\n self.val_acc = Accuracy()\n self.test_acc = Accuracy()\n\n # Original dataset\n self.omniglot_background = Omniglot(\n root='data',\n transform=self.transform,\n background=True,\n download=True \n )\n\n self.omniglot_evaluation = Omniglot(\n root='data',\n transform=transforms.ToTensor(),\n background=False,\n download=True \n )\n\n def forward(self, x1, x2):\n return self.model(x1, x2)\n\n def training_step(self, batch, batch_idx):\n x1, x2, y = batch\n logits = self.forward(x1, x2)\n preds = torch.cat((1 - logits.sigmoid(), logits.sigmoid()), dim=1)\n loss = F.binary_cross_entropy_with_logits(logits, y.unsqueeze(dim=1).float())\n\n self.log('train_loss', loss, on_step=False, on_epoch=True)\n self.log('train_acc', self.train_acc(preds, y), on_step=False, on_epoch=True)\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n x1, x2, y = batch\n logits = self.forward(x1, x2)\n preds = torch.cat((1 - logits.sigmoid(), logits.sigmoid()), dim=1)\n loss = F.binary_cross_entropy_with_logits(logits, y.unsqueeze(dim=1).float())\n \n self.log('val_loss', loss, on_step=False, on_epoch=True)\n self.log('val_acc', self.val_acc(preds, y), on_step=False, on_epoch=True)\n\n return loss\n\n def test_step(self, batch, batch_idx):\n x1, x2, y = batch\n logits = self.forward(x1, x2)\n preds = torch.cat((1 - logits.sigmoid(), logits.sigmoid()), dim=1)\n loss = F.binary_cross_entropy_with_logits(logits, y.unsqueeze(dim=1).float())\n \n self.log('test_loss', loss, on_step=False, on_epoch=True)\n self.log('test_acc', self.test_acc(preds, y), on_step=False, on_epoch=True)\n\n return loss\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(\n self.model.parameters(), \n lr=self.hparams.lr, \n weight_decay=self.hparams.weight_decay\n )\n # scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)\n # return [optimizer], [scheduler]\n\n return optimizer\n\n @property\n def transform(self):\n return transforms.ToTensor()\n\n # def prepare_data(self) -> None:\n # Download the data, in case it hasn't been downloaded\n # OmniglotPairs()\n\n def train_dataloader(self):\n train_dataset = OmniglotPairs(\n dataset=self.omniglot_background,\n n_pairs=200_000\n )\n\n train_loader = DataLoader(\n train_dataset, \n batch_size=self.hparams.batch_size, \n num_workers=self.hparams.num_workers\n )\n\n return train_loader\n\n def val_dataloader(self):\n evaluation_dataset = OmniglotPairs(\n dataset=self.omniglot_evaluation,\n n_pairs=20_000\n )\n\n validation_dataset, _ = random_split(\n dataset=evaluation_dataset,\n lengths=[10_000, 10_000],\n generator=torch.Generator().manual_seed(42)\n )\n\n validation_loader = DataLoader(\n validation_dataset, \n batch_size=self.hparams.batch_size, \n num_workers=self.hparams.num_workers\n )\n\n return validation_loader\n\n def 
test_dataloader(self):\n evaluation_dataset = OmniglotPairs(\n dataset=self.omniglot_evaluation,\n n_pairs=20_000\n )\n\n _, test_dataset = random_split(\n dataset=evaluation_dataset,\n lengths=[10_000, 10_000],\n generator=torch.Generator().manual_seed(42)\n )\n\n test_loader = DataLoader(\n test_dataset, \n batch_size=self.hparams.batch_size, \n num_workers=self.hparams.num_workers\n )\n\n return test_loader\n\ndef cli_main():\n # The LightningCLI removes all the boilerplate associated with arguments parsing. This is purely optional.\n cli = LightningCLI(SiameseModel, seed_everything_default=42, save_config_overwrite=True, run=False)\n cli.trainer.fit(cli.model, datamodule=cli.datamodule)\n cli.trainer.test(ckpt_path='best', datamodule=cli.datamodule)\n\nif __name__ == '__main__':\n cli_main()" ]
[ [ "torch.Generator", "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wmvanvliet/posthoc
[ "a011a4219cee1e80cf77895543597438f71cd299" ]
[ "posthoc/beamformer.py" ]
[ "# encoding: utf-8\nimport numpy as np\nfrom sklearn.base import TransformerMixin, RegressorMixin\nfrom sklearn.linear_model import LinearModel\n\nfrom .cov_estimators import Empirical\n\n\nclass Beamformer(LinearModel, TransformerMixin, RegressorMixin):\n '''A beamformer filter.\n\n A beamformer filter attempts to isolate a specific signal in the data. The\n signal of interest is specified as an activation pattern.\n\n By default, a linear constrained minimum variance (LCMV) beamformer is\n used. This beamformer passes a signal conforming to the given template with\n unit gain (self.coef_ @ template == I), while minimizing overall output.\n Other types of beamformers can be constructed by using the\n `normalizer_modifier` parameter.\n\n Parameters\n ----------\n template : ndarray, shape (n_features,) | (n_signals, n_features)\n Activation template(s) of the signal(s) to extract.\n center : bool (default: True)\n Whether to remove the data mean before applying the filter.\n WARNING: only set to False if the data has been pre-centered. Applying\n the filter to un-normalized data may result in inaccuracies.\n normalize : bool (default: True)\n Whether to normalize (std. dev = 1) the data before fitting the\n beamformer. Can make the filter more robust.\n cov : instance of CovEstimator | function | None\n The method used to estimate the covariance. Can either be one of the\n predefined CovEstimator objects, or a function. If a function is used,\n it must have the signature: ``def cov_modifier(cov, X, y)``, take the\n training data as input and return a matrix that will be added to the\n emperical covariance matrix. Defaults to `None`, which means the\n default empirical estimator of the covariance matrix is used.\n normalizer_modifier : function | None\n Function that takes a normalizer (an ndarray of shape (n_targets,\n n_targets)) and modifies it. Must have the signature:\n `def normalizer_modifier(normalizer, X, y, template, coef)`\n and return the modified normalizer. 
Defaults to `None`, which means no\n modification of the normalizer.\n\n Attributes\n ----------\n coef_ : ndarray, shape (n_channels * n_samples, n_signals)\n The filter weights.\n '''\n def __init__(self, template, center=True, normalize=False,\n cov=None, normalizer_modifier=None,\n method='auto'):\n template = np.asarray(template)\n if template.ndim == 1:\n self.template = template[np.newaxis, :]\n else:\n self.template = template\n self.center = center\n self.fit_intercept = self.center\n self.normalize = normalize\n if cov is None:\n self.cov = Empirical()\n else:\n self.cov = cov\n self.normalizer_modifier = normalizer_modifier\n self.method = method\n\n def fit(self, X, y=None):\n \"\"\"Fit the beamformer to the data.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n The data.\n y : None\n Unused.\n \"\"\"\n n_samples, n_features = X.shape\n\n X, _, X_offset, _, X_scale = LinearModel._preprocess_data(\n X=X, y=np.zeros(n_samples),\n fit_intercept=self.center,\n normalize=self.normalize,\n copy=True\n )\n\n # Compute weights\n coef = self.cov.fit(X).inv_dot(X, self.template.T).T\n\n # The default normalizer constructs an LCMV beamformer\n normalizer = np.linalg.pinv(coef.dot(self.template.T))\n\n # Modify the normalizer with the user specified function\n if self.normalizer_modifier is not None:\n normalizer = self.normalizer_modifier(normalizer, X, None,\n self.template.T, coef)\n\n # Apply the normalizer\n self.coef_ = normalizer.dot(coef)\n\n # Undo scaling if self.normalize == True\n self._set_intercept(X_offset, 0, X_scale)\n\n return self\n\n def transform(self, X):\n \"\"\"Apply the beamformer to the data.\n\n Parameters\n ----------\n X : ndarray, shape (n_samples, n_features)\n The data.\n\n Returns\n -------\n X_trans : ndarray, shape (n_samples, n_signals)\n The transformed data.\n \"\"\"\n return self.predict(X)\n" ]
[ [ "numpy.asarray", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Goosang-Yu/pe_library
[ "a63de7a81ce359e6c9125299a0338dea6247e645" ]
[ "source_code/miniseq_lib_barcode_sorting.py" ]
[ "import Bio.SeqIO, os\nimport pandas as pd\nimport sys, time, regex\nfrom tqdm import tqdm\n\nstart = time.time()\n\ndef main():\n sAnalysis_Tag = '63_GS_PE off-target_283T_2_1rxn_220118'\n BaseDIR = r'C:\\Users\\home\\Desktop\\220128_miniseq'\n FASTQ_file = r'%s\\%s\\%s.fastq' % (BaseDIR, sAnalysis_Tag, sAnalysis_Tag.split('_')[0])\n Barcode_file = r'%s\\C_PECV6K_with_refseq_211112.csv' % BaseDIR\n OutDIR = r'%s\\%s\\output_PECV6K' % (BaseDIR, sAnalysis_Tag)\n sRE = '[T]{4}[ACGT]{16}' ## for PECV6K = 16 / for off-target = 20\n sError = 'ErrorFree'\n\n dict_brcd = make_bc_list_dictionary(Barcode_file)\n os.makedirs(OutDIR, exist_ok=True)\n\n dict_brcd_count, dict_read_type_count = find_barcode_in_NGSread(FASTQ_file, sRE, dict_brcd, sError)\n\n dict_to_csv(dict_brcd_count, OutDIR, 'bc_count_%s' % sAnalysis_Tag, 1)\n dict_to_csv(dict_read_type_count, OutDIR, 'read_count_%s' % sAnalysis_Tag)\n\n\n\ndef find_barcode_in_NGSread(FASTQ_file, sRE, dict_brcd, sError):\n fastq_info = Bio.SeqIO.parse(FASTQ_file, 'fastq')\n\n dict_sOutput = {brcd: [] for brcd in dict_brcd.keys()}\n dict_sOutput2 = {brcd: 0 for brcd in dict_brcd.keys()}\n dict_sOutput3 = {'conv': {'WT': 0, 'ED': 0, 'Other': 0},\n 'opti': {'WT': 0, 'ED': 0, 'Other': 0},\n 'Error_prone': {'WT': 0, 'ED': 0, 'Other': 0}}\n\n for sSeqData in tqdm(fastq_info, desc='Sorting from FASTQ data', ncols=100, total=len(FASTQ_file)/4):\n\n sReadID = str(sSeqData.id)\n sNGSSeq = str(sSeqData.seq)\n\n for sReIndex in regex.finditer(sRE, sNGSSeq, overlapped=True):\n nIndexStart = sReIndex.start()\n nIndexEnd = sReIndex.end()\n sBarcodeMatch = sNGSSeq[nIndexStart:nIndexEnd]\n sRefSeqCheck = sNGSSeq[:nIndexStart+24]\n sTargetSeq = sNGSSeq[nIndexEnd-2:-40]\n\n ### Skip Non-barcodes ###\n try:\n dict_refSeq = dict_brcd[sBarcodeMatch]\n except KeyError:\n continue\n #########################\n\n ## Skip error in Refseq ##\n if sError == 'ErrorFree':\n if dict_refSeq['convRef'] in sRefSeqCheck: read_type = 'conv'\n elif dict_refSeq['optiRef'] in sRefSeqCheck: read_type = 'opti'\n else: read_type = 'Error_prone'\n ##########################\n\n if dict_brcd[sBarcodeMatch]['WTSeq'] in reverse_complement(sTargetSeq): product_type = 'WT'\n elif dict_brcd[sBarcodeMatch]['EDSeq'] in reverse_complement(sTargetSeq): product_type = 'ED'\n else: product_type = 'Other'\n\n dict_sOutput2[sBarcodeMatch] += 1\n dict_sOutput3[read_type][product_type] += 1\n\n # loop END: i, sReadLine\n # loop END: sSeqData\n\n return dict_sOutput2, dict_sOutput3\n\n\ndef reverse_complement(sSeq):\n dict_sBases = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N', '.': '.', '*': '*',\n 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}\n list_sSeq = list(sSeq) # Turns the sequence in to a gigantic list\n list_sSeq = [dict_sBases[sBase] for sBase in list_sSeq]\n return ''.join(list_sSeq)[::-1]\n\n\ndef make_bc_list_dictionary(Barcode_file):\n\n df_bc_list = pd.read_csv(Barcode_file)\n df_bc_list.columns = ['Barcode', 'convRef', 'optiRef', 'WTSeq', 'EDSeq']\n dict_brcd = {}\n\n for idx in df_bc_list.index:\n data = df_bc_list.loc[idx]\n barcode = data['Barcode'].upper()\n convRef = data['convRef'].upper()\n optiRef = data['optiRef'].upper()\n WT_Seq = data['WTSeq'].upper()\n ED_Seq = data['EDSeq'].upper()\n\n dict_brcd[barcode] = {'convRef': convRef, 'optiRef': optiRef, 'WTSeq': WT_Seq, 'EDSeq': ED_Seq}\n\n return dict_brcd\n\ndef dict_to_csv(dictionary, OutDIR, Output_Tag, T=0):\n df = pd.DataFrame(dict([(key, pd.Series(val)) for key, val in dictionary.items()])).sort_index(axis=1)\n if T == 1: 
df.T.to_csv('%s/%s.csv' % (OutDIR, Output_Tag))\n else: df.to_csv('%s/%s.csv' % (OutDIR, Output_Tag))\n\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 1:\n main()\n else:\n function_name = sys.argv[1]\n function_parameters = sys.argv[2:]\n if function_name in locals().keys():\n locals()[function_name](*function_parameters)\n else:\n sys.exit('ERROR: function_name=%s, parameters=%s' % (function_name, function_parameters))\n # if END: len(sys.argv)\n# if END: __name__" ]
[ [ "pandas.read_csv", "pandas.Series" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
WISDEM/DriveSE
[ "3edf703897ef55106b6f2574031322438b5ad326" ]
[ "src/drivese/hubse_components.py" ]
[ "\"\"\"\nhubse_components.py\nCopyright (c) NREL. All rights reserved.\n\nThis is a modified version of hubse_components.py that models the hub and spinner as spherical (rather than\ncylindrical) shapes. It is based on Excel spreadsheets by Scott Carron.\nGNS 2019 06 17\n\"\"\"\n\nimport numpy as np\nfrom math import pi, cos, sqrt, sin, exp, radians\nimport sys\nimport warnings\nwarnings.simplefilter(\"error\")\n\nfrom drivese.drivese_utils import get_distance_hub2mb\n\n# -------------------------------------------------\n\ndef inertiaSphereShell(mass, diameter, thickness, debug=False):\n ''' Return moment of inertia of a spherical shell '''\n radius = 0.5 * diameter\n insideRadius = radius - thickness\n try:\n dr5 = radius ** 5 - insideRadius ** 5\n dr3 = radius ** 3 - insideRadius ** 3\n I = 0.4 * mass \\\n * (radius ** 5 - insideRadius ** 5) \\\n / (radius ** 3 - insideRadius ** 3)\n except RuntimeWarning:\n sys.stderr.write('\\n*** inertiaSphereShell: ERROR mass {:.1f} Rad {:.4f} IRad {:.4f} Thick {:.4f}\\n\\n'.format(mass, \n radius, insideRadius, thickness))\n I = 0\n \n if debug:\n sys.stderr.write('iSphShell: mass {:.1f} kg diam {:.1f} m thick {:.2f} m\\n'.format(mass, diameter, thickness))\n sys.stderr.write('iSphShell: I {:.2f} kg-m2\\n'.format(I))\n return np.array([I, I, I])\n \n# -------------------------------------------------\n\nclass Hub_System_Adder(object):\n ''' \n Compute hub mass, cm, and I\n '''\n\n def __init__(self, blade_number, debug=False):\n\n super(Hub_System_Adder, self).__init__()\n self.mass_adder = Hub_Mass_Adder(blade_number, debug=debug)\n self.cm_adder = Hub_CM_Adder()\n \n self.debug = debug\n\n def compute(self, rotor_diameter, blade_mass, distance_hub2mb, shaft_angle, MB1_location, hub_mass, hub_diameter, hub_thickness, pitch_system_mass, spinner_mass):\n\n (self.rotor_mass, self.hub_system_mass, self.hub_system_I, self.hub_I) = self.mass_adder.compute(blade_mass, hub_mass, hub_diameter,\n hub_thickness, pitch_system_mass, spinner_mass)\n self.hub_system_cm = self.cm_adder.compute(rotor_diameter, distance_hub2mb, shaft_angle, MB1_location)\n\n return(self.rotor_mass, self.hub_system_mass, self.hub_system_cm, self.hub_system_I, self.hub_I)\n\n# -------------------------------------------------\n\nclass Hub_Mass_Adder(object):\n ''' \n Compute hub mass and I\n Excluding cm here, because it has a dependency on main bearing location, which can only be calculated once the full rotor mass is set\n '''\n\n def __init__(self, blade_number, debug=False):\n\n super(Hub_Mass_Adder, self).__init__()\n self.blade_number = blade_number\n \n self.debug = debug\n\n def compute(self, blade_mass, hub_mass, hub_diameter, hub_thickness, pitch_system_mass, spinner_mass):\n\n # variables\n self.blade_mass = blade_mass #Float(iotype='in', units='kg', desc='mass of one blade')\n self.hub_mass = hub_mass #Float(iotype='in', units='kg',desc='mass of Hub')\n self.hub_diameter = hub_diameter #Float(3.0,iotype='in', units='m', desc='hub diameter')\n self.hub_thickness = hub_thickness #Float(iotype='in', units='m', desc='hub thickness')\n self.pitch_system_mass = pitch_system_mass #Float(iotype='in', units='kg',desc='mass of Pitch System')\n self.spinner_mass = spinner_mass #Float(iotype='in', units='kg',desc='mass of spinner')\n \n # outputs\n self.hub_system_I = np.zeros(3) #Array(iotype='out', desc='mass moments of Inertia of hub [Ixx, Iyy, Izz, Ixy, Ixz, Iyz] around its center of mass in yaw-aligned c.s.')\n self.hub_system_mass = 0.0 #Float(iotype='out', 
units='kg',desc='mass of hub system')\n self.rotor_mass = 0.0\n\n self.hub_system_mass = self.hub_mass + self.pitch_system_mass + self.spinner_mass\n self.rotor_mass = self.hub_system_mass + self.blade_number*self.blade_mass\n\n #add I definitions here\n hub_I = np.zeros(3)\n\n hub_rad = 0.5 * self.hub_diameter\n cav_rad = hub_rad - self.hub_thickness\n t5 = (hub_rad**5 - cav_rad**5)\n t3 = (hub_rad**3 - cav_rad**3)\n if self.debug:\n sys.stderr.write('SphHMA::compute(): Thick {:.3f} M Diam {:.2f} m H {:.3f} C {:.3f} T5 {:.3f} T3 {:.3f}\\n'.format(\n hub_thickness, hub_diameter, hub_rad, cav_rad, t5, t3))\n \n hub_I = inertiaSphereShell(self.hub_mass, self.hub_diameter, self.hub_thickness, debug=self.debug)\n '''\n hub_I[0] = 0.4 * self.hub_mass \\\n * ((self.hub_diameter / 2) ** 5 - (self.hub_diameter / 2 - self.hub_thickness) ** 5) \\\n / ((self.hub_diameter / 2) ** 3 - (self.hub_diameter / 2 - self.hub_thickness) ** 3)\n hub_I[1] = hub_I[0]\n hub_I[2] = hub_I[1]\n '''\n \n pitch_system_I = np.zeros(3)\n pitch_system_I[0] = self.pitch_system_mass * (self.hub_diameter ** 2) / 4\n pitch_system_I[1] = pitch_system_I[0]\n pitch_system_I[2] = pitch_system_I[1]\n\n if self.hub_diameter == 0:\n spinner_diameter = 3.30\n else:\n spinner_diameter = self.hub_diameter\n spinner_thickness = spinner_diameter * (0.055 / 3.30) # 0.055 for 1.5 MW outer diameter of 3.3 - using proportional constant\n\n spinner_I = inertiaSphereShell(self.spinner_mass, spinner_diameter, spinner_thickness, debug=self.debug)\n '''\n spinner_I = np.zeros(3)\n spinner_I[0] = 0.4 * self.spinner_mass \\\n * ((spinner_diameter / 2) ** 5 - (spinner_diameter / 2 - spinner_thickness) ** 5) \\\n / ((spinner_diameter / 2) ** 3 - (spinner_diameter / 2 - spinner_thickness) ** 3)\n spinner_I[1] = spinner_I[0]\n spinner_I[2] = spinner_I[1]\n '''\n \n #add moments of inertia\n #I = np.zeros(3)\n #for i in (range(0,3)): # calculating MOI, at nacelle center of gravity with origin at tower top center / yaw mass center, ignoring masses of non-drivetrain components / auxiliary systems\n # calculate moments around CM\n # sum moments around each components CM\n #I[i] = hub_I[i] + pitch_system_I[i] + spinner_I[i]\n self.hub_system_I = np.r_[hub_I + pitch_system_I + spinner_I, np.zeros(3)]\n \n if self.debug:\n sys.stderr.write('SphHMA: hub_system_mass {:8.1f} kg\\n'.format(self.hub_system_mass))\n sys.stderr.write(' hub_mass {:8.1f} kg\\n'.format(self.hub_mass))\n sys.stderr.write(' pitch_system_mass {:8.1f} kg\\n'.format(self.pitch_system_mass))\n sys.stderr.write(' spinner_mass {:8.1f} kg\\n'.format(self.spinner_mass))\n sys.stderr.write(' blade_mass {:8.1f} kg = {} * {:.1f} kg\\n'.format(self.blade_number*self.blade_mass, \n self.blade_number, self.blade_mass))\n sys.stderr.write(' rotor_mass {:8.1f} kg\\n'.format(self.rotor_mass))\n \n #for i in range(3):\n # sys.stderr.write('Inertia {} H {:.2f} {:.2f} S {:.2f} {:.2f}\\n'.format(i, hub_I[i], hI[i], spinner_I[i], sI[i]))\n\n return(self.rotor_mass, self.hub_system_mass, self.hub_system_I, hub_I)\n\n# -------------------------------------------------\n\n\nclass Hub_CM_Adder(object):\n ''' \n Compute hub cm\n Separating cm here, because it has a dependency on main bearing location, which can only be calculated once the full rotor mass is set\n '''\n\n def __init__(self):\n\n super(Hub_CM_Adder, self).__init__()\n\n def compute(self, rotor_diameter, distance_hub2mb, shaft_angle, MB1_location):\n\n # variables\n self.rotor_diameter = rotor_diameter #Float(iotype='in', units='m', desc='rotor 
diameter')\n self.distance_hub2mb = distance_hub2mb #Float(0.0,iotype='in', units = 'm', desc = 'distance between hub center and upwind main bearing')\n self.shaft_angle = shaft_angle #Float(iotype = 'in', units = 'deg', desc = 'shaft angle')\n self.MB1_location = MB1_location #Array(iotype = 'in', units = 'm', desc = 'center of mass of main bearing in [x,y,z] for an arbitrary coordinate system')\n \n # outputs\n self.hub_system_cm = np.zeros(3) #Array(iotype='out', units='m',desc='center of mass of the hub relative to tower to in yaw-aligned c.s.')\n \n if self.distance_hub2mb > 0:\n distance_hub2mb = self.distance_hub2mb\n else:\n distance_hub2mb = get_distance_hub2mb(self.rotor_diameter)\n\n cm = np.zeros(3)\n cm[0] = self.MB1_location[0] - distance_hub2mb\n cm[1] = 0.0\n cm[2] = self.MB1_location[2] + distance_hub2mb*sin(self.shaft_angle)\n self.hub_system_cm = (cm)\n\n return(self.hub_system_cm)\n\n#%% -------------------------------------------------\n\nclass Hub(object):\n ''' Sph_Hub class \n The Sph_Hub class is used to represent the hub component of a wind turbine. \n It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.\n It contains an update method to determine the mass, mass properties, and dimensions of the component. \n\n 2019 04 24 - GNS\n Conversion from kW to MW actually coverted to W - proper factor of 1e-3 is now used \n '''\n\n def __init__(self, blade_number, debug=False):\n\n super(Hub, self).__init__()\n \n self.blade_number = blade_number\n self.debug = debug\n self.main_flange_thick = None\n \n def compute(self, blade_root_diameter, rotor_rpm, blade_mass, rotor_diameter, blade_length): \n \n if self.blade_number != 3:\n sys.stderr.write('\\n***ERROR: spherical_hub only works with 3-bladed hubs\\n\\n')\n return None, None, None\n \n if self.debug:\n sys.stderr.write('Hub: INPUTS BRD {:.1f} m RPM {:.1f} BMass {:.1f} RDiam {:.1f} m BLen {:.1f} m\\n'.format(blade_root_diameter, \\\n rotor_rpm, blade_mass, rotor_diameter, blade_length))\n \n # Parameters / 'constants'\n \n HUB_CIRC_INCR_PCT = 20 # % Initial Spherical Hub Circumference Increase Factor (Percentage) \n ROTOR_SHUTDOWN_TIME = 1 # sec Rotor Shutdown Time \n FINAL_ROTOR_RADPS = 0 # rad/s Final Rotor Speed \n YIELD_STRENGTH_CAST = 200 # Mpa Yield Strength of Hub Casting \n RESERVE_FACTOR = 2 # Reserve Factor\n STRESS_CON_FACTOR = 2.5 # Stress Concentration Factor\n HUB_DENS = 7200 # kg/m3 Density of Hub \n FLANGE_THICK_FACTOR = 4 # Ratio of flange thickness to shell thickness\n \n ''' Can we use HUB_DENS for densities of spherical cap and main flange too? 
'''\n \n rotor_radps = rotor_rpm * 2 * pi / 60 # rad/s Power Production rad/s \n ang_accel = (rotor_radps-FINAL_ROTOR_RADPS) / (ROTOR_SHUTDOWN_TIME-0) # rad/s2 Angular Acceleration \n \n # Hub Design Allowable and Material Properties \n stress_allow = YIELD_STRENGTH_CAST / (STRESS_CON_FACTOR * RESERVE_FACTOR) # Mpa (N/mm2) Stress Allowable \n stress_allow_pa = stress_allow * 1000000 # N/m2 \n \n # Hub Geometry \n init_hub_diam = blade_root_diameter / (sin(radians(120/2))) # m Initial Spherical Hub Diameter determined by a Circle enclosing an Equilateral Triangle with Length equal to Blade Diameter\n init_hub_circ = pi * init_hub_diam # m Initial Spherical Hub Circumference of Cross-Section \n dsgn_hub_circ = init_hub_circ * (1+(HUB_CIRC_INCR_PCT/100)) # m Design Spherical Hub Circumference \n dsgn_hub_diam = dsgn_hub_circ / pi # m Design Spherical Hub Diameter (OD) \n \n # Hub Design Load \n blade_cm = ((rotor_diameter/2) - blade_length) + (blade_length/3) # m Blade Center of Mass (from Hub Rotational Axis)\n blade_mmi_edge = blade_mass * blade_cm**2 # kgm2 Mass Moment of Inertia (mr2) - Edgewise \n blade_torque = blade_mmi_edge * ang_accel # Nm Torque from Blade \n hub_torque = blade_torque * self.blade_number # Nm Torque on Hub (Total)\n \n # Hub Mass Calculations \n sph_hub_shell_thick = ((((dsgn_hub_diam**4) - ((32/ pi)*(hub_torque*dsgn_hub_diam/2/stress_allow_pa)))**(1/4)) - dsgn_hub_diam) / (-2) # m Spherical Hub Shell Thickness \n sph_hub_shell_thick_mm = sph_hub_shell_thick * 1000 # mm \n sph_hub_vol = (4/3) * pi * ((dsgn_hub_diam/2)**3 - ((dsgn_hub_diam-2*sph_hub_shell_thick)/2)**3) # m3 Spherical Hub Volume \n sph_hub_mass = sph_hub_vol * HUB_DENS # kg Spherical Hub Mass \n \n sph_cap_area = 2 * pi * (dsgn_hub_diam/2) \\\n * ((dsgn_hub_diam/2) - sqrt((dsgn_hub_diam/2)**2 - (blade_root_diameter/2)**2)) # m2 Spherical Cap Area (1 blade root cutout) \n sph_cap_vol = sph_cap_area * sph_hub_shell_thick # m3 Spherical Cap Volume (1 blade root cutout)\n sph_cap_vol_tot = self.blade_number * sph_cap_vol # m3 Spherical Cap Volume (3 blade root cutouts)\n sph_cap_mass = sph_cap_vol_tot * HUB_DENS # kg Spherical Cap Mass (3 blade root cutouts) \n \n #main_flange_OD = 0.6 * dsgn_hub_diam # m CALCULATED / ASSUMPTION IN HUBSE Main Flange OD\n #main_flange_ID = main_flange_OD - (2*(dsgn_hub_diam/10)) # m CALCULATED / ASSUMPTION IN HUBSE Main Flange ID \n #main_flange_thick = 5 * sph_hub_shell_thick # m CALCULATED / ASSUMPTION IN HUBSE Main Flange Thickness \n # Rev02 changes constant terms in 3 lines above - 2019 07 08\n main_flange_OD = 0.5 * dsgn_hub_diam # m CALCULATED / ASSUMPTION IN HUBSE Main Flange OD\n main_flange_ID = main_flange_OD - (2*(dsgn_hub_diam/20)) # m CALCULATED / ASSUMPTION IN HUBSE Main Flange ID \n main_flange_thick = FLANGE_THICK_FACTOR * sph_hub_shell_thick # m CALCULATED / ASSUMPTION IN HUBSE Main Flange Thickness \n main_flange_vol = pi * main_flange_thick * ((main_flange_OD/2)**2 - (main_flange_ID/2)**2) # m3 Main Flange Volume \n main_flange_mass = main_flange_vol * HUB_DENS # kg Mass of Main Flange\n \n hub_mass = main_flange_mass + sph_hub_mass # kg Total Hub Mass \n \n # Hub Centroid Calculations \n main_flange_cm = main_flange_thick / 2 # m Center of Mass (Main Flange) \n mmf = main_flange_mass # kg Mass (Main Flange) \n sphere_cm = dsgn_hub_diam / 2 # m Center of Mass (Sphere) \n msph = sph_hub_mass # kg Mass (Sphere) \n if (mmf + msph) < 0.01:\n \tsys.stderr.write('\\n*** Hub::compute() ERROR: mmf {:.2f} msph {:.2f}\\n\\n'.format(mmf, msph))\n \thub_cm = 0.0\n 
else:\n hub_cm = (mmf*main_flange_cm + msph*sphere_cm) / (mmf + msph) # m Hub Center of Mass \n \n # Hub Mass Calculations \n cost_cast_iron = 3 # USD/kg Casting House Costs for Cast Iron \n hub_cost = hub_mass * cost_cast_iron # USD Hub Cost\n \n # Save some values\n self.main_flange_thick = main_flange_thick\n \n if self.debug:\n sys.stderr.write('Sph_Hub: mass {:.1f} kg Diam {:.1f} m CM {:.2f} m COST ${:.2f} ShellThick {:.3f} FlangeThick {:.3f}\\n'.format(hub_mass,\n dsgn_hub_diam, hub_cm, hub_cost, sph_hub_shell_thick, main_flange_thick))\n\n return hub_mass, dsgn_hub_diam, hub_cm, hub_cost, sph_hub_shell_thick\n\n#-------------------------------------------------------------------------\n\nclass PitchSystem(object):\n '''\n PitchSystem class\n The PitchSystem class is used to represent the pitch system of a wind turbine.\n It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.\n It contains an update method to determine the mass, mass properties, and dimensions of the component.\n '''\n\n def __init__(self, blade_number, debug=False):\n\n super(PitchSystem, self).__init__()\n \n self.blade_number = blade_number\n \n self.debug = debug\n\n def compute(self, blade_mass, rotor_bending_moment_y):\n\n # variables\n self.blade_mass = blade_mass #Float(iotype='in', units='kg', desc='mass of one blade')\n self.rotor_bending_moment_y = rotor_bending_moment_y #Float(iotype='in', units='N*m', desc='flapwise bending moment at blade root')\n \n # parameters\n #blade_number = Int(3, iotype='in', desc='number of turbine blades')\n \n # outputs\n self.mass = 0.0 #Float(0.0, iotype='out', units='kg', desc='overall component mass')\n\n # -------- Sunderland method for calculating pitch system masses --------\n pitchmatldensity = 7860.0 # density of pitch system material (kg / m^3) - assuming BS1503-622 (same material as LSS)\n pitchmatlstress = 371000000.0 # allowable stress of hub material (N / m^2)\n\n hubpitchFact = 1.0 # default factor is 1.0 (0.54 for modern designs)\n self.mass = hubpitchFact * (0.22 * self.blade_mass * self.blade_number \\\n + 12.6 * np.abs(self.rotor_bending_moment_y) * (pitchmatldensity / pitchmatlstress))\n #+ 12.6 * self.rotor_bending_moment_y * (pitchmatldensity / pitchmatlstress))\n # mass of pitch system based on Sunderland model\n # 2019 04 29 - mass is probably a function of abs(rotor_moment_y) - without abs, we can get negative masses\n # -------- End Sunderland method --------\n \n if self.debug:\n sys.stderr.write('PitchSystem IN : blade mass {:.1f} kg rbmy {:.1f} Nm\\n'.format(blade_mass, self.rotor_bending_moment_y))\n sys.stderr.write('PitchSystem OUT: mass {:.1f} kg\\n'.format(self.mass))\n \n return(self.mass)\n\n#-------------------------------------------------------------------------\n\nclass Spinner(object):\n '''\n Sph_Spinner class\n The Sph_SpinnerClass is used to represent the spinner of a wind turbine.\n It contains the general properties for a wind turbine component as well as additional design load and dimensional attributes as listed below.\n It contains an update method to determine the mass, mass properties, and dimensions of the component.\n '''\n\n def __init__(self, blade_number, debug=False):\n super(Spinner, self).__init__()\n \n self.blade_number = blade_number\n self.debug = debug\n\n def computeOLD(self, blade_root_diameter):\n\n if self.blade_number != 3:\n sys.stderr.write('\\n***ERROR: spherical_spinner only works with 3-bladed hubs\\n\\n')\n return 
None, None, None\n \n # Parameters / 'constants'\n HUB_CIRC_INCR_PCT = 20 # % C12 Initial Spherical Hub Circumference Increase Factor (Percentage) \n OSHA_CLEARANCE = 0.6 # m C15 OSHA Clearance between Spinner and Hub \n COMP_SHELL_THICK = 0.007 # m C17 Spinner Composite Shell Thickness \n SPIN_HOLE_INCR_PCT = 20 # % C19 Spinner Blade Access Hole Increase Factor \n COMP_DENS = 2100 # kg/m3 C28 Density of Composite (GFRP) \n #STEEL_DENS = 7850 # kg/m3 C29 Density of Steel (S355) \n RATIO_COMP_STEEL = 2.0 # C47 Composite to Steel Mass Ratio \n COST_COMPOSITE = 7.0 # USD/kg C56 Composite Spinner Shell Cost \n COST_SMALL_STEEL = 3.0 # USD/kg C57 Small Steel Part Hardware Costs \n \n # Spinner Geometry \n init_hub_diam = blade_root_diameter / (sin(radians(120/2))) # m C10 CALCHUB Initial Spherical Hub Diameter determined by a Circle enclosing an Equilateral Triangle with Length equal to Blade Diameter \n init_hub_circ = pi * init_hub_diam # m C11 CALCHUB Initial Spherical Hub Circumference of Cross-Section \n dsgn_hub_circ = init_hub_circ * (1+(HUB_CIRC_INCR_PCT/100)) # m C13 CALCHUB Design Spherical Hub Circumference \n dsgn_hub_diam = dsgn_hub_circ / pi # m C14 CALCHUB Design Spherical Hub Diameter (OD) \n sph_spin_diam = dsgn_hub_diam + (2 * OSHA_CLEARANCE) # m C16 CALCSPN Spherical Spinner Diameter (OD) \n \n spin_acc_hole_diam = blade_root_diameter * ((100+SPIN_HOLE_INCR_PCT)/100) # m C20 CALCSPN Spinner Blade Access Hole Diameter \n \n \n # Spinner Design Load \n #C24 BASSSPN ULS Load Case (Driving Load Case) Aero/OSHA \n \n # Spinner Design Allowable and Material Properties \n #C27 BASSSPN Stress Allowable(s) N/A Mpa (N/mm2)\n \n # Spinner Mass Calculations \n comp_shell_vol = (4/3)*pi*((sph_spin_diam/2)**3-((sph_spin_diam-2*COMP_SHELL_THICK)/2)**3) # m C32 CALCSPN Spherical Spinner Composite Shell Volume 3\n comp_shell_mass = comp_shell_vol * COMP_DENS # kg C33 CALCSPN Spherical Spinner Composite Shell Mass \n \n sph_cap_area = 2 * pi * (sph_spin_diam/2) \\\n * ((sph_spin_diam/2) - sqrt((sph_spin_diam/2)**2 - (spin_acc_hole_diam/2)**2)) # m2 C35 CALCSPN Spherical Cap Area (1 blade root cutout) \n sph_cap_vol = sph_cap_area * COMP_SHELL_THICK # m3 C36 CALCSPN Spherical Cap Volume (1 blade root cutout) \n sph_cap_tot_vol = self.blade_number * sph_cap_vol # m3 C37 CALCSPN Spherical Cap Volume (3 blade root cutouts) \n sph_cap_mass = sph_cap_tot_vol * COMP_DENS # kg C38 CALCSPN Spherical Cap Mass (3 blade root cutouts) \n \n main_flange_od = 0.6 * dsgn_hub_diam # m C40 CASSHUB Main Flange OD \n main_flange_cap_area = 2 * pi * (sph_spin_diam/2) \\\n * ((sph_spin_diam/2) - sqrt((sph_spin_diam/2)**2 - (main_flange_od/2)**2)) # m2 C41 CALCSPN Main Flange Spherical Cap Area \n main_flange_cap_vol = main_flange_cap_area * COMP_SHELL_THICK # m3 C42 CALCSPN Main Flange Spherical Cap Volume \n main_flange_cap_mass = main_flange_cap_vol * COMP_DENS # kg C43 CALCSPN Main Flange Spherical Cap Mass \n \n tot_composite_mass = comp_shell_mass - sph_cap_mass - main_flange_cap_mass # kg C45 CALCSPN Total Composite Spinner Shell Mass \n \n tot_steel_mass = tot_composite_mass / RATIO_COMP_STEEL # kg C48 CALCSPN Total Steel Mass \n \n tot_spinner_mass = tot_composite_mass + tot_steel_mass # kg C50 CALCSPN Total Spinner Mass \n \n # Spinner Centroid Calculations \n spin_cm = sph_spin_diam / 2 # m C53 CALCSPN Spinner Center of Mass (Spherical Shell and Front and Rear Steel Hardware) \n \n # Spinner Inertia Calculations\n spin_I = [0.0, 0.0, 0.0]\n \n # Spinner Cost Calculations \n spinner_cost = (COST_COMPOSITE 
* tot_composite_mass) + (COST_SMALL_STEEL * tot_steel_mass) # USD C58 CALCSPN Total Spinner Cost \n \n if self.debug:\n sys.stderr.write('Sph_Spinner: mass {:.1f} kg = Steel {:.1f} kg + Composite {:.1f} kg\\n'.format(tot_spinner_mass,\n tot_steel_mass, tot_composite_mass))\n sys.stderr.write('Sph_Spinner: size IHD {:.1f} m DHD {:.1f} m SAHD {:.1f} m\\n'.format(init_hub_diam,\n dsgn_hub_diam, spin_acc_hole_diam))\n\n return tot_spinner_mass, spin_cm, spinner_cost\n\n def compute(self, blade_root_diameter):\n ''' This version of compute implements the REV02 rewrite of the spinner that Scott Caron delivered on 2019 07 07 '''\n \n if self.blade_number != 3:\n sys.stderr.write('\\n***ERROR: spherical_spinner only works with 3-bladed hubs\\n\\n')\n return None, None, None\n \n # Parameters / 'constants'\n OSHA_CLEARANCE = 0.5 # m C17 Clearance between Spinner and Hub \n SPIN_HOLE_INCR_PCT = 20 # % C22 Spinner Blade Access Hole Increase Diameter Factor \n N_FRONT_BRACKETS = 3 # C24 Number of Front Spinner Brackets \n N_REAR_BRACKETS = 3 # C25 Number of Rear Spinner Brackets \n EXTR_GUST = 70 # m/s C29 Extreme Gust Velocity \n EXTR_GUST_LOAD_FACTOR = 1.5 # C31 Extreme Gust Load Factor \n COMP_TENSILE_STRENGTH = 60 # Mpa (N/mm2) C41 Composite Shell Tensile Strength \n COMP_RESERVE_FACTOR = 1.5 # C42 Composite Shell Reserve Factor \n COMP_DENSITY = 1600 # kg/m3 C44 Density of Composite Shell \n S235_YIELD_STRENGTH = 235 # Mpa (N/mm2) C46 S235 Yield Strength (Base) \n S235_YIELD_STRENGTH_THICK = 225 # Mpa (N/mm2) C47 S235 Yield Strength (t>16mm) \n S235_RESERVE_FACTOR = 1.5 # C48 S235 Reserve Factor \n S235_DENSITY = 7850 # kg/m3 C50 Density of Steel (S355) \n HUB_CIRC_INCR_PCT = 20 # % C13 Initial Spherical Hub Circumference Increase Factor (Percentage) \n SPIN_SHELL_COMP_COST = 7.00 # USD/kg C92 Composite Spinner Shell Cost \n SMALL_STEEL_COST = 3.00 # USD/kg C93 Small Steel Part Hardware Costs \n \n # set specific material properties\n \n steel_density = S235_DENSITY\n steel_yield_strength = S235_YIELD_STRENGTH\n steel_yield_strength_thick = S235_YIELD_STRENGTH_THICK\n steel_reserve_factor = S235_RESERVE_FACTOR\n \n init_hub_diam = blade_root_diameter / (sin(radians(120/2))) # m C11 CALCULATED IN HUBSE Initial Spherical Hub Diameter \n init_hub_circ = pi * init_hub_diam # m C12 CALCULATED IN HUBSE Initial Spherical Hub Circumference of Cross-Section \n dsgn_hub_circ = init_hub_circ * (1+(HUB_CIRC_INCR_PCT/100)) # m C14 CALCULATED IN HUBSE Design Spherical Hub Circumference \n dsgn_hub_diam = dsgn_hub_circ / pi # m C15 CALCULATED IN HUBSE Design Spherical Hub Diameter (OD) \n \n sph_spin_diam = dsgn_hub_diam + (2*OSHA_CLEARANCE) # m C18 CALC Spherical Spinner Diameter (OD) \n sph_spin_rad = 0.5 * sph_spin_diam \n sph_spin_circ = pi * sph_spin_diam # m C19 CALC Spherical Spinner Circumference \n spin_panel_width = (sph_spin_circ - dsgn_hub_circ) / 3 # m C20 CALC Spinner Panel Width between Blade Cutouts \n \n spin_acc_hole_diam = blade_root_diameter * ((100+SPIN_HOLE_INCR_PCT)/100) # m C23 CALC Spinner Blade Access Hole Diameter \n \n extr_gust_pressure = 0.5 * 1.225 * (EXTR_GUST ** 2) # N/m2 C30 CALC Extreme Gust Pressure \n extr_gust_dsgn_pressure = extr_gust_pressure * EXTR_GUST_LOAD_FACTOR # N/m2 C32 CALC Extreme Gust Design Pressure \n \n allow_tensile_strength = COMP_TENSILE_STRENGTH / COMP_RESERVE_FACTOR # Mpa (N/mm2) C43 CALC Composite Shell Design Allowable Tensile Strength \n allow_yield_strength = steel_yield_strength_thick / steel_reserve_factor # Mpa (N/mm2) C49 CALC S235 Design 
Allowable Yield Strength \n \n flat_plate_length = sph_spin_diam # m C54 CALC Flat plate length (a) \n flat_plate_width = spin_panel_width # m C55 CALC Flat Plate width (b) \n spin_shell_thickness = sqrt((0.75 * extr_gust_dsgn_pressure * flat_plate_width ** 2) / ((allow_tensile_strength*1000000)*(1.61*(flat_plate_width/flat_plate_length) ** 3 + 1))) # m C56 CALC Spinner shell Thickness \n spin_shell_volume = (4/3) *pi * (sph_spin_rad ** 3 - ((sph_spin_diam - 2*spin_shell_thickness)/2) ** 3) # m3 C57 CALC Spherical Spinner Composite Shell Volume \n spin_shell_mass = spin_shell_volume * COMP_DENSITY # kg C58 CALC Spherical Spinner Composite Shell Mass \n \n sph_cap_area = 2 *pi * sph_spin_rad * (sph_spin_rad - sqrt(sph_spin_rad ** 2 - (spin_acc_hole_diam/2) ** 2)) # m2 C60 CALC Spherical Cap Area (1 blade root cutout) \n sph_cap_volume = sph_cap_area * spin_shell_thickness # m3 C61 CALC Spherical Cap Volume (1 blade root cutout) \n sph_cap_volume = 3 * sph_cap_volume # m3 C62 CALC Spherical Cap Volume (3 blade root cutouts) \n sph_cap_mass = sph_cap_volume * COMP_DENSITY # kg C63 CALC Spherical Cap Mass (3 blade root cutouts) \n \n main_flange_diam = 0.6 * dsgn_hub_diam # m C65 CALCULATED / ASSUMPTION IN HUBSE Main Flange OD \n main_flange_area = 2 * pi * sph_spin_rad * (sph_spin_rad - sqrt(sph_spin_rad ** 2 - (main_flange_diam/2) ** 2)) # m2 C66 CALC Main Flange Spherical Cap Area \n main_flange_volume = main_flange_area * spin_shell_thickness # m3 C67 CALC Main Flange Spherical Cap Volume \n main_flange_mass = main_flange_volume * COMP_DENSITY # kg C68 CALC Main Flange Spherical Cap Mass \n spin_shell_mass = spin_shell_mass - sph_cap_mass - main_flange_mass # kg C70 CALC Total Composite Spinner Shell Mass \n \n #C73 BASE Description in Notes \n spin_frontal_area = pi * (sph_spin_diam ** 2)/4 # m2 C74 CALC Spinner Frontal Area \n frontal_gust_load = spin_frontal_area * extr_gust_dsgn_pressure # N C75 CALC Extreme Gust Load on Frontal Area \n bracket_load = frontal_gust_load / (N_FRONT_BRACKETS + N_REAR_BRACKETS) # N C76 CALC Load on Single Bracket \n bracket_bending_moment = bracket_load * OSHA_CLEARANCE # Nm C77 CALC Bending Moment on Bracket \n bracket_width = spin_panel_width / 2 # m C78 CALC Steel bracket width (b) \n bracket_length = OSHA_CLEARANCE # m C79 CALC Steel Bracket Length \n bracket_thickness = sqrt((6 * bracket_bending_moment) / (bracket_width * allow_yield_strength * 1000000)) # m C80 CALC Steel bracket thickness \n bracket_flange_length = bracket_length * 0.25 # m C81 CALC Steel Bracket attachment flange Length \n bracket_volume = (bracket_length + bracket_flange_length + bracket_flange_length) * bracket_width * bracket_thickness # m3 C82 CALC Steel Bracket Volume \n bracket_mass = bracket_volume * steel_density # kg C83 CALC Steel Bracket Mass (Individual Bracket) \n bracket_mass_total = bracket_mass * (N_FRONT_BRACKETS + N_REAR_BRACKETS) # kg C84 CALC Steel Bracket Mass (Total) \n \n spinner_mass = spin_shell_mass + bracket_mass_total # kg C86 CALC Total Spinner Mass (Composite Shell plus Steel Hardware) \n spinner_cm = sph_spin_diam / 2 # m C89 CALC Spinner Center of Mass (Sph Shell and Front/Rear Steel Hardware) \n spinner_cost = (spin_shell_mass * SPIN_SHELL_COMP_COST) + (bracket_mass_total * SMALL_STEEL_COST) # USD C94 CALC Total Spinner Cost \n\n if self.debug:\n sys.stderr.write('Sph_Spinner: mass {:.1f} kg = Shell {:.1f} kg + Bracket {:.1f} kg\\n'.format(spinner_mass,\n spin_shell_mass, bracket_mass_total))\n sys.stderr.write('Sph_Spinner: size IHD {:.1f} m DHD 
{:.1f} m SAHD {:.1f} m\\n'.format(init_hub_diam,\n dsgn_hub_diam, spin_acc_hole_diam))\n sys.stderr.write('Sph_Spinner: cost ${:.2f} CM {:.2f} m\\n'.format(spinner_cost, spinner_cm))\n\n return spinner_mass, spinner_cm, spinner_cost\n\n#%%---------------------------------\n \nif __name__ == \"__main__\":\n\n # TODO: raw python hub component examples\n \n blade_root_diameter = 4.0\n blade_mass = 17000\n rotor_bending_moment_y = 0\n rotor_rpm = 12.1\n rotor_diameter = 126.0\n blade_length = 61.0\n \n if False: # BAR params\n blade_root_diameter = 4.5\n blade_mass = 60800\n rotor_bending_moment_y = 0\n rotor_rpm = 7.9\n rotor_diameter = 206.0\n blade_length = 100.0\n \n spin = Spinner(blade_number=3, debug=True)\n tot_spinner_mass, spin_cm, spinner_cost = spin.compute(blade_root_diameter)\n \n pitch = PitchSystem(blade_number=3, debug=True)\n ps_mass = pitch.compute(blade_mass, rotor_bending_moment_y)\n \n hub = Hub(blade_number=3, debug=True)\n hub_mass, dsgn_hub_diam, hub_cm, hub_cost, sph_hub_shell_thick = hub.compute(blade_root_diameter, \n rotor_rpm, blade_mass, rotor_diameter, blade_length)\n \n pass\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aditya02acharya/William-s-Visual-Search
[ "b2ad7c637a0d61e5023d71463b690edfadbd99c3" ]
[ "ExperienceBuffer.py" ]
[ "import numpy as np\nimport random\nfrom GlobalConstants import TUPLE_SIZE\n\n\nclass ExperienceBuffer(object):\n\n def __init__(self, buffer_size=100000):\n self.buffer = []\n self.buffer_size = buffer_size\n\n def add(self, experience):\n\n if len(self.buffer) >= self.buffer_size:\n self.buffer[random.randint(0, self.buffer_size-1)] = experience\n else:\n self.buffer.append(experience)\n\n def sample(self, batch_size, trace_length):\n\n sampled_episodes = random.sample(self.buffer, batch_size)\n sampled_traces = []\n for episode in sampled_episodes:\n point = np.random.randint(0, len(episode)+1-trace_length)\n sampled_traces.append(episode[point:point+trace_length])\n\n sampled_traces = np.array(sampled_traces)\n\n return np.reshape(sampled_traces, [batch_size*trace_length,TUPLE_SIZE])" ]
[ [ "numpy.reshape", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yhung119/show-and-tell-image-captioning
[ "6eace30dc30e3bc4bd384790b1eaf1f94d890288" ]
[ "datasets/flickr8k.py" ]
[ "import os\r\nimport torch\r\nimport numpy as np\r\nfrom torch.utils.data import Dataset\r\nimport torchvision\r\nfrom torchvision import transforms\r\nfrom PIL import Image\r\nfrom pycocotools.coco import COCO\r\nimport nltk\r\nfrom .build_vocab import Vocabulary\r\nimport pickle\r\nimport json\r\nimport argparse\r\nfrom collections import defaultdict as dd\r\n\r\n\r\nclass Flickr8kDataset(Dataset):\r\n \"\"\"\r\n Flickr Custom Dataset\r\n \"\"\"\r\n\r\n def __init__(self, root, annFile, vocab=None, transform=None):\r\n \"\"\"\r\n Set the path for images, captions, and vocabulary wrapper\r\n\r\n Args:\r\n root: Image root [./data/flickr8k/]\r\n annFile: Json annotations for images\r\n vocab:\r\n transform:\r\n \"\"\"\r\n self.root = root\r\n self.annFile = annFile\r\n self.vocab = vocab\r\n self.transform = transform\r\n self.coco = COCO(annFile)\r\n self.ids = list(self.coco.anns.keys())\r\n\r\n def __getitem__(self, index):\r\n \"\"\"\r\n returns one data pair (image and caption)\r\n \"\"\"\r\n coco = self.coco\r\n vocab = self.vocab\r\n ann_id = self.ids[index]\r\n caption = coco.anns[ann_id]['caption']\r\n img_id = coco.anns[ann_id]['image_id']\r\n\r\n image = Image.open(os.path.join(self.root, img_id)).convert('RGB')\r\n if self.transform is not None:\r\n image = self.transform(image)\r\n # # Convert caption (string) to word ids.\r\n tokens = nltk.tokenize.word_tokenize(str(caption).lower())\r\n caption = []\r\n caption.append(vocab('<start>'))\r\n for token in tokens:\r\n caption.append(vocab(token))\r\n caption.append(vocab('<end>'))\r\n target = torch.Tensor(caption)\r\n return image, target, img_id\r\n\r\n def __len__(self):\r\n return len(self.ids)\r\n\r\n\r\ndef collate_fn(data):\r\n \"\"\"\r\n Pad the captions to have equal (maxiimal) length\r\n\r\n Returns:\r\n images: shape (batch_size, 3, 224, 224)\r\n captions: shape (batch_size, padded_length)\r\n lengths: valid lengths for each padded captions shape (batch_size, )\r\n \"\"\"\r\n data.sort(key=lambda x: len(x[1]), reverse=True)\r\n\r\n images, captions, img_id = zip(*data)\r\n images = torch.stack(images, 0)\r\n lengths = [len(cap) for cap in captions]\r\n # important to initilize as zero <pad>\r\n targets = torch.zeros(len(captions), max(lengths)).long()\r\n for i, cap in enumerate(captions):\r\n end = lengths[i]\r\n targets[i, :end] = cap[:end]\r\n\r\n return images, targets, lengths, img_id\r\n\r\n\r\n\r\ndef makejson():\r\n tokenpath = '../data/flickr8k/Flickr8k_text/Flickr8k.token.txt'\r\n ann_list = dd(list)\r\n imageinfo_list = {}\r\n buildvocab_dict = {'annotations': []}\r\n #imagetoann_dict = dd(list)\r\n ann_out = '../data/flickr8k/Flickr8k_text/flickr8k_ann.json'\r\n buildvocab_out = '../data/flickr8k/Flickr8k_text/buildvocab.json'\r\n #imagetoann_out = '../data/Flickr8k_text/flickr8k_imagetoannID.json'\r\n #imagetocaption_dict = dd(list)\r\n #imagetocaption_out = '../data/Flickr8k_text/flickr8k_imagetocaption.json'\r\n imageinfo_out = '../data/flickr8k/Flickr8k_text/flickr8k_imageinfo.json'\r\n id = 0\r\n with open(tokenpath,'r') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n annID_dict = {}\r\n imageinfo_dict = {}\r\n annID = line.rstrip().split('\\t')[0]\r\n image_file = annID.split('#')[0]\r\n caption = line.rstrip().split('\\t')[1]\r\n imageinfo_dict['id'] = image_file\r\n imageinfo_dict['file_name'] = image_file\r\n annID_dict['caption'] = caption\r\n annID_dict['image_id'] = image_file\r\n annID_dict['id'] = id\r\n buildvocab_dict['annotations'].append(annID_dict)\r\n id += 1\r\n 
annID_dict['caption_number'] = annID\r\n ann_list[image_file].append(annID_dict)\r\n imageinfo_list[image_file] = imageinfo_dict\r\n\r\n # imagetoann_dict[image_ID].append(annID)\r\n # imagetocaption_dict[image_ID].append(caption)\r\n with open(ann_out,'w') as outfile:\r\n json.dump(ann_list, outfile )\r\n with open(imageinfo_out,'w') as outfile:\r\n json.dump(imageinfo_list, outfile)\r\n with open(buildvocab_out,'w') as outfile:\r\n json.dump(buildvocab_dict, outfile)\r\n\r\n # with open(imagetoann_out, 'w') as outfile:\r\n # json.dump(imagetoann_dict, outfile)\r\n # with open(imagetocaption_out, 'w') as outfile:\r\n # json.dump(imagetocaption_dict, outfile)\r\n\r\n\r\ndef generate_test_entries(annFile= \"../data/flickr8k/Flickr8k_text/flickr8k_ann.json\" , root=\"../data/flickr8k/Flickr8k_text/\",\r\n new_valid_filename=\"captions_flickr8k_val.json\",\r\n new_test_filename=\"captions_flickr8k_test.json\",\r\n new_train_filename=\"captions_flickr8k_train.json\",\r\n imageinfo_filename = \"../data/flickr8k/Flickr8k_text/flickr8k_imageinfo.json\"):\r\n \"\"\"\r\n reserves 4k images from validation as test\r\n \"\"\"\r\n with open(annFile,'r') as f:\r\n ann = json.load(f)\r\n with open(imageinfo_filename,'r') as f:\r\n imageinfo = json.load(f)\r\n\r\n\r\n train_origin = root + \"Flickr_8k.trainImages.txt\"\r\n test_origin = root + \"Flickr_8k.testImages.txt\"\r\n valid_origin = root + \"Flickr_8k.devImages.txt\"\r\n\r\n train_dict = {'images': [], 'annotations': []}\r\n test_dict = {'images': [], 'annotations': []}\r\n valid_dict = {'images': [], 'annotations': []}\r\n\r\n with open(train_origin,\"r\") as f :\r\n lines = f.readlines()\r\n for line in lines:\r\n image = line.rstrip()\r\n caption_list = ann[image]\r\n for caption in caption_list:\r\n train_dict['annotations'].append(caption)\r\n train_dict['images'].append(imageinfo[image])\r\n\r\n with open(valid_origin,\"r\") as f :\r\n lines = f.readlines()\r\n for line in lines:\r\n image = line.rstrip()\r\n caption_list = ann[image]\r\n for caption in caption_list:\r\n valid_dict['annotations'].append(caption)\r\n valid_dict['images'].append(imageinfo[image])\r\n\r\n with open(test_origin,\"r\") as f :\r\n lines = f.readlines()\r\n for line in lines:\r\n image = line.rstrip()\r\n caption_list = ann[image]\r\n for caption in caption_list:\r\n test_dict['annotations'].append(caption)\r\n test_dict['images'].append(imageinfo[image])\r\n\r\n\r\n\r\n print(\"Saving %d val images, %d val annotations\" % (len(valid_dict[\"images\"]), len(valid_dict[\"annotations\"])))\r\n with open(os.path.join(root, new_valid_filename), \"w\") as f:\r\n json.dump(valid_dict, f)\r\n\r\n print(\"Saving %d test images %d test annotations\" % (\r\n len(test_dict[\"images\"]), len(test_dict[\"annotations\"])))\r\n with open(os.path.join(root, new_test_filename), \"w\") as f:\r\n json.dump(test_dict, f)\r\n\r\n print(\"Saving %d train images %d train annotations\" % (\r\n len(train_dict[\"images\"]), len(train_dict[\"annotations\"])))\r\n with open(os.path.join(root, new_train_filename), \"w\") as f:\r\n json.dump(train_dict, f)\r\n\r\ndef get_vocab():\r\n with open(\"./data/flickr8k/Flickr8k_text/vocab.pkl\", 'rb') as f:\r\n vocab = pickle.load(f)\r\n return vocab\r\n\r\ndef get_data_loader(mode, transform, vocab, batch_size=4, shuffle=True, num_workers=0,data_dir = 1):\r\n \"\"\"\r\n\tReturns Data loader for custom coco dataset\r\n\r\n\tParams:\r\n\t\tmode:\t\t[train | val | test]\r\n\t\tvocab: \tloaded file from ./data/coco/vocab.pkl\r\n\t\ttransform: \tpytorch 
transformer\r\n\t\tbatch_size: num of images in a batch [default:4]\r\n\t\tshuffle:\tshuffle or not [default: true]\r\n\t\tnum_workers:thread used for dataloader [default:0]\r\n\t\"\"\"\r\n assert (mode in [\"train\", \"val\", \"test\"])\r\n root = \"./data/flickr8k/Flicker8k_Dataset/\"\r\n annFile = \"./data/flickr8k/Flickr8k_text/captions_flickr8k_\" + mode + \".json\"\r\n\r\n dataset = Flickr8kDataset(root=root,\r\n\t\t\t\t\t annFile=annFile,\r\n\t\t\t\t\t vocab=vocab,\r\n\t\t\t\t\t transform=transform)\r\n\r\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\r\n\t\t\t\t\t\t\t\t\t\t batch_size=batch_size,\r\n\t\t\t\t\t\t\t\t\t\t shuffle=shuffle,\r\n\t\t\t\t\t\t\t\t\t\t num_workers=num_workers,\r\n\t\t\t\t\t\t\t\t\t\t collate_fn=collate_fn,\r\n\t\t\t\t\t\t\t\t\t\t )\r\n return data_loader\r\n\r\n\r\ndef main(args):\r\n makejson()\r\n generate_test_entries()\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--json', type=str, default=\"./data/Flickr8k_text/flickr8k_ann.json\", help=\"path for val annoations\")\r\n args = parser.parse_args()\r\n main(args)\r\n\r\n\r\n\r\n\r\n" ]
[ [ "torch.stack", "torch.utils.data.DataLoader", "torch.Tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xuzhiqi1997/pandapower
[ "d93d1af88a7a7ab7a7fc00561c07fd91bc8a029a" ]
[ "pandapower/test/control/test_const_control.py" ]
[ "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nimport pytest\nimport numpy as np\nimport pandas as pd\n\nimport pandapower as pp\nimport pandapower.networks as nw\nimport pandapower.control\nimport pandapower.timeseries\nimport logging as log\n\nlogger = log.getLogger(__name__)\n\n\ndef test_write():\n net = nw.simple_four_bus_system()\n ds = pp.timeseries.DFData(pd.DataFrame(data=[[0., 1., 2.], [2., 3., 4.]]))\n c1 = pp.control.ConstControl(net, 'sgen', 'p_mw', element_index=[0, 1], profile_name=[0, 1], data_source=ds)\n pp.create_sgen(net, 0, 0)\n c2 = pp.control.ConstControl(net, 'sgen', 'p_mw', element_index=[2], profile_name=[2], data_source=ds,\n scale_factor=0.5)\n for t in range(2):\n c1.time_step(net, t)\n c1.control_step(net)\n c2.time_step(net, t)\n c2.control_step(net)\n assert np.all(net.sgen.p_mw.values == ds.df.loc[t].values * np.array([1, 1, 0.5]))\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n" ]
[ [ "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
yzjba/FATE
[ "bdda535c7d8a974fc2c43102837964b7da199730" ]
[ "federatedml/tree/hetero/hetero_secureboosting_tree_guest.py" ]
[ "#!/usr/bin/env python \n# -*- coding: utf-8 -*- \n\n#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n################################################################################\n#\n#\n################################################################################\n\n# =============================================================================\n# HeteroSecureBoostingGuest \n# =============================================================================\n\nimport functools\nfrom operator import itemgetter\n\nimport numpy as np\nfrom federatedml.tree.tree_core.predict_cache import PredictDataCache\nfrom federatedml.util.io_check import assert_io_num_rows_equal\nfrom numpy import random\n\nfrom arch.api.utils import log_utils\nfrom fate_flow.entity.metric import Metric\nfrom fate_flow.entity.metric import MetricMeta\nfrom federatedml.feature.binning.quantile_binning import QuantileBinning\nfrom federatedml.feature.fate_element_type import NoneType\nfrom federatedml.loss import FairLoss\nfrom federatedml.loss import HuberLoss\nfrom federatedml.loss import LeastAbsoluteErrorLoss\nfrom federatedml.loss import LeastSquaredErrorLoss\nfrom federatedml.loss import LogCoshLoss\nfrom federatedml.loss import SigmoidBinaryCrossEntropyLoss\nfrom federatedml.loss import SoftmaxCrossEntropyLoss\nfrom federatedml.loss import TweedieLoss\nfrom federatedml.optim.convergence import converge_func_factory\nfrom federatedml.param.evaluation_param import EvaluateParam\nfrom federatedml.param.feature_binning_param import FeatureBinningParam\nfrom federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta\nfrom federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta\nfrom federatedml.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta\nfrom federatedml.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam\nfrom federatedml.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo\nfrom federatedml.secureprotol import IterativeAffineEncrypt\nfrom federatedml.secureprotol import PaillierEncrypt\nfrom federatedml.secureprotol.encrypt_mode import EncryptModeCalculator\nfrom federatedml.statistic import data_overview\nfrom federatedml.transfer_variable.transfer_class.hetero_secure_boost_transfer_variable import \\\n HeteroSecureBoostingTreeTransferVariable\nfrom federatedml.tree import BoostingTree\nfrom federatedml.tree import HeteroDecisionTreeGuest\nfrom federatedml.util import consts\nfrom federatedml.util.classify_label_checker import ClassifyLabelChecker\nfrom federatedml.util.classify_label_checker import RegressionLabelChecker\n\nLOGGER = log_utils.getLogger()\n\n\nclass HeteroSecureBoostingTreeGuest(BoostingTree):\n def __init__(self):\n super(HeteroSecureBoostingTreeGuest, self).__init__()\n\n self.convegence = None\n self.y = None\n self.F = None\n self.predict_F = None\n self.data_bin = None\n self.loss = None\n 
self.init_score = None\n self.classes_dict = {}\n self.classes_ = []\n self.num_classes = 0\n self.classify_target = \"binary\"\n self.feature_num = None\n self.encrypter = None\n self.grad_and_hess = None\n self.tree_dim = 1\n self.tree_meta = None\n self.trees_ = []\n self.history_loss = []\n self.bin_split_points = None\n self.bin_sparse_points = None\n self.encrypted_mode_calculator = None\n self.predict_data_cache = PredictDataCache()\n\n self.feature_importances_ = {}\n self.role = consts.GUEST\n\n self.transfer_variable = HeteroSecureBoostingTreeTransferVariable()\n self.data_alignment_map = {}\n\n def set_loss(self, objective_param):\n loss_type = objective_param.objective\n params = objective_param.params\n LOGGER.info(\"set objective, objective is {}\".format(loss_type))\n if self.task_type == consts.CLASSIFICATION:\n if loss_type == \"cross_entropy\":\n if self.num_classes == 2:\n self.loss = SigmoidBinaryCrossEntropyLoss()\n else:\n self.loss = SoftmaxCrossEntropyLoss()\n else:\n raise NotImplementedError(\"objective %s not supported yet\" % (loss_type))\n elif self.task_type == consts.REGRESSION:\n if loss_type == \"lse\":\n self.loss = LeastSquaredErrorLoss()\n elif loss_type == \"lae\":\n self.loss = LeastAbsoluteErrorLoss()\n elif loss_type == \"huber\":\n self.loss = HuberLoss(params[0])\n elif loss_type == \"fair\":\n self.loss = FairLoss(params[0])\n elif loss_type == \"tweedie\":\n self.loss = TweedieLoss(params[0])\n elif loss_type == \"log_cosh\":\n self.loss = LogCoshLoss()\n else:\n raise NotImplementedError(\"objective %s not supported yet\" % (loss_type))\n else:\n raise NotImplementedError(\"objective %s not supported yet\" % (loss_type))\n\n def convert_feature_to_bin(self, data_instance):\n LOGGER.info(\"convert feature to bins\")\n param_obj = FeatureBinningParam(bin_num=self.bin_num)\n\n if self.use_missing:\n binning_obj = QuantileBinning(param_obj, abnormal_list=[NoneType()])\n else:\n binning_obj = QuantileBinning(param_obj)\n\n binning_obj.fit_split_points(data_instance)\n self.data_bin, self.bin_split_points, self.bin_sparse_points = binning_obj.convert_feature_to_bin(data_instance)\n LOGGER.info(\"convert feature to bins over\")\n\n def set_y(self):\n LOGGER.info(\"set label from data and check label\")\n self.y = self.data_bin.mapValues(lambda instance: instance.label)\n self.check_label()\n\n def generate_flowid(self, round_num, tree_num):\n LOGGER.info(\"generate flowid, flowid {}\".format(self.flowid))\n return \".\".join(map(str, [self.flowid, round_num, tree_num]))\n\n def check_label(self):\n LOGGER.info(\"check label\")\n if self.task_type == consts.CLASSIFICATION:\n self.num_classes, self.classes_ = ClassifyLabelChecker.validate_label(self.data_bin)\n if self.num_classes > 2:\n self.classify_target = \"multinomial\"\n self.tree_dim = self.num_classes\n\n range_from_zero = True\n for _class in self.classes_:\n try:\n if _class >= 0 and _class < self.num_classes and isinstance(_class, int):\n continue\n else:\n range_from_zero = False\n break\n except:\n range_from_zero = False\n\n self.classes_ = sorted(self.classes_)\n if not range_from_zero:\n class_mapping = dict(zip(self.classes_, range(self.num_classes)))\n self.y = self.y.mapValues(lambda _class: class_mapping[_class])\n\n else:\n RegressionLabelChecker.validate_label(self.data_bin)\n\n self.set_loss(self.objective_param)\n\n def generate_encrypter(self):\n LOGGER.info(\"generate encrypter\")\n if self.encrypt_param.method.lower() == consts.PAILLIER.lower():\n self.encrypter = 
PaillierEncrypt()\n self.encrypter.generate_key(self.encrypt_param.key_length)\n elif self.encrypt_param.method.lower() == consts.ITERATIVEAFFINE.lower():\n self.encrypter = IterativeAffineEncrypt()\n self.encrypter.generate_key(self.encrypt_param.key_length)\n else:\n raise NotImplementedError(\"encrypt method not supported yes!!!\")\n\n self.encrypted_calculator = EncryptModeCalculator(self.encrypter, self.calculated_mode, self.re_encrypted_rate)\n\n @staticmethod\n def accumulate_f(f_val, new_f_val, lr=0.1, idx=0):\n f_val[idx] += lr * new_f_val\n return f_val\n\n def update_feature_importance(self, tree_feature_importance):\n for fid in tree_feature_importance:\n if fid not in self.feature_importances_:\n self.feature_importances_[fid] = 0\n\n self.feature_importances_[fid] += tree_feature_importance[fid]\n\n def update_f_value(self, new_f=None, tidx=-1, mode=\"train\"):\n LOGGER.info(\"update tree f value, tree idx is {}\".format(tidx))\n if mode == \"train\" and self.F is None:\n if self.tree_dim > 1:\n self.F, self.init_score = self.loss.initialize(self.y, self.tree_dim)\n else:\n self.F, self.init_score = self.loss.initialize(self.y)\n else:\n accumulate_f = functools.partial(self.accumulate_f,\n lr=self.learning_rate,\n idx=tidx)\n\n if mode == \"train\":\n self.F = self.F.join(new_f, accumulate_f)\n else:\n self.predict_F = self.predict_F.join(new_f, accumulate_f)\n\n def compute_grad_and_hess(self):\n LOGGER.info(\"compute grad and hess\")\n loss_method = self.loss\n if self.task_type == consts.CLASSIFICATION:\n self.grad_and_hess = self.y.join(self.F, lambda y, f_val: \\\n (loss_method.compute_grad(y, loss_method.predict(f_val)), \\\n loss_method.compute_hess(y, loss_method.predict(f_val))))\n else:\n self.grad_and_hess = self.y.join(self.F, lambda y, f_val:\n (loss_method.compute_grad(y, f_val),\n loss_method.compute_hess(y, f_val)))\n\n def compute_loss(self):\n LOGGER.info(\"compute loss\")\n if self.task_type == consts.CLASSIFICATION:\n loss_method = self.loss\n y_predict = self.F.mapValues(lambda val: loss_method.predict(val))\n loss = loss_method.compute_loss(self.y, y_predict)\n elif self.task_type == consts.REGRESSION:\n if self.objective_param.objective in [\"lse\", \"lae\", \"logcosh\", \"tweedie\", \"log_cosh\", \"huber\"]:\n loss_method = self.loss\n loss = loss_method.compute_loss(self.y, self.F)\n else:\n loss_method = self.loss\n y_predict = self.F.mapValues(lambda val: loss_method.predict(val))\n loss = loss_method.compute_loss(self.y, y_predict)\n\n return float(loss)\n\n def get_grad_and_hess(self, tree_idx):\n LOGGER.info(\"get grad and hess of tree {}\".format(tree_idx))\n grad_and_hess_subtree = self.grad_and_hess.mapValues(\n lambda grad_and_hess: (grad_and_hess[0][tree_idx], grad_and_hess[1][tree_idx]))\n return grad_and_hess_subtree\n\n def check_convergence(self, loss):\n LOGGER.info(\"check convergence\")\n if self.convegence is None:\n self.convegence = converge_func_factory(\"diff\", self.tol)\n\n return self.convegence.is_converge(loss)\n\n def sample_valid_features(self):\n LOGGER.info(\"sample valid features\")\n if self.feature_num is None:\n self.feature_num = self.bin_split_points.shape[0]\n\n choose_feature = random.choice(range(0, self.feature_num), \\\n max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)\n\n valid_features = [False for i in range(self.feature_num)]\n for fid in choose_feature:\n valid_features[fid] = True\n return valid_features\n\n def sync_tree_dim(self):\n LOGGER.info(\"sync tree dim to host\")\n\n 
self.transfer_variable.tree_dim.remote(self.tree_dim,\n role=consts.HOST,\n idx=-1)\n\n def sync_stop_flag(self, stop_flag, num_round):\n LOGGER.info(\"sync stop flag to host, boost round is {}\".format(num_round))\n\n self.transfer_variable.stop_flag.remote(stop_flag,\n role=consts.HOST,\n idx=-1,\n suffix=(num_round,))\n\n def sync_predict_start_round(self, num_round):\n LOGGER.info(\"sync predict start round {}\".format(num_round))\n self.transfer_variable.predict_start_round.remote(num_round,\n role=consts.HOST,\n idx=-1)\n\n def fit(self, data_inst, validate_data=None):\n LOGGER.info(\"begin to train secureboosting guest model\")\n self.gen_feature_fid_mapping(data_inst.schema)\n self.validation_strategy = self.init_validation_strategy(data_inst, validate_data)\n data_inst = self.data_alignment(data_inst)\n self.convert_feature_to_bin(data_inst)\n self.set_y()\n self.update_f_value()\n self.generate_encrypter()\n\n self.sync_tree_dim()\n\n self.callback_meta(\"loss\",\n \"train\",\n MetricMeta(name=\"train\",\n metric_type=\"LOSS\",\n extra_metas={\"unit_name\": \"iters\"}))\n\n for i in range(self.num_trees):\n self.compute_grad_and_hess()\n for tidx in range(self.tree_dim):\n LOGGER.info(\"start to fit, boost round: {}, tree index: {}\".format(i, tidx))\n tree_inst = HeteroDecisionTreeGuest(self.tree_param)\n\n tree_inst.set_inputinfo(self.data_bin, self.get_grad_and_hess(tidx), self.bin_split_points,\n self.bin_sparse_points)\n\n valid_features = self.sample_valid_features()\n tree_inst.set_valid_features(valid_features)\n tree_inst.set_encrypter(self.encrypter)\n tree_inst.set_encrypted_mode_calculator(self.encrypted_calculator)\n tree_inst.set_flowid(self.generate_flowid(i, tidx))\n tree_inst.set_host_party_idlist(self.component_properties.host_party_idlist)\n tree_inst.set_runtime_idx(self.component_properties.local_partyid)\n\n tree_inst.fit()\n\n tree_meta, tree_param = tree_inst.get_model()\n self.trees_.append(tree_param)\n if self.tree_meta is None:\n self.tree_meta = tree_meta\n self.update_f_value(new_f=tree_inst.predict_weights, tidx=tidx)\n self.update_feature_importance(tree_inst.get_feature_importance())\n\n loss = self.compute_loss()\n self.history_loss.append(loss)\n LOGGER.debug(\"boost round {} loss is {}\".format(i, loss))\n\n self.callback_metric(\"loss\",\n \"train\",\n [Metric(i, loss)])\n\n if self.validation_strategy:\n self.validation_strategy.validate(self, i)\n if self.validation_strategy.need_stop():\n LOGGER.debug('early stopping triggered')\n break\n\n if self.n_iter_no_change is True:\n if self.check_convergence(loss):\n self.sync_stop_flag(True, i)\n LOGGER.debug(\"check loss convergence on boost round {}\".format(i))\n break\n else:\n self.sync_stop_flag(False, i)\n\n LOGGER.debug(\"history loss is {}\".format(self.history_loss))\n self.callback_meta(\"loss\",\n \"train\",\n MetricMeta(name=\"train\",\n metric_type=\"LOSS\",\n extra_metas={\"Best\": min(self.history_loss)}))\n\n if self.validation_strategy and self.validation_strategy.has_saved_best_model():\n self.load_model(self.validation_strategy.cur_best_model)\n\n LOGGER.info(\"end to train secureboosting guest model\")\n\n def predict_f_value(self, data_inst, cache_dataset_key):\n LOGGER.debug(\"predict tree f value, there are {} trees\".format(len(self.trees_)))\n init_score = self.init_score\n\n last_round = self.predict_data_cache.predict_data_last_round(cache_dataset_key)\n rounds = len(self.trees_) // self.tree_dim\n if last_round == -1:\n self.predict_F = data_inst.mapValues(lambda v: 
init_score)\n else:\n LOGGER.debug(\"hit cache, cached round is {}\".format(last_round))\n if last_round >= rounds - 1:\n LOGGER.debug(\"predict data cached, rounds is {}, total cached round is {}\".format(rounds, last_round))\n\n self.predict_F = self.predict_data_cache.predict_data_at(cache_dataset_key, min(rounds - 1, last_round))\n\n self.sync_predict_start_round(last_round + 1)\n\n for i in range(last_round + 1, rounds):\n for tidx in range(self.tree_dim):\n LOGGER.info(\"start to predict, boost round: {}, tree index: {}\".format(i, tidx))\n tree_inst = HeteroDecisionTreeGuest(self.tree_param)\n tree_inst.load_model(self.tree_meta, self.trees_[i * self.tree_dim + tidx])\n # tree_inst.set_tree_model(self.trees_[i * self.tree_dim + tidx])\n tree_inst.set_flowid(self.generate_flowid(i, tidx))\n tree_inst.set_runtime_idx(self.component_properties.local_partyid)\n tree_inst.set_host_party_idlist(self.component_properties.host_party_idlist)\n\n predict_data = tree_inst.predict(data_inst)\n self.update_f_value(new_f=predict_data, tidx=tidx, mode=\"predict\")\n\n self.predict_data_cache.add_data(cache_dataset_key, self.predict_F)\n\n @assert_io_num_rows_equal\n def predict(self, data_inst):\n LOGGER.info(\"start predict\")\n cache_dataset_key = self.predict_data_cache.get_data_key(data_inst)\n if cache_dataset_key in self.data_alignment_map:\n data_inst = self.data_alignment_map[cache_dataset_key]\n else:\n data_inst = self.data_alignment(data_inst)\n header = [None] * len(self.feature_name_fid_mapping)\n for idx, col in self.feature_name_fid_mapping.items():\n header[idx] = col\n data_inst = data_overview.header_alignment(data_inst, header)\n self.data_alignment_map[cache_dataset_key] = data_inst\n\n self.predict_f_value(data_inst, cache_dataset_key)\n if self.task_type == consts.CLASSIFICATION:\n loss_method = self.loss\n if self.num_classes == 2:\n predicts = self.predict_F.mapValues(lambda f: float(loss_method.predict(f)))\n else:\n predicts = self.predict_F.mapValues(lambda f: loss_method.predict(f).tolist())\n\n elif self.task_type == consts.REGRESSION:\n if self.objective_param.objective in [\"lse\", \"lae\", \"huber\", \"log_cosh\", \"fair\", \"tweedie\"]:\n predicts = self.predict_F\n else:\n raise NotImplementedError(\"objective {} not supported yet\".format(self.objective_param.objective))\n\n if self.task_type == consts.CLASSIFICATION:\n classes_ = self.classes_\n if self.num_classes == 2:\n threshold = self.predict_param.threshold\n predict_result = data_inst.join(predicts, lambda inst, pred: [inst.label,\n classes_[1] if pred > threshold else\n classes_[0], pred,\n {\"0\": 1 - pred, \"1\": pred}])\n else:\n predict_label = predicts.mapValues(lambda preds: classes_[np.argmax(preds)])\n predict_result = data_inst.join(predicts, lambda inst, preds: [inst.label, classes_[np.argmax(preds)],\n np.max(preds),\n dict(zip(map(str, classes_), preds))])\n\n elif self.task_type == consts.REGRESSION:\n predict_result = data_inst.join(predicts, lambda inst, pred: [inst.label, float(pred), float(pred),\n {\"label\": float(pred)}])\n\n else:\n raise NotImplementedError(\"task type {} not supported yet\".format(self.task_type))\n\n LOGGER.info(\"end predict\")\n\n return predict_result\n\n def get_feature_importance(self):\n return self.feature_importances_\n\n def get_model_meta(self):\n model_meta = BoostingTreeModelMeta()\n model_meta.tree_meta.CopyFrom(self.tree_meta)\n model_meta.learning_rate = self.learning_rate\n model_meta.num_trees = self.num_trees\n 
model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))\n model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective,\n param=self.objective_param.params))\n model_meta.task_type = self.task_type\n # model_meta.tree_dim = self.tree_dim\n model_meta.n_iter_no_change = self.n_iter_no_change\n model_meta.tol = self.tol\n # model_meta.num_classes = self.num_classes\n # model_meta.classes_.extend(map(str, self.classes_))\n # model_meta.need_run = self.need_run\n meta_name = \"HeteroSecureBoostingTreeGuestMeta\"\n\n return meta_name, model_meta\n\n def set_model_meta(self, model_meta):\n self.tree_meta = model_meta.tree_meta\n self.learning_rate = model_meta.learning_rate\n self.num_trees = model_meta.num_trees\n self.bin_num = model_meta.quantile_meta.bin_num\n self.objective_param.objective = model_meta.objective_meta.objective\n self.objective_param.params = list(model_meta.objective_meta.param)\n self.task_type = model_meta.task_type\n # self.tree_dim = model_meta.tree_dim\n # self.num_classes = model_meta.num_classes\n self.n_iter_no_change = model_meta.n_iter_no_change\n self.tol = model_meta.tol\n # self.classes_ = list(model_meta.classes_)\n\n # self.set_loss(self.objective_param)\n\n def get_model_param(self):\n model_param = BoostingTreeModelParam()\n model_param.tree_num = len(list(self.trees_))\n model_param.tree_dim = self.tree_dim\n model_param.trees_.extend(self.trees_)\n model_param.init_score.extend(self.init_score)\n model_param.losses.extend(self.history_loss)\n model_param.classes_.extend(map(str, self.classes_))\n model_param.num_classes = self.num_classes\n\n model_param.best_iteration = -1 if self.validation_strategy is None else self.validation_strategy.best_iteration\n\n feature_importances = list(self.get_feature_importance().items())\n feature_importances = sorted(feature_importances, key=itemgetter(1), reverse=True)\n feature_importance_param = []\n for (sitename, fid), _importance in feature_importances:\n feature_importance_param.append(FeatureImportanceInfo(sitename=sitename,\n fid=fid,\n importance=_importance))\n model_param.feature_importances.extend(feature_importance_param)\n\n model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)\n\n param_name = \"HeteroSecureBoostingTreeGuestParam\"\n\n return param_name, model_param\n\n def set_model_param(self, model_param):\n self.trees_ = list(model_param.trees_)\n self.init_score = np.array(list(model_param.init_score))\n self.history_loss = list(model_param.losses)\n self.classes_ = list(map(int, model_param.classes_))\n self.tree_dim = model_param.tree_dim\n self.num_classes = model_param.num_classes\n self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)\n\n def get_metrics_param(self):\n if self.task_type == consts.CLASSIFICATION:\n if self.num_classes == 2:\n return EvaluateParam(eval_type=\"binary\",\n pos_label=self.classes_[1], metrics=self.metrics)\n else:\n return EvaluateParam(eval_type=\"multi\", metrics=self.metrics)\n else:\n return EvaluateParam(eval_type=\"regression\", metrics=self.metrics)\n\n def export_model(self):\n\n if self.need_cv:\n return None\n\n meta_name, meta_protobuf = self.get_model_meta()\n param_name, param_protobuf = self.get_model_param()\n\n return {meta_name: meta_protobuf, param_name: param_protobuf}\n\n def load_model(self, model_dict):\n model_param = None\n model_meta = None\n for _, value in model_dict[\"model\"].items():\n for model in value:\n if model.endswith(\"Meta\"):\n model_meta = 
value[model]\n if model.endswith(\"Param\"):\n model_param = value[model]\n\n LOGGER.info(\"load model\")\n\n self.set_model_meta(model_meta)\n self.set_model_param(model_param)\n self.set_loss(self.objective_param)\n" ]
[ [ "numpy.max", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mullachv/causal_notes
[ "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6" ]
[ "framework/vae_snps_traits.py" ]
[ "from keras.layers import Lambda, Dense, MaxPooling2D, UpSampling2D, Input\nfrom keras.models import Model\nfrom keras.losses import mse, binary_crossentropy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras import backend as K\nfrom keras.datasets import mnist\nfrom keras.utils import plot_model\nimport argparse\nimport os\n\n# Reparameterization trick\n# z = z_mean + var * epsilon\n# where epsilon ~ Normal(0,1)\n#\ndef sampling(args):\n\tz_mean, z_log_var = args\n\tbatch = K.shape(z_mean)[0]\n\tdim = K.shape(z_mean)[1]\n\tepsilon = K.random_normal(shape=(batch, dim))\n\treturn z_mean + K.exp(0.5 * z_log_var)*epsilon\n\ndef plot_results(models, data, batch_size=128, model_name='vae_mnist'):\n\t\"\"\"\n\tPlot labels and MNIST digits as function of 2-dim latent vector\n\n\t:param models: tuple of encode and decoder models\n\t:param data: tuple of test data and label\n\t:param batch_size: prediction batch size\n\t:param model_name: calling model name\n\t:return: None\n\t\"\"\"\n\tencoder, decoder = models\n\tx_test, y_test = data\n\tos.makedirs(model_name, exist_ok=True)\n\n\tfilename = os.path.join(model_name, \"vae_mean.jpg\")\n\t# display a 2D plot of digit classes in the latent space\n\tz_mean, _, _ = encoder.predict(x_test,batch_size=batch_size)\n\tplt.figure(figsize=(12,10))\n\tplt.scatter(z_mean[:,0], z_mean[:,1], c=y_test)\n\tplt.colorbar()\n\tplt.xlabel(\"z[0]\")\n\tplt.ylabel(\"z[1]\")\n\tplt.savefig(filename)\n\tplt.show()\n\n\tfilename = os.path.join(model_name, \"digits_over_latent.jpg\")\n\t# display a 30x30 2-D manifold of digits\n\tn = 30\n\tdigit_size = 28\n\tfigure = np.zeros((digit_size*n, digit_size*n))\n\t# linearly spaced coordinates corresponding to the 2D plot\n\t# of digits classes in latent space\n\tgrid_x = np.linspace(-4,4,n)\n\n\t#reversed of above\n\tgrid_y = np.linspace(-4,4,n)[::-1]\n\n\tfor i, yi in enumerate(grid_y):\n\t\tfor j, xi in enumerate(grid_x):\n\t\t\tz_sample = np.array([[xi, yi]])\n\t\t\tx_decoded = decoder.predict(z_sample)\n\t\t\tdigit = x_decoded[0].reshape(digit_size, digit_size)\n\t\t\tfigure[i*digit_size : (i+1)*digit_size, j*digit_size : (j+1)*digit_size] = digit\n\n\tplt.figure(figsize=(10,10))\n\tstart_range = digit_size // 2\n\tend_range = n * digit_size + start_range + 1\n\tpixel_range = np.arange(start_range, end_range, digit_size)\n\tsample_range_x = np.round(grid_x, 1)\n\tsample_range_y = np.round(grid_y, 1)\n\tplt.xticks(pixel_range, sample_range_x)\n\tplt.yticks(pixel_range, sample_range_y)\n\tplt.xlabel(\"z[0]\")\n\tplt.ylabel(\"z[1]\")\n\tplt.imshow(figure, cmap=\"Greys_r\")\n\tplt.savefig(filename)\n\tplt.show()\n\n# MNIST dataset\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nimage_size = x_train.shape[1]\noriginal_dim = image_size * image_size\nx_train = np.reshape(x_train, [-1, original_dim])\nx_test = np.reshape(x_test, [-1, original_dim])\nx_train = x_train.astype('float32') / 255.\nx_test = x_test.astype('float32') / 255.\n\n#network parameters\ninput_shape = (original_dim, )\nintermediate_dim = 512\nbatch_size = 128\nlatent_dim = 2\nepochs = 4\n\n# VAE model = encoder + decoder\ninputs = Input(shape=input_shape, name='encoder_input')\nx = Dense(intermediate_dim, activation='relu')(inputs)\nz_mean = Dense(latent_dim, name='z_mean')(x)\nz_log_var = Dense(latent_dim, name='z_log_var')(x)\n\n# sample\n# output shape is not required with tensorflow backend\n# so we can write Lambda(sampling)([z_mean, z_log,sigma])\nz = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])\n\n# 
instantiate encoder model\nencoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')\nencoder.summary()\nplot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)\n\n# build decoder model\nlatent_inputs = Input(shape=(latent_dim,), name='z_sampling')\nx = Dense(intermediate_dim,activation='relu')(latent_inputs)\noutputs = Dense(original_dim, activation='sigmoid')(x)\nprint(outputs.shape)\n\n# instantiate decoder model\ndecoder = Model(latent_inputs, outputs, name='decoder')\ndecoder.summary()\nplot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)\n\n# instantiate VAE model\noutputs = decoder(encoder(inputs)[2])\nprint(outputs.shape)\nvae = Model(inputs, outputs,name='vae_mlp')\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\thelp_ = 'Load h5 model trained weights'\n\tparser.add_argument('-w', '--weights',help=help_)\n\thelp_= 'Use mse loss instead of binary cross entropy'\n\tparser.add_argument('-m', '--mse', help=help_, action='store_true')\n\n\targs = parser.parse_args()\n\tmodels = (encoder, decoder)\n\tdata = (x_test, y_test)\n\n\t# VAE loss =\n\t# mse_loss + kl_loss\n\t# or\n\t# xentropy_loss + kl_loss\n\t#\n\tif args.mse:\n\t\treconstruction_loss = mse(inputs, outputs)\n\telse:\n\t\treconstruction_loss = binary_crossentropy(inputs, outputs)\n\n\treconstruction_loss *= original_dim\n\n\t# KL loss\n\t# KL(P||Q), when P~N(mu, var), and Q ~ N(0,1):\n\t# -1/2(var + mu^2 -1 - log var)\n\t# re: https://stats.stackexchange.com/questions/7440/kl-divergence-between-two-univariate-gaussians\n\t#\n\tkl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)\n\tkl_loss = K.sum(kl_loss, axis=-1)\n\tkl_loss *= -0.5\n\n\tvae_loss = K.mean(reconstruction_loss + kl_loss)\n\tvae.add_loss(vae_loss)\n\tvae.compile(optimizer='adam')\n\tvae.summary()\n\n\tprint('reconstr_loss shape: {}'.format(reconstruction_loss.shape))\n\tprint('kl_loss shape: {}'.format(kl_loss.shape))\n\tprint('vae_loss shape: {}'.format(vae_loss.shape))\n\n\n\tplot_model(vae, to_file='vae_mlp.png',show_shapes=True)\n\n\tif args.weights:\n\t\tvae.load_weights(args.weights)\n\telse:\n\t\t# train\n\t\tvae.fit(x_train,epochs=epochs,batch_size=batch_size,validation_data=(x_test, None))\n\t\tvae.save_weights('vae_mlp_mnist.h5')\n\n\tplot_results(models, data, batch_size=batch_size,model_name='vae_mlp')\n\n\n" ]
[ [ "numpy.array", "matplotlib.pyplot.yticks", "matplotlib.pyplot.imshow", "matplotlib.pyplot.scatter", "numpy.linspace", "numpy.reshape", "numpy.arange", "matplotlib.pyplot.savefig", "numpy.round", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.xticks", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jstout211/enigma_MEG
[ "3db3e968c1d13a04ae27f1e7d77199ec0a589642" ]
[ "enigmeg/test/test_loop_process_file.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 28 15:55:48 2021\n\n@author: stoutjd\n\"\"\"\n\nimport pandas as pd\nimport enigmeg\nimport mne\nimport os, os.path as op\nimport shutil\n\nimport pytest\nfrom enigmeg.test_data import loop_test_data \nfrom enigmeg.test_data.get_test_data import datasets\nfrom enigmeg.process_meg import main\nfrom enigmeg.process_meg import parse_proc_inputs\n\n\ndef generate_short_test_data():\n '''This is unecessary to run - Data is stored in the git annex repo'''\n elekta_dat = datasets().elekta\n raw = mne.io.read_raw_fif(elekta_dat['meg_rest'])\n raw.crop(tmax=20)\n raw.resample(300)\n \n outfname = op.join(loop_test_data.__path__[0], 'short_elekta_rest.fif')\n raw.save(outfname) \n \n eroom_raw = mne.io.read_raw_fif(elekta_dat['meg_eroom'])\n eroom_raw.crop(tmax=20)\n eroom_raw.resample(300)\n \n outfname_eroom = op.join(loop_test_data.__path__[0], 'short_elekta_eroom.fif')\n eroom_raw.save(outfname_eroom)\n \n # For CTF data it is necessary to use the CTF commandline tools\n tmp1 = 'newDs -time 1 21 -resample 4 ctf_rest.ds short_ctf_rest.ds'\n tmp2 = 'newDs -resample 4 ctf_eroom.ds short_ctf_eroom.ds'\n print('Must process CTF data manually\\n{}\\n{}'.format(tmp1, tmp2))\n\n\n \ndef test_process_file(tmpdir):\n '''Generate a csv file and use this as input for the config file loop\n Loop over all entries in a tab separated csv file'''\n \n test_process_file = op.join(loop_test_data.__path__[0], \n 'test_process_file.csv')\n \n test_dframe = pd.read_csv(test_process_file, delimiter='\\t')\n \n output_dir = tmpdir #op.expanduser('~/Desktop/TEMP')\n \n # Process Elekta info\n elekta_dat = datasets().elekta \n test_dframe.loc[2,'subject'] = elekta_dat['subject']\n test_dframe.loc[2, 'fs_subjects_dir'] = elekta_dat['SUBJECTS_DIR']\n test_dframe.loc[2, 'meg_top_dir'] = loop_test_data.__path__[0]\n test_dframe.loc[2, 'meg_file_path'] = 'short_elekta_rest.fif'\n test_dframe.loc[2, 'eroom_file_path'] = 'short_elekta_eroom.fif'\n test_dframe.loc[2, 'output_dir'] = output_dir\n test_dframe.loc[2, 'line_freq'] = 50\n test_dframe.loc[2, 'trans_file'] = elekta_dat['trans']\n enigma_subj_dir = op.join(output_dir, elekta_dat['subject'])\n if not op.exists(enigma_subj_dir):\n os.mkdir(enigma_subj_dir)\n shutil.copy(elekta_dat['src'], enigma_subj_dir)\n shutil.copy(elekta_dat['bem'], enigma_subj_dir)\n \n # Process CTF info\n ctf_dat = datasets().ctf\n test_dframe.loc[3,'subject'] = ctf_dat['subject']\n test_dframe.loc[3, 'fs_subjects_dir'] = ctf_dat['SUBJECTS_DIR']\n test_dframe.loc[3, 'meg_top_dir'] = loop_test_data.__path__[0]\n test_dframe.loc[3, 'meg_file_path'] = 'short_ctf_rest.ds'\n test_dframe.loc[3, 'eroom_file_path'] = 'short_ctf_eroom.ds'\n test_dframe.loc[3, 'output_dir'] = output_dir\n test_dframe.loc[3, 'line_freq'] = 60 \n test_dframe.loc[3, 'trans_file'] = ctf_dat['trans']\n enigma_subj_dir = op.join(output_dir, ctf_dat['subject'])\n if not op.exists(enigma_subj_dir):\n os.mkdir(enigma_subj_dir)\n shutil.copy(ctf_dat['src'], enigma_subj_dir)\n shutil.copy(ctf_dat['bem'], enigma_subj_dir)\n \n output_csv = op.join(output_dir, 'process.csv') \n test_dframe.to_csv(output_csv, sep='\\t', index=False)\n \n # parse_inputs(output_csv)\n parse_proc_inputs(output_csv)\n \n #Verify that the outputs have been created for the multiple inputs\n assert op.exists(op.join(output_dir, elekta_dat['subject'], 'Band_rel_power.csv'))\n assert op.exists(op.join(output_dir, ctf_dat['subject'], 'Band_rel_power.csv'))\n \n print(test_dframe)\n \n 
\n \n \n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
daemon/squawk
[ "df6443a200f8bfef7d5338d8577fc30eac4f49b9" ]
[ "squawk/data/dataset.py" ]
[ "from collections import OrderedDict\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Mapping, Sequence\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport torch.utils.data as tud\nimport torchaudio\n\nfrom squawk.ctqdm import ctqdm\n\n\n@dataclass\nclass DatasetInfo(object):\n name: str\n sample_rate: int\n label_map: Mapping[str, int]\n\n def __post_init__(self):\n self.num_labels = len(self.label_map)\n\n\nclass LruCache(object):\n\n def __init__(self, capacity=np.inf, load_fn=None):\n self.capacity = capacity\n self.cache = OrderedDict()\n self.load_fn = load_fn\n\n def __getitem__(self, key):\n try:\n value = self.cache.pop(key)\n self.cache[key] = value\n except KeyError:\n if self.load_fn is not None:\n self[key] = value = self.load_fn(key)\n else:\n raise KeyError\n return value\n\n def __setitem__(self, key, value):\n try:\n self.cache.pop(key)\n except KeyError:\n if len(self.cache) >= self.capacity:\n self.cache.popitem(last=False)\n self.cache[key] = value\n\n\n@dataclass\nclass ClassificationExample(object):\n audio: torch.Tensor\n label: int\n\n\n@dataclass\nclass ClassificationBatch(object):\n audio: torch.Tensor\n labels: torch.LongTensor\n lengths: torch.LongTensor = None\n\n def to(self, device):\n self.audio = self.audio.to(device)\n self.labels = self.labels.to(device)\n self.lengths = self.lengths.to(device)\n\n def pin_memory(self):\n self.audio = self.audio.pin_memory()\n self.labels = self.labels.pin_memory()\n if self.lengths is not None: self.lengths = self.lengths.pin_memory()\n return self\n\n\n@dataclass\nclass ClassificationDataset(tud.Dataset):\n audio_data: Sequence[str]\n label_data: Sequence[int]\n info: DatasetInfo\n lru_cache: LruCache\n\n def __post_init__(self):\n self.lru_cache.load_fn = self.load\n\n def __len__(self):\n return len(self.audio_data)\n\n def load(self, idx):\n return torchaudio.load(self.audio_data[idx])[0]\n\n def split(self, proportion):\n proportion = int(proportion * len(self.audio_data))\n audio_data1 = self.audio_data[:proportion]\n audio_data2 = self.audio_data[proportion:]\n label_data1 = self.label_data[:proportion]\n label_data2 = self.label_data[proportion:]\n return ClassificationDataset(audio_data1, label_data1, self.info, LruCache(self.lru_cache.capacity)),\\\n ClassificationDataset(audio_data2, label_data2, self.info, LruCache(self.lru_cache.capacity))\n\n def __getitem__(self, idx):\n return ClassificationExample(self.lru_cache[idx], self.label_data[idx])\n\n\ndef load_gsc(folder: Path, lru_maxsize=np.inf):\n def load_split(name):\n dev_path = folder / 'validation_list.txt'\n test_path = folder / 'testing_list.txt'\n labels = []\n for x in folder.glob('*'):\n if x.is_dir():\n labels.append(x.name)\n labels = sorted(labels)\n l2idx = {x: idx for idx, x in enumerate(labels)}\n with open(dev_path) as f:\n dev_set = list(map(str.strip, f.readlines()))\n with open(test_path) as f:\n test_set = list(map(str.strip, f.readlines()))\n if name == 'dev':\n tgt_set = dev_set\n elif name == 'test':\n tgt_set = test_set\n else:\n all_set = set(f'{str(x.parent.name)}/{str(x.name)}' for x in folder.glob('*/*.wav'))\n dev_set = set(dev_set)\n test_set = set(test_set)\n tgt_set = (all_set - dev_set) - test_set\n return ClassificationDataset([f'{str(folder)}/{x}' for x in tgt_set],\n [l2idx[x.split('/')[0]] for x in tgt_set],\n DatasetInfo('GSC', 16000, l2idx),\n LruCache(lru_maxsize))\n return load_split('training'), load_split('dev'), load_split('test')\n\n\ndef load_freesounds(folder: 
Path, lru_maxsize=np.inf):\n def load_split(name):\n train_csv_path = folder / 'FSDKaggle2018.meta' / f'train_post_competition.csv'\n labels_csv_path = folder / 'FSDKaggle2018.meta' / f'{name}_post_competition{\"_scoring_clips\" if name == \"test\" else \"\"}.csv'\n df = pd.read_csv(labels_csv_path, quoting=3)\n train_df = pd.read_csv(train_csv_path, quoting=3)\n idx2l = sorted(list(set(train_df['label'].unique())))\n l2idx = {lbl: idx for idx, lbl in enumerate(idx2l)}\n label_map = {}\n for row in df[['fname', 'label']].itertuples():\n label_map[row.fname] = l2idx[row.label]\n\n data_folder = folder / f'FSDKaggle2018.audio_{name}'\n audio_data = []\n label_data = []\n for wav_file in ctqdm(list(data_folder.glob('*.wav')), desc=f'Preparing {name} dataset'):\n audio_data.append(str(wav_file))\n label_data.append(label_map[wav_file.name])\n _, sr = torchaudio.load(wav_file)\n return ClassificationDataset(audio_data, label_data, DatasetInfo('FreeSounds', sr, l2idx), LruCache(lru_maxsize))\n train_split, dev_split = load_split('train').split(0.9)\n test_split = load_split('test')\n return train_split, dev_split, test_split\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ZhecanJamesWang/MPIIGaze_Pytorch
[ "369f836d8317b57d9d0f67622d220bc1e80a8696" ]
[ "main.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\nimport os\nimport time\nimport json\nfrom collections import OrderedDict\nimport importlib\nimport logging\nimport argparse\nimport numpy as np\nimport random\nimport cv2\nimport datetime\n\nimport torch\nimport torch.nn as nn\nimport torch.optim\nimport torch.utils.data\nimport torch.backends.cudnn\nimport torchvision.utils\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n# try:\nfrom tensorboardX import SummaryWriter\nis_tensorboard_available = True\n# except Exception:\n# is_tensorboard_available = False\n\nfrom dataloader import get_loader\n\n# torch.backends.cudnn.benchmark = False\n\nlogging.basicConfig(\n format='[%(asctime)s %(name)s %(levelname)s] - %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S',\n level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nglobal_step = 0\n\nnow = datetime.datetime.now()\ndate = now.strftime(\"%Y-%m-%d-%H-%M\")\n\nrecord_file_name = date + '_record.txt'\nrecords = \"\"\nrecords_count = 0\n\n\ndef str2bool(s):\n if s.lower() == 'true':\n return True\n elif s.lower() == 'false':\n return False\n else:\n raise RuntimeError('Boolean value expected')\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--arch', type=str, choices=['lenet', 'resnet_preact', 'alexnet', 'resnet101', 'resnet34',\n 'resnet18', 'resnet10', 'resnet18_gh_exp_1', \"resnet34_gh_exp_1\",\n \"resnet34_gh_exp_2\", \"resnet34_gh_exp_3\", 'resnet34_classifier'])\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--test_id', type=int)\n parser.add_argument('--outdir', type=str)\n parser.add_argument('--seed', type=int, default=17)\n parser.add_argument('--num_workers', type=int, default=7)\n\n # optimizer\n parser.add_argument('--epochs', type=int, default=40)\n parser.add_argument('--batch_size', type=int, default=300)\n parser.add_argument('--base_lr', type=float, default=0.01)\n parser.add_argument('--weight_decay', type=float, default=1e-4)\n parser.add_argument('--momentum', type=float, default=0.9)\n parser.add_argument('--nesterov', type=str2bool, default=True)\n parser.add_argument('--milestones', type=str, default='[20, 30]')\n parser.add_argument('--lr_decay', type=float, default=0.1)\n\n # TensorBoard\n parser.add_argument(\n '--tensorboard', dest='tensorboard', action='store_true', default=True)\n parser.add_argument(\n '--no-tensorboard', dest='tensorboard', action='store_false')\n parser.add_argument('--tensorboard_images', action='store_true')\n parser.add_argument('--tensorboard_parameters', action='store_true')\n\n parser.add_argument('--pretrained_path', type=str, default = \"\")\n parser.add_argument('--gpu', type=str)\n\n args = parser.parse_args()\n # if not is_tensorboard_available:\n # args.tensorboard = False\n # args.tensorboard_images = False\n # args.tensorboard_parameters = False\n\n args.tensorboard = True\n args.tensorboard_images = True\n args.tensorboard_parameters = True\n\n # assert os.path.exists(args.dataset)\n args.milestones = json.loads(args.milestones)\n\n return args\n\nargs = parse_args()\n\nargs.arch = \"resnet34\"\n# args.arch = \"resnet18_trimmed_2\"\n\n# dataset = \"1221_2018_train_4_camera\"\n# dataset = \"1226_2018_test_4_camera_hog\"\n# dataset = \"1228_2018_train_4_camera_hog_64\"\n# dataset = \"0102_2019_train_4_camera_hog\"\ndataset = \"0107_2019_4_camera_head_face_11.07_12.06_train\"\n\nargs.dataset = \"./\"\nargs.test_id = 0\nargs.outdir = \"second_test_data_\" + args.arch + \"_pretrained_0.01_relu_l2_batch64_no_shuffle_\" + 
dataset\nargs.batch_size = 64\nargs.base_lr = 0.001\nargs.momentum = 0.9\nargs.nesterov = True\nargs.weight_decay = 1e-4\nargs.epochs = 1000\nargs.lr_decay = 0.1\nargs.gpu = torch.cuda.current_device()\n#\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu.strip()\n\nprint (\"print (torch.cuda.current_device()): \", torch.cuda.current_device())\n\nargs.outdir = \"results/\" + date + \"_\" + args.outdir\n\n\ndef write_to_file(file_name, content):\n\n\tfh = open(file_name, \"a\")\n\tfh.write(content)\n\tfh.close\n\n\tcontent = \"\"\n\treturn content\n\n\ndef save_to_record(content):\n global records\n global records_count\n\n # print(content)\n records += content\n records_count += 1\n\n file_name = args.outdir + \"/\" + record_file_name\n\n if records_count % 20 == 0:\n write_to_file(file_name, records)\n records = \"\"\n\n\nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, num):\n self.val = val\n self.sum += val * num\n self.count += num\n self.avg = self.sum / self.count\n\n# If we define pitch=0 as horizontal (z=0) and yaw as counter-clockwise from the x axis, then the direction vector will be\n\n# x = cos(yaw)*cos(pitch)\n# y = sin(yaw)*cos(pitch)\n# z = sin(pitch)\n\n\n# (pitch, yaw)\n\ndef convert_to_unit_vector(angles):\n x = -torch.cos(angles[:, 0]) * torch.sin(angles[:, 1])\n y = -torch.sin(angles[:, 0])\n # z = -torch.cos(angles[:, 1]) * torch.cos(angles[:, 1])\n z = -torch.cos(angles[:, 0]) * torch.cos(angles[:, 1])\n\n norm = torch.sqrt(x**2 + y**2 + z**2)\n x /= norm\n y /= norm\n z /= norm\n return x, y, z\n\n\n# def compute_angle_error(preds, labels):\n# pred_x, pred_y, pred_z = convert_to_unit_vector(preds)\n# label_x, label_y, label_z = convert_to_unit_vector(labels)\n# angles = pred_x * label_x + pred_y * label_y + pred_z * label_z\n# return torch.acos(angles) * 180 / np.pi\n\ndef compute_angle_error(preds, labels):\n err = torch.abs(preds - labels).mean()\n\n return err * 180 / np.pi\n\ndef train(epoch, model, optimizer, criterion, train_loader, config, writer):\n global global_step\n\n logger.info('Train {}'.format(epoch))\n save_to_record('Train {}'.format(epoch) + \"\\n\")\n\n model.train()\n\n loss_meter = AverageMeter()\n angle_error_meter = AverageMeter()\n start = time.time()\n # gaze_features = []\n # head_features = []\n # gazes_list = []\n # image_list = []\n\n # for step, (images, poses, gazes) in enumerate(train_loader):\n # for step, (images, gazes) in enumerate(train_loader):\n for step, (images, gazes, heads) in enumerate(train_loader):\n # for step, (images, gazes, heads, faces) in enumerate(train_loader):\n global_step += 1\n\n # if config['tensorboard_images'] and step == 0:\n # image = torchvision.utils.make_grid(\n # images, normalize=True, scale_each=True)\n # writer.add_image('Train/Image', image, epoch)\n\n\n images = images.cuda()\n gazes = gazes.cuda()\n heads = heads.cuda()\n\n optimizer.zero_grad()\n\n # outputs = model(images)\n outputs = model(images, heads)\n\n # print(\"outputs.shape: \", outputs.shape)\n # print(\"heads.shape: \", heads.shape)\n\n # images = images.detach().cpu().numpy()\n # gazes = gazes.detach().cpu().numpy()\n # gaze_feature = outputs.detach().cpu().numpy()\n # head_feature = heads.detach().cpu().numpy()\n #\n # image_list.extend(images)\n # gazes_list.extend(gazes)\n # gaze_features.extend(gaze_feature)\n # head_features.extend(head_feature)\n\n loss = criterion(outputs, gazes)\n 
loss.backward()\n\n optimizer.step()\n\n angle_error = compute_angle_error(outputs, gazes).mean()\n\n # num = images.size(0)\n num = 1\n loss_meter.update(loss.item(), num)\n angle_error_meter.update(angle_error.item(), num)\n\n if config['tensorboard']:\n writer.add_scalar('Train/RunningLoss', loss_meter.val, global_step)\n\n\n if step % 10 == 0:\n\n logger.info('Epoch {} Step {}/{} '\n 'Loss {:.4f} ({:.4f}) '\n 'AngleError {:.2f} ({:.2f})'.format(\n epoch,\n step,\n len(train_loader),\n loss_meter.val,\n loss_meter.avg,\n angle_error_meter.val,\n angle_error_meter.avg,\n ))\n save_to_record('Epoch {} Step {}/{} '\n 'Loss {:.4f} ({:.4f}) '\n 'AngleError {:.2f} ({:.2f})'.format(\n epoch,\n step,\n len(train_loader),\n loss_meter.val,\n loss_meter.avg,\n angle_error_meter.val,\n angle_error_meter.avg,\n ) + \"\\n\")\n if step % 10 == 0:\n logger.info(json.dumps(vars(args), indent=2))\n\n # image_list = np.asarray(image_list)\n # gaze_list = np.asarray(gazes_list)\n # gaze_features = np.asarray(gaze_features)\n # head_features = np.asarray(head_features)\n #\n # print(\"image_list.shape: \", image_list.shape)\n # print(\"gaze_list.shape: \", gaze_list.shape)\n # print(\"gaze_features.shape: \", gaze_features.shape)\n # print(\"head_features.shape: \", head_features.shape)\n #\n # np.savez(\"gh_exp_features\", image_list=image_list, gaze_list=gaze_list, gaze_features=gaze_features, head_features=head_features)\n #\n # raise(\"debug\")\n\n elapsed = time.time() - start\n logger.info('Elapsed {:.2f}'.format(elapsed))\n save_to_record('Elapsed {:.2f}'.format(elapsed) + \"\\n\")\n\n outdir = args.outdir\n model_path = os.path.join(outdir, 'model_state.pth')\n torch.save(model.state_dict(), model_path)\n\n if config['tensorboard']:\n writer.add_scalar('Train/Loss', loss_meter.avg, epoch)\n writer.add_scalar('Train/AngleError', angle_error_meter.avg, epoch)\n writer.add_scalar('Train/Time', elapsed, epoch)\n\n\ndef inference(test_loader, model):\n# save output ////////////////////////////////////\n\n outputs_list = []\n gazes_list = []\n gestures_list = []\n\n for step, (images, gazes) in enumerate(test_loader):\n # for step, (images, gazes, heads) in enumerate(test_loader):\n # for step, (images, gazes, gestures) in enumerate(test_loader):\n\n # if config['tensorboard_images'] and epoch == 0 and step == 0:\n # image = torchvision.utils.make_grid(\n # images, normalize=True, scale_each=True)\n # writer.add_image('Test/Image', image, epoch)\n\n images = images.cuda()\n # poses = poses.cuda()\n gazes = gazes.cuda()\n # heads = heads.cuda()\n gestures = gestures.cuda()\n\n with torch.no_grad():\n # outputs = model(images, poses)\n # outputs = model(images, heads)\n outputs = model(images)\n\n print(\"outputs.shape: \", outputs.shape)\n print(\"outputs[0].shape: \", outputs[0].shape)\n print(\"outputs[0]: \", outputs[0])\n\n outputs = outputs.detach().cpu().numpy()\n gazes = gazes.detach().cpu().numpy()\n gestures = gestures.detach().cpu().numpy()\n\n outputs_list.extend(outputs)\n gazes_list.extend(gazes)\n gestures_list.extend(gestures)\n\n print(np.asarray(outputs_list).shape)\n print(np.asarray(gazes_list).shape)\n print(np.asarray(gestures_list).shape)\n\n np.savez(\"0101_2019_regressor_output_list\", outputs_list=outputs_list)\n np.savez(\"0101_2019_regressor_gazes_list\", gazes_list=gazes_list)\n np.savez(\"0101_2019_regressor_gestures_list\", gestures_list=gestures_list)\n\n raise(\"debug\")\n\n\ndef test(epoch, model, criterion, test_loader, config, writer):\n logger.info('Test 
{}'.format(epoch))\n save_to_record('Test {}'.format(epoch) + \"\\n\")\n\n logger.info(json.dumps(vars(args), indent=2))\n\n model.eval()\n\n loss_meter = AverageMeter()\n angle_error_meter = AverageMeter()\n start = time.time()\n\n# ////////////////////////////////////\n# inference(test_loader, model)\n# ////////////////////////////////////\n\n\n # for step, (images, gazes) in enumerate(test_loader):\n for step, (images, gazes, heads) in enumerate(test_loader):\n # for step, (images, gazes, heads, faces) in enumerate(test_loader):\n\n # print(\"images.shape: \", images.shape)\n # print(\"gazes.shape: \", gazes.shape)\n # print(\"heads.shape: \", heads.shape)\n\n # if config['tensorboard_images'] and epoch == 0 and step == 0:\n # image = torchvision.utils.make_grid(\n # images, normalize=True, scale_each=True)\n # writer.add_image('Test/Image', image, epoch)\n\n\n images = images.cuda()\n gazes = gazes.cuda()\n heads = heads.cuda()\n\n with torch.no_grad():\n # outputs = model(images)\n outputs = model(images, heads)\n\n loss = criterion(outputs, gazes)\n\n angle_error = compute_angle_error(outputs, gazes).mean()\n\n # num = images.size(0)\n num = 1\n loss_meter.update(loss.item(), num)\n angle_error_meter.update(angle_error.item(), num)\n\n logger.info('Epoch {} Loss {:.4f} AngleError {:.2f}'.format(\n epoch, loss_meter.avg, angle_error_meter.avg))\n\n save_to_record('Epoch {} Loss {:.4f} AngleError {:.2f}'.format(\n epoch, loss_meter.avg, angle_error_meter.avg) + \"\\n\")\n\n\n elapsed = time.time() - start\n logger.info('Elapsed {:.2f}'.format(elapsed))\n save_to_record('Elapsed {:.2f}'.format(elapsed) + \"\\n\")\n\n # if config['tensorboard']:\n # if epoch > 0:\n # writer.add_scalar('Test/Loss', loss_meter.avg, epoch)\n # writer.add_scalar('Test/AngleError', angle_error_meter.avg, epoch)\n # writer.add_scalar('Test/Time', elapsed, epoch)\n #\n # if config['tensorboard_parameters']:\n # for name, param in model.named_parameters():\n # writer.add_histogram(name, param, global_step)\n\n return angle_error_meter.avg\n\ndef plot_gaze_pose(center_pt, gaze, pose, image, counter):\n\n increase = 30\n [cx, cy] = center_pt\n\n if pose:\n [head_yaw, head_pitch] = pose\n y_x, y_y = - np.sin(head_yaw * np.pi/180), np.sin(head_pitch * np.pi/180)\n else:\n [left_yaw, left_pitch] = gaze\n y_x, y_y = - np.sin(left_yaw), - np.sin(left_pitch)\n\n print(\"cx, cy: \", cx, cy)\n print(\"y_x, y_y: \", y_x, y_y)\n\n y_x, y_y = int(y_x * increase), -int(y_y * increase)\n\n print(image.shape)\n cv2.imwrite('test.png', image)\n image = cv2.imread('test.png')\n print(image.shape)\n\n cv2.circle(image, (int(cx), int(cy)), 5, (0, 0, 255), -1)\n cv2.line(image, (int(cx), int(cy)), (int(cx + y_x), int(cy + y_y)), (255, 0, 0), 3)\n\n cv2.imwrite(str(counter) + '.png', image)\n # cv2.imshow(\"eye\", image)\n # cv2.waitKey(0)\n # raise \"debug\"\n\ndef inspect_input(train_loader):\n# inspect input ///////////////////////////////////////////\n# for step, (images, poses, gazes) in enumerate(train_loader):\n for step, (images, gazes) in enumerate(train_loader):\n # for step, (images, gazes, onehot_gt) in enumerate(train_loader):\n\n print (\"images.shape: \", images.shape)\n for index in range(len(images)):\n\n image = np.asarray(images[index]).astype(np.uint8).copy()\n\n gaze = np.asarray(gazes[index])\n # pose = np.asarray(poses[index])\n # gt = np.asarray(onehot_gt[index])\n\n image = image.transpose(1, 2, 0)\n\n height, width, channels = image.shape\n cy, cx = height/2, width/2\n\n print(image.shape)\n 
print(type(image))\n\n print(\"gaze: \", gaze)\n # print(\"pose: \", pose)\n # print(\"onehot_gt: \", gt)\n print(\"index: \", index)\n\n # cv2.imshow(\"image\", image)\n # cv2.waitKey(0)\n\n # plot_gaze_pose([cx, cy], gaze, pose, image, index)\n # plot_gaze_pose([cx, cy], gaze, None, image, index, gt)\n plot_gaze_pose([cx, cy], gaze, None, image, index)\n\n raise (\"debug\")\n\ndef main():\n logger.info(json.dumps(vars(args), indent=2))\n save_to_record(str(json.dumps(vars(args))) + \"\\n\")\n\n # TensorBoard SummaryWriter\n writer = SummaryWriter() if args.tensorboard else None\n\n # set random seed\n seed = args.seed\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n # create output directory\n outdir = args.outdir\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n outpath = os.path.join(outdir, 'config.json')\n with open(outpath, 'w') as fout:\n json.dump(vars(args), fout, indent=2)\n\n # data loaders\n train_loader, test_loader = get_loader(\n args.dataset, args.test_id, args.batch_size, args.num_workers, True, False)\n\n # ///////////////////////////////////////////\n # inspect_input(train_loader)\n # ///////////////////////////////////////////\n\n # model\n module = importlib.import_module('models.{}'.format(args.arch))\n model = module.Model()\n\n # weights = \"models/resnet10_weights.npy\"\n # print \"loading: \", weights\n # model = module.Model(weights)\n\n # model = torch.nn.DataParallel(model)\n\n model.cuda()\n\n criterion = nn.MSELoss(size_average=True)\n # criterion = nn.SmoothL1Loss(size_average=True)\n\n # optimizer\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=args.base_lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay,\n nesterov=args.nesterov)\n # scheduler = torch.optim.lr_scheduler.MultiStepLR(\n # optimizer, milestones=args.milestones, gamma=args.lr_decay)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=args.lr_decay, last_epoch=-1)\n\n config = {\n 'tensorboard': args.tensorboard,\n 'tensorboard_images': args.tensorboard_images,\n 'tensorboard_parameters': args.tensorboard_parameters,\n }\n #\n # args.pretrained_path = \"results/2018-12-21-07-07_second_test_data_resnet34_pretrained_0.01_relu_l2_4_camera_batch64_no_shuffle/model_state_100.pth\"\n #\n # if args.pretrained_path != \"\":\n # state_dict = torch.load(args.pretrained_path)['state_dict']\n # model.load_state_dict(state_dict)\n #\n # print (\"args.pretrained_path: \", args.pretrained_path)\n # # raise (\"debug\")\n\n # run test before start training\n test(0, model, criterion, test_loader, config, writer)\n\n\n for epoch in range(1, args.epochs + 1):\n scheduler.step()\n\n lr = scheduler.get_lr()\n print(\"current learnin rate: \", str(lr))\n save_to_record(\"current learnin rate: \" + str(lr))\n\n train(epoch, model, optimizer, criterion, train_loader, config, writer)\n angle_error = test(epoch, model, criterion, test_loader, config,\n writer)\n\n state = OrderedDict([\n ('args', vars(args)),\n ('state_dict', model.state_dict()),\n ('optimizer', optimizer.state_dict()),\n ('epoch', epoch),\n ('angle_error', angle_error),\n ])\n\n if epoch % 50 == 0:\n model_path = os.path.join(outdir, 'model_state_' + str(epoch) + '.pth')\n # torch.save(model.state_dict(), model_path)\n torch.save(state, model_path)\n\n if args.tensorboard:\n outpath = os.path.join(outdir, 'all_scalars.json')\n writer.export_scalars_to_json(outpath)\n\nif __name__ == '__main__':\n main()\n\n" ]
[ [ "torch.abs", "numpy.savez", "numpy.random.seed", "torch.cuda.current_device", "torch.sqrt", "torch.manual_seed", "torch.sin", "numpy.asarray", "numpy.sin", "torch.no_grad", "torch.save", "torch.nn.MSELoss", "torch.cos", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kolk/Pea-QA
[ "bdfe90ba859833f2e86159d982bb4fa268c68af1" ]
[ "train.py" ]
[ "\n# -*- coding: utf-8 -*-\nimport json\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler, random_split\nfrom datasets import load_dataset, load_metric\nimport transformers\nfrom transformers import AutoModelForSeq2SeqLM, Trainer, Seq2SeqTrainer, Seq2SeqTrainingArguments, AutoTokenizer, AutoConfig\nfrom transformers.models.bart import BartForConditionalGeneration, BartTokenizer, BartConfig\nfrom transformers.models.bart.modeling_bart import shift_tokens_right\nfrom transformers import HoulsbyConfig, PfeifferConfig\nfrom transformers.adapters.configuration import AdapterConfig\nfrom transformers.models.auto import AutoModelWithHeads, AutoModelForSeq2SeqLM\nfrom transformers import set_seed\nfrom transformers.optimization import (\n Adafactor,\n get_cosine_schedule_with_warmup,\n get_cosine_with_hard_restarts_schedule_with_warmup,\n get_linear_schedule_with_warmup,\n get_polynomial_decay_schedule_with_warmup,\n)\nimport os\nfrom rouge_score import rouge_scorer, scoring\nimport argparse\nfrom fetaqa import FeTaQaDataset, FeTaQaProcessor\nfrom tablesum import TableSumDataset, TableSumProcessor\nfrom narrativeqa import NarrativeQADataset, NarrativeQAProcessor\n\n\nos.environ[\"WANDB_DISABLED\"] = \"true\"\narg_to_scheduler = {\n \"linear\": get_linear_schedule_with_warmup,\n \"cosine\": get_cosine_schedule_with_warmup,\n \"cosine_w_restarts\": get_cosine_with_hard_restarts_schedule_with_warmup,\n \"polynomial\": get_polynomial_decay_schedule_with_warmup,\n # '': get_constant_schedule, # not supported for now\n # '': get_constant_schedule_with_warmup, # not supported for now\n}\narg_to_scheduler_choices = sorted(arg_to_scheduler.keys())\narg_to_scheduler_metavar = \"{\" + \", \".join(arg_to_scheduler_choices) + \"}\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dataset_name\", type=str, help=\"name of dataset to adapter-tune on\")\nparser.add_argument(\"--decoder_max_length\", type=int, help=\"encoder sequence max length\")\nparser.add_argument(\"--pretrained_model_name\", type=str, default=None, help=\"prtrained model name\")\nparser.add_argument(\"--adapter_tune\", type=str, default=None, help='adapter name for adapter-tuning')\nparser.add_argument(\"--adapter_config\", type=str, default=\"houlsby\", help=\"Adapter configs: [houlsby, Pfeiffer]\")\nparser.add_argument(\"--leaveout\", type=int, nargs=\"*\", help=\"Adapter layers to leave out\")\nparser.add_argument(\"--learning_rate\", default=1e-4, type=float, help=\"The initial learning rate for Adam.\")\nparser.add_argument(\"--lr_scheduler\", default=\"linear\", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help=\"Learning rate scheduler\",)\nparser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\nparser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\nparser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\nparser.add_argument(\"--num_workers\", default=4, type=int, help=\"kwarg passed to DataLoader\")\nparser.add_argument(\"--num_train_epochs\", default=30, type=int)\nparser.add_argument(\"--train_batch_size\", default=32, type=int)\nparser.add_argument(\"--eval_batch_size\", default=32, type=int)\nparser.add_argument(\"--adafactor\", action=\"store_true\")\nparser.add_argument(\"--output_dir\", type=str, 
default=None)\nparser.add_argument(\"--seed\", type=int, default=24)\nparser.add_argument(\"--cpu\", action=\"store_true\", help=\"train using cpu\")\n\nargs = parser.parse_args()\nprint(args)\nprint('leaveout', args.leaveout)\nprint('adapter_tune', args.adapter_tune)\nprint('output_dir', args.output_dir)\nprint('num_train_epochs', args.num_train_epochs)\nprint('learning_rate', args.learning_rate)\nprint('seed', args.seed)\nprint('')\nuse_cuda = False if args.cpu else True\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n\nexperiments2_seed = 123\nexperiments3_seed = 42\nexperiments4_seed = 64\nexperiments5_seed = 5\nexperiments6_seed = 6\n\nseed = args.seed\ndef model_init():\n set_seed(args.seed)\n model = AutoModelForSeq2SeqLM.from_pretrained(args.pretrained_model_name)\n if args.adapter_tune:\n if args.leaveout:\n if args.adapter_config.lower() == \"houlsby\":\n config = HoulsbyConfig(leave_out=list(args.leaveout))\n elif args.adapter_config.lower() == \"pfeiffer\":\n config = PfeifferConfig(leave_out=list(args.leaveout))\n else:\n config = AdapterConfig(original_ln_after=True,\n residual_before_ln=True,\n adapter_residual_before_ln=True,\n ln_before=True,\n ln_after=True,\n mh_adapter=True,\n output_adapter=True,\n non_linearity=\"relu\",\n reduction_factor=64,\n inv_adapter=None,#: str | None = None,\n inv_adapter_reduction_factor=64,\n cross_adapter=True,\n )\n else:\n if args.adapter_config.lower() == \"houlsby\":\n config = HoulsbyConfig()\n elif args.adapter_config.lower() == \"pfeiffer\":\n config = PfeifferConfig()\n else:\n config = AdapterConfig(original_ln_after=True,\n residual_before_ln=True,\n adapter_residual_before_ln=True,\n ln_before=True,\n ln_after=True,\n mh_adapter=True,\n output_adapter=True,\n non_linearity=\"relu\",\n reduction_factor=64,\n inv_adapter=None,\n inv_adapter_reduction_factor=64,\n cross_adapter=True,\n )\n model.add_adapter(args.adapter_tune, config=config)\n model.train_adapter(adapter_setup=args.adapter_tune)\n model.config.max_length=args.decoder_max_length\n model = model.to(device)\n print(model)\n return model\n\n\ntokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name)\nconfig = AutoConfig.from_pretrained(args.pretrained_model_name)\n\n\ndef get_dataset(dataset_name):\n if dataset_name in \"tablesum\":\n print(\"Training with Tablesum Dataset....\")\n tablesum_data = TableSumDataset(data_directory=\"data/tablesum/data/\", use_multiprocessing=False)\n train_set_size = int(len(tablesum_data) * 0.8)\n valid_set_size = len(tablesum_data) - train_set_size\n train_set, valid_set = random_split(tablesum_data, [train_set_size, valid_set_size], generator=torch.Generator().manual_seed(42))\n test_set = valid_set\n elif dataset_name in \"fetaqa\":\n print(\"Training with FeTaQA Dataset...\")\n valid_set = FeTaQaDataset(data_directory=\"data/FeTaQA/data/\", split=\"validation\", use_multiprocessing=False)\n test_set = FeTaQaDataset(data_directory=\"data/FeTaQA/data/\", split=\"test\", use_multiprocessing=False)\n train_set = FeTaQaDataset(data_directory=\"data/FeTaQA/data/\", split=\"train\", use_multiprocessing=False)\n elif dataset_name in \"narrativeqa\":\n print(\"Training with NarrativeQA Dataset...\")\n valid_set = NarrativeQADataset(split=\"validation\", use_multiprocessing=False)\n test_set = NarrativeQADataset(split=\"test\", use_multiprocessing=False)\n train_set = NarrativeQADataset(split=\"train\", use_multiprocessing=False)\n return train_set, valid_set, test_set\n\ntrain_dataset, valid_dataset, test_dataset = 
get_dataset(args.dataset_name)\n\ndef rouge_metric_builder(tokenizer):\n def compute_rouge_metrics(pred):\n \"\"\"utility to compute ROUGE during training.\"\"\"\n # All special tokens are removed.\n pred_ids, labels_ids = pred.predictions, pred.label_ids\n pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)\n labels_ids[labels_ids == -100] = tokenizer.pad_token_id\n label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)\n rouge_types = [\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\"]\n scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=True)\n aggregator = scoring.BootstrapAggregator()\n\n for ref, pred in zip(label_str, pred_str):\n print(\"target:\", ref)\n print(\"pred:\", pred)\n score = scorer.score(ref, pred)\n\n aggregator.add_scores(score)\n\n\n result = aggregator.aggregate()\n return {\n \"rouge1\": round(result['rouge1'].mid.fmeasure, 4),\n \"rouge2\": round(result['rouge2'].mid.fmeasure, 4),\n \"rougeL\": round(result['rougeL'].mid.fmeasure, 4),\n }\n return compute_rouge_metrics\n\nrouge_metric_fn = rouge_metric_builder(tokenizer)\n\ndef collate(batch):\n \"\"\"\n Generates tokenized batches\n \"\"\"\n source = [samp[\"question\"] + \" \" + samp[\"document\"] for samp in batch]\n target = [samp[\"target\"] for samp in batch]\n tokenized_input = tokenizer.prepare_seq2seq_batch(source, target, max_length=512,\n max_target_length=args.decoder_max_length,\n truncation=True, padding='max_length',\n return_tensors=\"pt\")\n\n if isinstance(config, BartConfig) or isinstance(config, T5Config):\n decoder_input_ids = shift_tokens_right(tokenized_input['labels'], tokenizer.pad_token_id, config.decoder_start_token_id)\n else:\n decoder_input_ids = tokenized_input['labels']\n\n return {\"input_ids\": tokenized_input[\"input_ids\"],\n \"attention_mask\": tokenized_input[\"attention_mask\"],\n \"labels\": tokenized_input[\"labels\"],\n \"decoder_input_ids\": decoder_input_ids} # tokenized_input[\"labels\"]}#decoder_input_ids[\"input_ids\"]}\n\n\ntrain_args = Seq2SeqTrainingArguments(\n output_dir=args.output_dir,\n do_train=True,\n do_eval=True,\n evaluation_strategy=\"epoch\",\n no_cuda=args.cpu,\n fp16=True if use_cuda else False,\n save_strategy=\"epoch\",\n save_total_limit = 1,\n logging_steps=100,\n eval_accumulation_steps=8,\n per_device_train_batch_size=4,\n per_device_eval_batch_size=4,\n gradient_accumulation_steps=8,\n learning_rate=args.learning_rate,\n adam_epsilon=args.adam_epsilon,\n num_train_epochs=args.num_train_epochs,\n warmup_steps=args.warmup_steps,\n seed=seed,\n disable_tqdm=False,\n predict_with_generate=True,\n generation_max_length = 200,\n generation_num_beams = 4,\n load_best_model_at_end=True,\n )\n\ntransformers.logging.set_verbosity_info()\ntrainer = Seq2SeqTrainer(\n model_init=model_init,\n args=train_args,\n train_dataset=train_dataset,\n eval_dataset=valid_dataset,\n tokenizer=tokenizer,\n data_collator=collate,\n compute_metrics=rouge_metric_fn,\n)\n\ntrainer.train()\ntrainer.save_state()\n\n" ]
[ [ "torch.device", "torch.Generator" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kelseybisson/icepyx
[ "6cf5d8faee29b5d8e04976b3a9a33c507adc7c31" ]
[ "icepyx/core/query.py" ]
[ "import datetime as dt\nimport os\nimport requests\nimport json\nimport warnings\nimport pprint\nimport time\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\n\nfrom icepyx.core.Earthdata import Earthdata\nimport icepyx.core.APIformatting as apifmt\nimport icepyx.core.is2ref as is2ref\nimport icepyx.core.granules as granules\nfrom icepyx.core.granules import Granules as Granules\n\n# QUESTION: why doesn't from granules import Granules as Granules work, since granules=icepyx.core.granules?\n# from icepyx.core.granules import Granules\nfrom icepyx.core.variables import Variables as Variables\nimport icepyx.core.geospatial as geospatial\nimport icepyx.core.validate_inputs as val\nfrom icepyx.core.visualization import Visualize\n\n# DevGoal: update docs throughout to allow for polygon spatial extent\n# Note: add files to docstring once implemented\n# DevNote: currently this class is not tested\nclass Query:\n \"\"\"\n ICESat-2 Data object to query, obtain, and perform basic operations on\n available ICESat-2 data products using temporal and spatial input parameters.\n Allows the easy input and formatting of search parameters to match the\n NASA NSIDC DAAC and (development goal-not yet implemented) conversion to multiple data types.\n\n Parameters\n ----------\n product : string\n ICESat-2 data product ID, also known as \"short name\" (e.g. ATL03).\n Available data products can be found at: https://nsidc.org/data/icesat-2/data-sets\n spatial_extent : list or string\n Spatial extent of interest, provided as a bounding box, list of polygon coordinates, or\n geospatial polygon file.\n Bounding box coordinates should be provided in decimal degrees as\n [lower-left-longitude, lower-left-latitute, upper-right-longitude, upper-right-latitude].\n Polygon coordinates should be provided as coordinate pairs in decimal degrees as\n [(longitude1, latitude1), (longitude2, latitude2), ... (longitude_n,latitude_n), (longitude1,latitude1)]\n or\n [longitude1, latitude1, longitude2, latitude2, ... longitude_n,latitude_n, longitude1,latitude1].\n Your list must contain at least four points, where the first and last are identical.\n DevGoal: adapt code so the polygon is automatically closed if need be\n Geospatial polygon files are entered as strings with the full file path and\n must contain only one polygon with the area of interest.\n Currently supported formats are: kml, shp, and gpkg\n date_range : list of 'YYYY-MM-DD' strings\n Date range of interest, provided as start and end dates, inclusive.\n The required date format is 'YYYY-MM-DD' strings, where\n YYYY = 4 digit year, MM = 2 digit month, DD = 2 digit day.\n Currently, a list of specific dates (rather than a range) is not accepted.\n DevGoal: accept date-time objects, dicts (with 'start_date' and 'end_date' keys, and DOY inputs).\n DevGoal: allow searches with a list of dates, rather than a range.\n start_time : HH:mm:ss, default 00:00:00\n Start time in UTC/Zulu (24 hour clock). If None, use default.\n DevGoal: check for time in date-range date-time object, if that's used for input.\n end_time : HH:mm:ss, default 23:59:59\n End time in UTC/Zulu (24 hour clock). If None, use default.\n DevGoal: check for time in date-range date-time object, if that's used for input.\n version : string, default most recent version\n Product version, given as a 3 digit string. If no version is given, the current\n version is used. 
Example: \"004\"\n cycles : string or a list of strings, default all available orbital cycles\n Product cycle, given as a 2 digit string. If no cycle is given, all available\n cycles are used. Example: \"04\"\n tracks : string or a list of strings, default all available reference ground tracks (RGTs)\n Product track, given as a 4 digit string. If no track is given, all available\n reference ground tracks are used. Example: \"0594\"\n files : string, default None\n A placeholder for future development. Not used for any purposes yet.\n\n Returns\n -------\n query object\n\n Examples\n --------\n Initializing Query with a bounding box.\n\n >>> reg_a_bbox = [-55, 68, -48, 71]\n >>> reg_a_dates = ['2019-02-20','2019-02-28']\n >>> reg_a = icepyx.query.Query('ATL06', reg_a_bbox, reg_a_dates)\n >>> reg_a\n <icepyx.core.query.Query at [location]>\n\n Initializing Query with a list of polygon vertex coordinate pairs.\n\n >>> reg_a_poly = [(-55, 68), (-55, 71), (-48, 71), (-48, 68), (-55, 68)]\n >>> reg_a_dates = ['2019-02-20','2019-02-28']\n >>> reg_a = icepyx.query.Query('ATL06', reg_a_poly, reg_a_dates)\n >>> reg_a\n <icepyx.core.query.Query at [location]>\n\n Initializing Query with a geospatial polygon file.\n\n >>> aoi = '/User/name/location/aoi.shp'\n >>> reg_a_dates = ['2019-02-22','2019-02-28']\n >>> reg_a = icepyx.query.Query('ATL06', aoi, reg_a_dates)\n >>> reg_a\n <icepyx.core.query.Query at [location]>\n \"\"\"\n\n # ----------------------------------------------------------------------\n # Constructors\n\n def __init__(\n self,\n product=None,\n spatial_extent=None,\n date_range=None,\n start_time=None,\n end_time=None,\n version=None,\n cycles=None,\n tracks=None,\n files=None, # NOTE: if you end up implemeting this feature here, use a better variable name than \"files\"\n ):\n\n # warnings.filterwarnings(\"always\")\n # warnings.warn(\"Please note: as of 2020-05-05, a major reorganization of the core icepyx.query code may result in errors produced by now depricated functions. Please see our documentation pages or example notebooks for updates.\")\n\n if (\n (product is None or spatial_extent is None)\n and (date_range is None or cycles is None or tracks is None)\n and files is None\n ):\n raise ValueError(\n \"Please provide the required inputs. 
Use help([function]) to view the function's documentation\"\n )\n\n if files is not None:\n self._source = \"files\"\n # self.file_vars = Variables(self._source)\n else:\n self._source = \"order\"\n # self.order_vars = Variables(self._source)\n # self.variables = Variables(self._source)\n\n self._prod = is2ref._validate_product(product)\n\n self.extent_type, self._spat_extent, self._geom_filepath = val.spatial(\n spatial_extent\n )\n\n if date_range:\n self._start, self._end = val.temporal(date_range, start_time, end_time)\n\n self._version = val.prod_version(self.latest_version(), version)\n\n # build list of available CMR parameters if reducing by cycle or RGT\n # or a list of explicitly named files (full or partial names)\n # DevGoal: add file name search to optional queries\n if cycles or tracks:\n # get lists of available ICESat-2 cycles and tracks\n self._cycles = val.cycles(cycles)\n self._tracks = val.tracks(tracks)\n # create list of CMR parameters for granule name\n self._readable_granule_name = apifmt._fmt_readable_granules(\n self._prod, cycles=self.cycles, tracks=self.tracks\n )\n\n # ----------------------------------------------------------------------\n # Properties\n\n @property\n def dataset(self):\n \"\"\"\n Legacy property included to provide depracation warning.\n\n See Also\n --------\n product\n \"\"\"\n warnings.filterwarnings(\"always\")\n warnings.warn(\n \"In line with most common usage, 'dataset' has been replaced by 'product'.\",\n DeprecationWarning,\n )\n\n @property\n def product(self):\n \"\"\"\n Return the short name product ID string associated with the query object.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.product\n 'ATL06'\n \"\"\"\n return self._prod\n\n @property\n def product_version(self):\n \"\"\"\n Return the product version of the data object.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.product_version\n '003'\n\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'], version='1')\n >>> reg_a.product_version\n '001'\n \"\"\"\n return self._version\n\n @property\n def spatial_extent(self):\n \"\"\"\n Return an array showing the spatial extent of the query object.\n Spatial extent is returned as an input type (which depends on how\n you initially entered your spatial data) followed by the geometry data.\n Bounding box data is [lower-left-longitude, lower-left-latitute, upper-right-longitude, upper-right-latitude].\n Polygon data is [[array of longitudes],[array of corresponding latitudes]].\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.spatial_extent\n ['bounding box', [-55, 68, -48, 71]]\n\n >>> reg_a = icepyx.query.Query('ATL06',[(-55, 68), (-55, 71), (-48, 71), (-48, 68), (-55, 68)],['2019-02-20','2019-02-28'])\n >>> reg_a.spatial_extent\n ['polygon', [-55.0, 68.0, -55.0, 71.0, -48.0, 71.0, -48.0, 68.0, -55.0, 68.0]]\n \"\"\"\n\n if self.extent_type == \"bounding_box\":\n return [\"bounding box\", self._spat_extent]\n elif self.extent_type == \"polygon\":\n # return ['polygon', self._spat_extent]\n # Note: self._spat_extent is a shapely geometry object\n return [\"polygon\", self._spat_extent.exterior.coords.xy]\n else:\n return [\"unknown spatial type\", None]\n\n @property\n def dates(self):\n \"\"\"\n Return an array showing the date range of the query object.\n Dates are 
returned as an array containing the start and end datetime objects, inclusive, in that order.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.dates\n ['2019-02-20', '2019-02-28']\n \"\"\"\n if not hasattr(self, \"_start\"):\n return [\"No temporal parameters set\"]\n else:\n return [\n self._start.strftime(\"%Y-%m-%d\"),\n self._end.strftime(\"%Y-%m-%d\"),\n ] # could also use self._start.date()\n\n @property\n def start_time(self):\n \"\"\"\n Return the start time specified for the start date.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.start_time\n '00:00:00'\n\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'], start_time='12:30:30')\n >>> reg_a.start_time\n '12:30:30'\n \"\"\"\n if not hasattr(self, \"_start\"):\n return [\"No temporal parameters set\"]\n else:\n return self._start.strftime(\"%H:%M:%S\")\n\n @property\n def end_time(self):\n \"\"\"\n Return the end time specified for the end date.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.end_time\n '23:59:59'\n\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'], end_time='10:20:20')\n >>> reg_a.end_time\n '10:20:20'\n \"\"\"\n if not hasattr(self, \"_end\"):\n return [\"No temporal parameters set\"]\n else:\n return self._end.strftime(\"%H:%M:%S\")\n\n @property\n def cycles(self):\n \"\"\"\n Return the unique ICESat-2 orbital cycle.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.cycles\n ['02']\n \"\"\"\n if not hasattr(self, \"_cycles\"):\n return [\"No orbital parameters set\"]\n else:\n return sorted(set(self._cycles))\n\n @property\n def tracks(self):\n \"\"\"\n Return the unique ICESat-2 Reference Ground Tracks\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.tracks\n ['0841', '0849', '0902', '0910']\n \"\"\"\n if not hasattr(self, \"_tracks\"):\n return [\"No orbital parameters set\"]\n else:\n return sorted(set(self._tracks))\n\n @property\n def CMRparams(self):\n \"\"\"\n Display the CMR key:value pairs that will be submitted. 
It generates the dictionary if it does not already exist.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.CMRparams\n {'short_name': 'ATL06',\n 'version': '002',\n 'temporal': '2019-02-20T00:00:00Z,2019-02-28T23:59:59Z',\n 'bounding_box': '-55,68,-48,71'}\n \"\"\"\n\n if not hasattr(self, \"_CMRparams\"):\n self._CMRparams = apifmt.Parameters(\"CMR\")\n # print(self._CMRparams)\n # print(self._CMRparams.fmted_keys)\n\n # dictionary of optional CMR parameters\n kwargs = {}\n # temporal CMR parameters\n if hasattr(self, \"_start\") and hasattr(self, \"_end\"):\n kwargs[\"start\"] = self._start\n kwargs[\"end\"] = self._end\n # granule name CMR parameters (orbital or file name)\n # DevGoal: add to file name search to optional queries\n if hasattr(self, \"_readable_granule_name\"):\n kwargs[\"options[readable_granule_name][pattern]\"] = \"true\"\n kwargs[\"options[spatial][or]\"] = \"true\"\n kwargs[\"readable_granule_name[]\"] = self._readable_granule_name\n\n if self._CMRparams.fmted_keys == {}:\n self._CMRparams.build_params(\n product=self.product,\n version=self._version,\n extent_type=self.extent_type,\n spatial_extent=self._spat_extent,\n **kwargs,\n )\n\n return self._CMRparams.fmted_keys\n\n @property\n def reqparams(self):\n \"\"\"\n Display the required key:value pairs that will be submitted. It generates the dictionary if it does not already exist.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.reqparams\n {'page_size': 10, 'page_num': 1}\n\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.earthdata_login(user_id,user_email)\n Earthdata Login password: ········\n >>> reg_a.order_granules()\n >>> reg_a.reqparams\n {'page_size': 10, 'page_num': 1, 'request_mode': 'async', 'include_meta': 'Y', 'client_string': 'icepyx'}\n \"\"\"\n\n if not hasattr(self, \"_reqparams\"):\n self._reqparams = apifmt.Parameters(\"required\", reqtype=\"search\")\n self._reqparams.build_params()\n\n return self._reqparams.fmted_keys\n\n # @property\n # DevQuestion: if I make this a property, I get a \"dict\" object is not callable when I try to give input kwargs... what approach should I be taking?\n def subsetparams(self, **kwargs):\n \"\"\"\n Display the subsetting key:value pairs that will be submitted. 
It generates the dictionary if it does not already exist\n and returns an empty dictionary if subsetting is set to False during ordering.\n\n Parameters\n ----------\n **kwargs : key-value pairs\n Additional parameters to be passed to the subsetter.\n By default temporal and spatial subset keys are passed.\n Acceptable key values are ['format','projection','projection_parameters','Coverage'].\n At this time (2020-05), only variable ('Coverage') parameters will be automatically formatted.\n\n See Also\n --------\n order_granules\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.subsetparams()\n {'time': '2019-02-20T00:00:00,2019-02-28T23:59:59', 'bbox': '-55,68,-48,71'}\n \"\"\"\n if not hasattr(self, \"_subsetparams\"):\n self._subsetparams = apifmt.Parameters(\"subset\")\n\n # temporal subsetting parameters\n if hasattr(self, \"_start\") and hasattr(self, \"_end\"):\n kwargs[\"start\"] = self._start\n kwargs[\"end\"] = self._end\n\n if self._subsetparams == None and not kwargs:\n return {}\n else:\n if self._subsetparams == None:\n self._subsetparams = apifmt.Parameters(\"subset\")\n if self._geom_filepath is not None:\n self._subsetparams.build_params(\n geom_filepath=self._geom_filepath,\n extent_type=self.extent_type,\n spatial_extent=self._spat_extent,\n **kwargs,\n )\n else:\n self._subsetparams.build_params(\n extent_type=self.extent_type,\n spatial_extent=self._spat_extent,\n **kwargs,\n )\n\n return self._subsetparams.fmted_keys\n\n # DevGoal: add to tests\n # DevGoal: add statements to the following vars properties to let the user know if they've got a mismatched source and vars type\n @property\n def order_vars(self):\n \"\"\"\n Return the order variables object.\n This instance is generated when data is ordered from the NSIDC.\n\n See Also\n --------\n variables.Variables\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.earthdata_login(user_id,user_email)\n Earthdata Login password: ········\n >>> reg_a.order_vars\n <icepyx.core.variables.Variables at [location]>\n \"\"\"\n\n if not hasattr(self, \"_order_vars\"):\n if self._source == \"order\":\n # DevGoal: check for active session here\n if hasattr(self, \"_cust_options\"):\n self._order_vars = Variables(\n self._source,\n session=self._session,\n product=self.product,\n avail=self._cust_options[\"variables\"],\n )\n else:\n self._order_vars = Variables(\n self._source,\n session=self._session,\n product=self.product,\n version=self._version,\n )\n\n # I think this is where property setters come in, and one should be used here? 
Right now order_vars.avail is only filled in\n # if _cust_options exists when the class is initialized, but not if _cust_options is filled in prior to another call to order_vars\n # if self._order_vars.avail == None and hasattr(self, '_cust_options'):\n # print('got into the loop')\n # self._order_vars.avail = self._cust_options['variables']\n\n return self._order_vars\n\n @property\n def file_vars(self):\n \"\"\"\n Return the file variables object.\n This instance is generated when files are used to create the data object (not yet implemented).\n\n See Also\n --------\n variables.Variables\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.earthdata_login(user_id,user_email)\n Earthdata Login password: ········\n >>> reg_a.file_vars\n <icepyx.core.variables.Variables at [location]>\n \"\"\"\n\n if not hasattr(self, \"_file_vars\"):\n if self._source == \"file\":\n self._file_vars = Variables(self._source, product=self.product)\n\n return self._file_vars\n\n @property\n def granules(self):\n \"\"\"\n Return the granules object, which provides the underlying funtionality for searching, ordering,\n and downloading granules for the specified product. Users are encouraged to use the built in wrappers\n rather than trying to access the granules object themselves.\n\n See Also\n --------\n avail_granules\n order_granules\n download_granules\n granules.Granules\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.granules\n <icepyx.core.granules.Granules at [location]>\n \"\"\"\n\n if not hasattr(self, \"_granules\"):\n self._granules = Granules()\n elif self._granules == None:\n self._granules = Granules()\n\n return self._granules\n\n # ----------------------------------------------------------------------\n # Methods - Get and display neatly information at the product level\n\n def product_summary_info(self):\n \"\"\"\n Display a summary of selected metadata for the specified version of the product\n of interest (the collection).\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.product_summary_info()\n product_id : ATLAS/ICESat-2 L3A Land Ice Height V002\n short_name : ATL06\n version_id : 002\n time_start : 2018-10-14T00:00:00.000Z\n coordinate_system : CARTESIAN\n summary : This data set (ATL06) provides geolocated, land-ice surface heights (above the WGS 84 ellipsoid, ITRF2014 reference frame), plus ancillary parameters that can be used to interpret and assess the quality of the height estimates. 
The data were acquired by the Advanced Topographic Laser Altimeter System (ATLAS) instrument on board the Ice, Cloud and land Elevation Satellite-2 (ICESat-2) observatory.\n orbit_parameters : {'swath_width': '36.0', 'period': '94.29', 'inclination_angle': '92.0', 'number_of_orbits': '0.071428571', 'start_circular_latitude': '0.0'}\n \"\"\"\n if not hasattr(self, \"_about_product\"):\n self._about_product = is2ref.about_product(self._prod)\n summ_keys = [\n \"product_id\",\n \"short_name\",\n \"version_id\",\n \"time_start\",\n \"coordinate_system\",\n \"summary\",\n \"orbit_parameters\",\n ]\n for key in summ_keys:\n print(key, \": \", self._about_product[\"feed\"][\"entry\"][-1][key])\n\n def product_all_info(self):\n \"\"\"\n Display all metadata about the product of interest (the collection).\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.product_all_info()\n {very long prettily-formatted dictionary output}\n\n \"\"\"\n if not hasattr(self, \"_about_product\"):\n self._about_product = is2ref.about_product(self._prod)\n pprint.pprint(self._about_product)\n\n def latest_version(self):\n \"\"\"\n Determine the most recent version available for the given product.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.latest_version()\n '003'\n \"\"\"\n if not hasattr(self, \"_about_product\"):\n self._about_product = is2ref.about_product(self._prod)\n return max(\n [entry[\"version_id\"] for entry in self._about_product[\"feed\"][\"entry\"]]\n )\n\n def show_custom_options(self, dictview=False):\n \"\"\"\n Display customization/subsetting options available for this product.\n\n Parameters\n ----------\n dictview : boolean, default False\n Show the variable portion of the custom options list as a dictionary with key:value\n pairs representing variable:paths-to-variable rather than as a long list of full\n variable paths.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.earthdata_login(user_id,user_email)\n Earthdata Login password: ········\n >>> reg_a.show_custom_options(dictview=True):\n Subsetting options\n [{'id': 'ICESAT2',\n 'maxGransAsyncRequest': '2000',\n 'maxGransSyncRequest': '100',\n 'spatialSubsetting': 'true',\n 'spatialSubsettingShapefile': 'true',\n 'temporalSubsetting': 'true',\n 'type': 'both'}]\n Data File Formats (Reformatting Options)\n ['TABULAR_ASCII', 'NetCDF4-CF', 'Shapefile', 'NetCDF-3']\n Reprojection Options\n []\n Data File (Reformatting) Options Supporting Reprojection\n ['TABULAR_ASCII', 'NetCDF4-CF', 'Shapefile', 'NetCDF-3', 'No reformatting']\n Data File (Reformatting) Options NOT Supporting Reprojection\n []\n Data Variables (also Subsettable)\n ['ancillary_data/atlas_sdp_gps_epoch',\n 'ancillary_data/control',\n 'ancillary_data/data_end_utc',\n .\n .\n .\n 'quality_assessment/gt3r/signal_selection_source_fraction_3']\n \"\"\"\n headers = [\n \"Subsetting options\",\n \"Data File Formats (Reformatting Options)\",\n \"Reprojection Options\",\n \"Data File (Reformatting) Options Supporting Reprojection\",\n \"Data File (Reformatting) Options NOT Supporting Reprojection\",\n \"Data Variables (also Subsettable)\",\n ]\n keys = [\n \"options\",\n \"fileformats\",\n \"reprojectionONLY\",\n \"formatreproj\",\n \"noproj\",\n \"variables\",\n ]\n\n try:\n all(key in self._cust_options.keys() for key in keys)\n except AttributeError or KeyError:\n 
self._cust_options = is2ref._get_custom_options(\n self._session, self.product, self._version\n )\n\n for h, k in zip(headers, keys):\n print(h)\n if k == \"variables\" and dictview:\n vgrp, paths = Variables.parse_var_list(self._cust_options[k])\n pprint.pprint(vgrp)\n else:\n pprint.pprint(self._cust_options[k])\n\n # ----------------------------------------------------------------------\n # Methods - Login and Granules (NSIDC-API)\n\n def earthdata_login(self, uid, email):\n \"\"\"\n Log in to NSIDC EarthData to access data. Generates the needed session and token for most\n data searches and data ordering/download.\n\n Parameters\n ----------\n uid : string\n Earthdata login user ID\n email : string\n Email address. NSIDC will automatically send you emails about the status of your order.\n\n See Also\n --------\n Earthdata.Earthdata\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.earthdata_login(user_id,user_email)\n Earthdata Login password: ········\n \"\"\"\n\n capability_url = f\"https://n5eil02u.ecs.nsidc.org/egi/capabilities/{self.product}.{self._version}.xml\"\n self._session = Earthdata(uid, email, capability_url).login()\n self._email = email\n\n # DevGoal: check to make sure the see also bits of the docstrings work properly in RTD\n def avail_granules(self, ids=False, cycles=False, tracks=False):\n \"\"\"\n Obtain information about the available granules for the query\n object's parameters. By default, a complete list of available granules is\n obtained and stored in the object, but only summary information is returned.\n Lists of granule IDs, cycles and RGTs can be obtained using the boolean triggers.\n\n Parameters\n ----------\n ids : boolean, default False\n Indicates whether the function should return a list of granule IDs.\n\n cycles : boolean, default False\n Indicates whether the function should return a list of orbital cycles.\n\n tracks : boolean, default False\n Indicates whether the function should return a list of RGTs.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.avail_granules()\n {'Number of available granules': 4,\n 'Average size of granules (MB)': 48.975419759750004,\n 'Total size of all granules (MB)': 195.90167903900002}\n\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.avail_granules(ids=True)\n >>> reg_a.avail_granules(cycles=True)\n ['02']\n >>> reg_a.avail_granules(tracks=True)\n ['0841', '0849', '0902', '0910']\n \"\"\"\n\n # REFACTOR: add test to make sure there's a session\n if not hasattr(self, \"_granules\"):\n self.granules\n try:\n self.granules.avail\n except AttributeError:\n self.granules.get_avail(self.CMRparams, self.reqparams)\n\n if ids or cycles or tracks:\n # list of outputs in order of ids, cycles, tracks\n return granules.gran_IDs(\n self.granules.avail, ids=ids, cycles=cycles, tracks=tracks\n )\n else:\n return granules.info(self.granules.avail)\n\n # DevGoal: display output to indicate number of granules successfully ordered (and number of errors)\n # DevGoal: deal with subset=True for variables now, and make sure that if a variable subset Coverage kwarg is input it's successfully passed through all other functions even if this is the only one run.\n def order_granules(self, verbose=False, subset=True, email=True, **kwargs):\n \"\"\"\n Place an order for the available granules for the query object.\n\n Parameters\n ----------\n 
verbose : boolean, default False\n Print out all feedback available from the order process.\n Progress information is automatically printed regardless of the value of verbose.\n subset : boolean, default True\n Apply subsetting to the data order from the NSIDC, returning only data that meets the\n subset parameters. Spatial and temporal subsetting based on the input parameters happens\n by default when subset=True, but additional subsetting options are available.\n Spatial subsetting returns all data that are within the area of interest (but not complete\n granules. This eliminates false-positive granules returned by the metadata-level search)\n email: boolean, default True\n Have NSIDC auto-send order status email updates to indicate order status as pending/completed.\n **kwargs : key-value pairs\n Additional parameters to be passed to the subsetter.\n By default temporal and spatial subset keys are passed.\n Acceptable key values are ['format','projection','projection_parameters','Coverage'].\n The variable 'Coverage' list should be constructed using the `order_vars.wanted` attribute of the object.\n At this time (2020-05), only variable ('Coverage') parameters will be automatically formatted.\n\n See Also\n --------\n granules.place_order\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.earthdata_login(user_id,user_email)\n Earthdata Login password: ········\n >>> reg_a.order_granules()\n order ID: [###############]\n [order status output]\n error messages:\n [if any were returned from the NSIDC subsetter, e.g. No data found that matched subset constraints.]\n .\n .\n .\n Retry request status is: complete\n \"\"\"\n\n if not hasattr(self, \"reqparams\"):\n self.reqparams\n\n if self._reqparams._reqtype == \"search\":\n self._reqparams._reqtype = \"download\"\n\n if \"email\" in self._reqparams.fmted_keys.keys() or email == False:\n self._reqparams.build_params(**self._reqparams.fmted_keys)\n else:\n self._reqparams.build_params(\n **self._reqparams.fmted_keys, email=self._email\n )\n\n if subset is False:\n self._subsetparams = None\n elif (\n subset == True\n and hasattr(self, \"_subsetparams\")\n and self._subsetparams == None\n ):\n del self._subsetparams\n\n # REFACTOR: add checks here to see if the granules object has been created, and also if it already has a list of avail granules (if not, need to create one and add session)\n if not hasattr(self, \"_granules\"):\n self.granules\n self._granules.place_order(\n self.CMRparams,\n self.reqparams,\n self.subsetparams(**kwargs),\n verbose,\n subset,\n session=self._session,\n geom_filepath=self._geom_filepath,\n )\n\n # DevGoal: put back in the kwargs here so that people can just call download granules with subset=False!\n def download_granules(\n self, path, verbose=False, subset=True, restart=False, **kwargs\n ): # , extract=False):\n \"\"\"\n Downloads the data ordered using order_granules.\n\n Parameters\n ----------\n path : string\n String with complete path to desired download location.\n verbose : boolean, default False\n Print out all feedback available from the order process.\n Progress information is automatically printed regardless of the value of verbose.\n subset : boolean, default True\n Apply subsetting to the data order from the NSIDC, returning only data that meets the\n subset parameters. 
Spatial and temporal subsetting based on the input parameters happens\n by default when subset=True, but additional subsetting options are available.\n Spatial subsetting returns all data that are within the area of interest (but not complete\n granules. This eliminates false-positive granules returned by the metadata-level search)\n restart: boolean, default false\n If previous download was terminated unexpectedly. Run again with restart set to True to continue.\n **kwargs : key-value pairs\n Additional parameters to be passed to the subsetter.\n By default temporal and spatial subset keys are passed.\n Acceptable key values are ['format','projection','projection_parameters','Coverage'].\n The variable 'Coverage' list should be constructed using the `order_vars.wanted` attribute of the object.\n At this time (2020-05), only variable ('Coverage') parameters will be automatically formatted.\n\n See Also\n --------\n granules.download\n \"\"\"\n \"\"\"\n extract : boolean, default False\n Unzip the downloaded granules.\n\n Examples\n --------\n >>> reg_a = icepyx.query.Query('ATL06',[-55, 68, -48, 71],['2019-02-20','2019-02-28'])\n >>> reg_a.earthdata_login(user_id,user_email)\n Earthdata Login password: ········\n >>> reg_a.download_granules('/path/to/download/folder')\n Beginning download of zipped output...\n Data request [##########] of x order(s) is complete.\n \"\"\"\n\n # if not os.path.exists(path):\n # os.mkdir(path)\n # os.chdir(path)\n\n if not hasattr(self, \"_granules\"):\n self.granules\n\n if restart == True:\n pass\n else:\n if (\n not hasattr(self._granules, \"orderIDs\")\n or len(self._granules.orderIDs) == 0\n ):\n self.order_granules(verbose=verbose, subset=subset, **kwargs)\n\n self._granules.download(verbose, path, session=self._session, restart=restart)\n\n # DevGoal: add testing? What do we test, and how, given this is a visualization.\n # DevGoal(long term): modify this to accept additional inputs, etc.\n # DevGoal: move this to it's own module for visualizing, etc.\n # DevGoal: see Amy's data access notebook for a zoomed in map - implement here?\n def visualize_spatial_extent(\n self,\n ): # additional args, basemap, zoom level, cmap, export\n \"\"\"\n Creates a map displaying the input spatial extent\n\n Examples\n --------\n >>> icepyx.query.Query('ATL06','path/spatialfile.shp',['2019-02-22','2019-02-28'])\n >>> reg_a.visualize_spatial_extent\n [visual map output]\n \"\"\"\n gdf = geospatial.geodataframe(self.extent_type, self._spat_extent)\n\n try:\n from shapely.geometry import Polygon\n import geoviews as gv\n\n gv.extension(\"bokeh\")\n\n line_geoms = Polygon(gdf[\"geometry\"][0]).boundary\n bbox_poly = gv.Path(line_geoms).opts(color=\"red\", line_color=\"red\")\n tile = gv.tile_sources.EsriImagery.opts(width=500, height=500)\n return tile * bbox_poly\n\n except ImportError:\n world = gpd.read_file(gpd.datasets.get_path(\"naturalearth_lowres\"))\n f, ax = plt.subplots(1, figsize=(12, 6))\n world.plot(ax=ax, facecolor=\"lightgray\", edgecolor=\"gray\")\n gdf.plot(ax=ax, color=\"#FF8C00\", alpha=0.7)\n plt.show()\n\n def visualize_elevation(self):\n \"\"\"\n Visualize elevation requested from OpenAltimetry API using datashader based on cycles\n https://holoviz.org/tutorial/Large_Data.html\n\n Returns\n -------\n map_cycle, map_rgt + lineplot_rgt : Holoviews objects\n Holoviews data visualization elements\n \"\"\"\n viz = Visualize(self)\n cycle_map, rgt_map = viz.viz_elevation()\n\n return cycle_map, rgt_map\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
michalk8/anndata
[ "664e32b0aa6625fe593370d37174384c05abfd4e" ]
[ "anndata/tests/test_views.py" ]
[ "from operator import mul\n\nimport joblib\nimport numpy as np\nfrom scipy import sparse\nimport pandas as pd\nimport pytest\n\nimport anndata as ad\nfrom anndata._core.index import _normalize_index\nfrom anndata.utils import asarray\n\nfrom anndata.tests.helpers import (\n gen_adata,\n subset_func,\n slice_subset,\n single_subset,\n assert_equal,\n)\n\n# ------------------------------------------------------------------------------\n# Some test data\n# ------------------------------------------------------------------------------\n\n# data matrix of shape n_obs x n_vars\nX_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n# annotation of observations / rows\nobs_dict = dict(\n row_names=[\"name1\", \"name2\", \"name3\"], # row annotation\n oanno1=[\"cat1\", \"cat2\", \"cat2\"], # categorical annotation\n oanno2=[\"o1\", \"o2\", \"o3\"], # string annotation\n oanno3=[2.1, 2.2, 2.3], # float annotation\n)\n# annotation of variables / columns\nvar_dict = dict(vanno1=[3.1, 3.2, 3.3])\n# unstructured annotation\nuns_dict = dict(oanno1_colors=[\"#000000\", \"#FFFFFF\"], uns2=[\"some annotation\"])\n\n\nsubset_func2 = subset_func\n\n\nclass NDArraySubclass(np.ndarray):\n def view(self, dtype=None, typ=None):\n return self\n\n\[email protected]\ndef adata():\n adata = ad.AnnData(np.zeros((100, 100)))\n adata.obsm[\"o\"] = np.zeros((100, 50))\n adata.varm[\"o\"] = np.zeros((100, 50))\n return adata\n\n\[email protected](params=[asarray, sparse.csr_matrix, sparse.csc_matrix])\ndef adata_parameterized(request):\n return gen_adata(shape=(200, 300), X_type=request.param)\n\n\[email protected](\n params=[np.array, sparse.csr_matrix, sparse.csc_matrix],\n ids=[\"np_array\", \"scipy_csr\", \"scipy_csc\"],\n)\ndef matrix_type(request):\n return request.param\n\n\[email protected](params=[\"layers\", \"obsm\", \"varm\"])\ndef mapping_name(request):\n return request.param\n\n\n# ------------------------------------------------------------------------------\n# The test functions\n# ------------------------------------------------------------------------------\n\n\ndef test_views():\n X = np.array(X_list)\n adata = ad.AnnData(X, obs=obs_dict, var=var_dict, uns=uns_dict, dtype=\"int32\")\n\n assert adata[:, 0].is_view\n assert adata[:, 0].X.tolist() == np.reshape([1, 4, 7], (3, 1)).tolist()\n\n adata[:2, 0].X = [0, 0]\n\n assert adata[:, 0].X.tolist() == np.reshape([0, 0, 7], (3, 1)).tolist()\n\n adata_subset = adata[:2, [0, 1]]\n\n assert adata_subset.is_view\n # now transition to actual object\n adata_subset.obs[\"foo\"] = range(2)\n assert not adata_subset.is_view\n\n assert adata_subset.obs[\"foo\"].tolist() == list(range(2))\n\n\ndef test_modify_view_component(matrix_type, mapping_name):\n adata = ad.AnnData(\n np.zeros((10, 10)),\n **{mapping_name: dict(m=matrix_type(asarray(sparse.random(10, 10))))},\n )\n init_hash = joblib.hash(adata)\n\n subset = adata[:5, :][:, :5]\n assert subset.is_view\n m = getattr(subset, mapping_name)[\"m\"]\n m[0, 0] = 100\n assert not subset.is_view\n assert getattr(subset, mapping_name)[\"m\"][0, 0] == 100\n\n assert init_hash == joblib.hash(adata)\n\n\n# TODO: These tests could probably be condensed into a fixture\n# based test for obsm and varm\ndef test_set_obsm_key(adata):\n init_hash = joblib.hash(adata)\n\n orig_obsm_val = adata.obsm[\"o\"].copy()\n subset_obsm = adata[:50]\n assert subset_obsm.is_view\n subset_obsm.obsm[\"o\"] = np.ones((50, 20))\n assert not subset_obsm.is_view\n assert np.all(adata.obsm[\"o\"] == orig_obsm_val)\n\n assert init_hash == 
joblib.hash(adata)\n\n\ndef test_set_varm_key(adata):\n init_hash = joblib.hash(adata)\n\n orig_varm_val = adata.varm[\"o\"].copy()\n subset_varm = adata[:, :50]\n assert subset_varm.is_view\n subset_varm.varm[\"o\"] = np.ones((50, 20))\n assert not subset_varm.is_view\n assert np.all(adata.varm[\"o\"] == orig_varm_val)\n\n assert init_hash == joblib.hash(adata)\n\n\ndef test_set_obs(adata, subset_func):\n init_hash = joblib.hash(adata)\n\n subset = adata[subset_func(adata.obs_names), :]\n\n new_obs = pd.DataFrame(\n dict(a=np.ones(subset.n_obs), b=np.ones(subset.n_obs)),\n index=subset.obs_names,\n )\n\n assert subset.is_view\n subset.obs = new_obs\n assert not subset.is_view\n assert np.all(subset.obs == new_obs)\n\n assert joblib.hash(adata) == init_hash\n\n\ndef test_set_var(adata, subset_func):\n init_hash = joblib.hash(adata)\n\n subset = adata[:, subset_func(adata.var_names)]\n\n new_var = pd.DataFrame(\n dict(a=np.ones(subset.n_vars), b=np.ones(subset.n_vars)),\n index=subset.var_names,\n )\n\n assert subset.is_view\n subset.var = new_var\n assert not subset.is_view\n assert np.all(subset.var == new_var)\n\n assert joblib.hash(adata) == init_hash\n\n\ndef test_drop_obs_column():\n adata = ad.AnnData(np.array(X_list), obs=obs_dict, dtype=\"int32\")\n\n subset = adata[:2]\n assert subset.is_view\n # returns a copy of obs\n assert subset.obs.drop(columns=[\"oanno1\"]).columns.tolist() == [\"oanno2\", \"oanno3\"]\n assert subset.is_view\n # would modify obs, so it should actualize subset and not modify adata\n subset.obs.drop(columns=[\"oanno1\"], inplace=True)\n assert not subset.is_view\n assert subset.obs.columns.tolist() == [\"oanno2\", \"oanno3\"]\n\n assert adata.obs.columns.tolist() == [\"oanno1\", \"oanno2\", \"oanno3\"]\n\n\ndef test_set_obsm(adata):\n init_hash = joblib.hash(adata)\n\n dim0_size = np.random.randint(2, adata.shape[0] - 1)\n dim1_size = np.random.randint(1, 99)\n orig_obsm_val = adata.obsm[\"o\"].copy()\n subset_idx = np.random.choice(adata.obs_names, dim0_size, replace=False)\n\n subset = adata[subset_idx, :]\n assert subset.is_view\n subset.obsm = dict(o=np.ones((dim0_size, dim1_size)))\n assert not subset.is_view\n assert np.all(orig_obsm_val == adata.obsm[\"o\"]) # Checking for mutation\n assert np.all(subset.obsm[\"o\"] == np.ones((dim0_size, dim1_size)))\n\n subset = adata[subset_idx, :]\n subset_hash = joblib.hash(subset)\n with pytest.raises(ValueError):\n subset.obsm = dict(o=np.ones((dim0_size + 1, dim1_size)))\n with pytest.raises(ValueError):\n subset.varm = dict(o=np.ones((dim0_size - 1, dim1_size)))\n assert subset_hash == joblib.hash(subset)\n\n # Only modification have been made to a view\n assert init_hash == joblib.hash(adata)\n\n\ndef test_set_varm(adata):\n init_hash = joblib.hash(adata)\n\n dim0_size = np.random.randint(2, adata.shape[1] - 1)\n dim1_size = np.random.randint(1, 99)\n orig_varm_val = adata.varm[\"o\"].copy()\n subset_idx = np.random.choice(adata.var_names, dim0_size, replace=False)\n\n subset = adata[:, subset_idx]\n assert subset.is_view\n subset.varm = dict(o=np.ones((dim0_size, dim1_size)))\n assert not subset.is_view\n assert np.all(orig_varm_val == adata.varm[\"o\"]) # Checking for mutation\n assert np.all(subset.varm[\"o\"] == np.ones((dim0_size, dim1_size)))\n\n subset = adata[:, subset_idx]\n subset_hash = joblib.hash(subset)\n with pytest.raises(ValueError):\n subset.varm = dict(o=np.ones((dim0_size + 1, dim1_size)))\n with pytest.raises(ValueError):\n subset.varm = dict(o=np.ones((dim0_size - 1, dim1_size)))\n # 
subset should not be changed by failed setting\n assert subset_hash == joblib.hash(subset)\n assert init_hash == joblib.hash(adata)\n\n\n# TODO: Determine if this is the intended behavior,\n# or just the behaviour we’ve had for a while\ndef test_not_set_subset_X(matrix_type, subset_func):\n adata = ad.AnnData(matrix_type(asarray(sparse.random(20, 20))))\n init_hash = joblib.hash(adata)\n orig_X_val = adata.X.copy()\n while True:\n subset_idx = slice_subset(adata.obs_names)\n if len(adata[subset_idx, :]) > 2:\n break\n subset = adata[subset_idx, :]\n\n subset = adata[:, subset_idx]\n\n internal_idx = _normalize_index(\n subset_func(np.arange(subset.X.shape[1])), subset.var_names\n )\n assert subset.is_view\n subset.X[:, internal_idx] = 1\n assert not subset.is_view\n assert not np.any(asarray(adata.X != orig_X_val))\n\n assert init_hash == joblib.hash(adata)\n\n\ndef test_set_scalar_subset_X(matrix_type, subset_func):\n adata = ad.AnnData(matrix_type(np.zeros((10, 10))))\n orig_X_val = adata.X.copy()\n subset_idx = slice_subset(adata.obs_names)\n\n adata_subset = adata[subset_idx, :]\n\n adata_subset.X = 1\n\n assert adata_subset.is_view\n assert np.all(asarray(adata[subset_idx, :].X) == 1)\n\n assert asarray((orig_X_val != adata.X)).sum() == mul(*adata_subset.shape)\n\n\n# TODO: Use different kind of subsetting for adata and view\ndef test_set_subset_obsm(adata, subset_func):\n init_hash = joblib.hash(adata)\n orig_obsm_val = adata.obsm[\"o\"].copy()\n\n while True:\n subset_idx = slice_subset(adata.obs_names)\n if len(adata[subset_idx, :]) > 2:\n break\n subset = adata[subset_idx, :]\n\n internal_idx = _normalize_index(\n subset_func(np.arange(subset.obsm[\"o\"].shape[0])), subset.obs_names\n )\n\n assert subset.is_view\n subset.obsm[\"o\"][internal_idx] = 1\n assert not subset.is_view\n assert np.all(adata.obsm[\"o\"] == orig_obsm_val)\n\n assert init_hash == joblib.hash(adata)\n\n\ndef test_set_subset_varm(adata, subset_func):\n init_hash = joblib.hash(adata)\n orig_varm_val = adata.varm[\"o\"].copy()\n\n while True:\n subset_idx = slice_subset(adata.var_names)\n if (adata[:, subset_idx]).shape[1] > 2:\n break\n subset = adata[:, subset_idx]\n\n internal_idx = _normalize_index(\n subset_func(np.arange(subset.varm[\"o\"].shape[0])), subset.var_names\n )\n\n assert subset.is_view\n subset.varm[\"o\"][internal_idx] = 1\n assert not subset.is_view\n assert np.all(adata.varm[\"o\"] == orig_varm_val)\n\n assert init_hash == joblib.hash(adata)\n\n\[email protected](\"attr\", [\"obsm\", \"varm\", \"obsp\", \"varp\", \"layers\"])\ndef test_view_failed_delitem(attr):\n adata = gen_adata((10, 10))\n view = adata[5:7, :][:, :5]\n adata_hash = joblib.hash(adata)\n view_hash = joblib.hash(view)\n\n with pytest.raises(KeyError):\n getattr(view, attr).__delitem__(\"not a key\")\n\n assert view.is_view\n assert adata_hash == joblib.hash(adata)\n assert view_hash == joblib.hash(view)\n\n\[email protected](\"attr\", [\"obsm\", \"varm\", \"obsp\", \"varp\", \"layers\"])\ndef test_view_delitem(attr):\n adata = gen_adata((10, 10))\n getattr(adata, attr)[\"to_delete\"] = np.ones((10, 10))\n # Shouldn’t be a subclass, should be an ndarray\n assert type(getattr(adata, attr)[\"to_delete\"]) is np.ndarray\n view = adata[5:7, :][:, :5]\n adata_hash = joblib.hash(adata)\n view_hash = joblib.hash(view)\n\n getattr(view, attr).__delitem__(\"to_delete\")\n\n assert not view.is_view\n assert \"to_delete\" not in getattr(view, attr)\n assert \"to_delete\" in getattr(adata, attr)\n assert adata_hash == 
joblib.hash(adata)\n assert view_hash != joblib.hash(view)\n\n\[email protected](\n \"attr\", [\"X\", \"obs\", \"var\", \"obsm\", \"varm\", \"obsp\", \"varp\", \"layers\", \"uns\"]\n)\ndef test_view_delattr(attr, subset_func):\n base = gen_adata((10, 10))\n orig_hash = joblib.hash(base)\n subset = base[subset_func(base.obs_names), subset_func(base.var_names)]\n empty = ad.AnnData(obs=subset.obs[[]], var=subset.var[[]])\n\n delattr(subset, attr)\n\n assert not subset.is_view\n # Should now have same value as default\n assert_equal(getattr(subset, attr), getattr(empty, attr))\n assert orig_hash == joblib.hash(base) # Original should not be modified\n\n\[email protected](\n \"attr\", [\"obs\", \"var\", \"obsm\", \"varm\", \"obsp\", \"varp\", \"layers\", \"uns\"]\n)\ndef test_view_setattr_machinery(attr, subset_func, subset_func2):\n # Tests that setting attributes on a view doesn't mess anything up too bad\n adata = gen_adata((10, 10))\n view = adata[subset_func(adata.obs_names), subset_func2(adata.var_names)]\n\n actual = view.copy()\n setattr(view, attr, getattr(actual, attr))\n assert_equal(actual, view, exact=True)\n\n\ndef test_layers_view():\n X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n L = np.array([[10, 11, 12], [13, 14, 15], [16, 17, 18]])\n real_adata = ad.AnnData(X)\n real_adata.layers[\"L\"] = L\n view_adata = real_adata[1:, 1:]\n real_hash = joblib.hash(real_adata)\n view_hash = joblib.hash(view_adata)\n\n assert view_adata.is_view\n\n with pytest.raises(ValueError):\n view_adata.layers[\"L2\"] = L + 2\n\n assert view_adata.is_view # Failing to set layer item makes adata not view\n assert real_hash == joblib.hash(real_adata)\n assert view_hash == joblib.hash(view_adata)\n\n view_adata.layers[\"L2\"] = L[1:, 1:] + 2\n\n assert not view_adata.is_view\n assert real_hash == joblib.hash(real_adata)\n assert view_hash != joblib.hash(view_adata)\n\n\n# TODO: This can be flaky. 
Make that stop\ndef test_view_of_view(matrix_type, subset_func, subset_func2):\n adata = gen_adata((30, 15), X_type=matrix_type)\n adata.raw = adata\n if subset_func is single_subset:\n pytest.xfail(\"Other subset generating functions have trouble with this\")\n var_s1 = subset_func(adata.var_names, min_size=4)\n var_view1 = adata[:, var_s1]\n var_s2 = subset_func2(var_view1.var_names)\n var_view2 = var_view1[:, var_s2]\n assert var_view2._adata_ref is adata\n obs_s1 = subset_func(adata.obs_names, min_size=4)\n obs_view1 = adata[obs_s1, :]\n obs_s2 = subset_func2(obs_view1.obs_names)\n assert adata[obs_s1, :][:, var_s1][obs_s2, :]._adata_ref is adata\n\n view_of_actual_copy = adata[:, var_s1].copy()[obs_s1, :].copy()[:, var_s2].copy()\n\n view_of_view_copy = adata[:, var_s1][obs_s1, :][:, var_s2].copy()\n\n assert_equal(view_of_actual_copy, view_of_view_copy, exact=True)\n\n\ndef test_view_of_view_modification():\n adata = ad.AnnData(np.zeros((10, 10)))\n adata[0, :][:, 5:].X = np.ones(5)\n assert np.all(adata.X[0, 5:] == np.ones(5))\n adata[[1, 2], :][:, [1, 2]].X = np.ones((2, 2))\n assert np.all(adata.X[1:3, 1:3] == np.ones((2, 2)))\n\n adata.X = sparse.csr_matrix(adata.X)\n adata[0, :][:, 5:].X = np.ones(5) * 2\n assert np.all(asarray(adata.X)[0, 5:] == np.ones(5) * 2)\n adata[[1, 2], :][:, [1, 2]].X = np.ones((2, 2)) * 2\n assert np.all(asarray(adata.X)[1:3, 1:3] == np.ones((2, 2)) * 2)\n\n\ndef test_double_index(subset_func, subset_func2):\n adata = gen_adata((10, 10))\n obs_subset = subset_func(adata.obs_names)\n var_subset = subset_func2(adata.var_names)\n v1 = adata[obs_subset, var_subset]\n v2 = adata[obs_subset, :][:, var_subset]\n\n assert np.all(asarray(v1.X) == asarray(v2.X))\n assert np.all(v1.obs == v2.obs)\n assert np.all(v1.var == v2.var)\n\n\ndef test_view_retains_ndarray_subclass():\n adata = ad.AnnData(np.zeros((10, 10)))\n adata.obsm[\"foo\"] = np.zeros((10, 5)).view(NDArraySubclass)\n\n view = adata[:5, :]\n\n assert isinstance(view.obsm[\"foo\"], NDArraySubclass)\n assert view.obsm[\"foo\"].shape == (5, 5)\n\n\ndef test_modify_uns_in_copy():\n # https://github.com/theislab/anndata/issues/571\n adata = ad.AnnData(np.ones((5, 5)), uns={\"parent\": {\"key\": \"value\"}})\n adata_copy = adata[:3].copy()\n adata_copy.uns[\"parent\"][\"key\"] = \"new_value\"\n assert adata.uns[\"parent\"][\"key\"] != adata_copy.uns[\"parent\"][\"key\"]\n\n\[email protected](\"index\", [-101, 100, (slice(None), -101), (slice(None), 100)])\ndef test_invalid_scalar_index(adata, index):\n # https://github.com/theislab/anndata/issues/619\n with pytest.raises(IndexError, match=r\".*index.* out of range\\.\"):\n _ = adata[index]\n\n\[email protected](\"obs\", [False, True])\[email protected](\"index\", [-100, -50, -1])\ndef test_negative_scalar_index(adata, index: int, obs: bool):\n pos_index = index + (adata.n_obs if obs else adata.n_vars)\n\n if obs:\n adata_pos_subset = adata[pos_index]\n adata_neg_subset = adata[index]\n else:\n adata_pos_subset = adata[:, pos_index]\n adata_neg_subset = adata[:, index]\n\n np.testing.assert_array_equal(\n adata_pos_subset.obs_names, adata_neg_subset.obs_names\n )\n np.testing.assert_array_equal(\n adata_pos_subset.var_names, adata_neg_subset.var_names\n )\n" ]
[ [ "numpy.random.choice", "numpy.reshape", "numpy.arange", "scipy.sparse.csr_matrix", "numpy.ones", "numpy.all", "numpy.testing.assert_array_equal", "scipy.sparse.random", "numpy.array", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
ternlef11/tcr-pmhc
[ "dc033ce749b38d6effa0a583440ae4889362745a" ]
[ "data/see_data.py" ]
[ "import pandas as pd\n\nthese_features = pd.read_csv(\"example.csv\")\n\nprint(these_features.head(20))\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Munna-Manoj/Team7_TTS
[ "5e2d473a2afe429023876bcc51c2ac966a4938b8" ]
[ "synthesize.py" ]
[ "import torch as t\nfrom utils import spectrogram2wav\nfrom scipy.io.wavfile import write\nimport hyperparams as hp\nfrom text import text_to_sequence\nimport numpy as np\nfrom model.network import ModelPostNet, Model\nfrom collections import OrderedDict\nfrom tqdm import tqdm\nimport argparse\n\ndef load_checkpoint(step, model_name=\"transformer\"):\n state_dict = t.load('./checkpoint/checkpoint_%s_%d.pth.tar'% (model_name, step)) \n new_state_dict = OrderedDict()\n for k, value in state_dict['model'].items():\n key = k[7:]\n new_state_dict[key] = value\n\n return new_state_dict\n\ndef synthesis(text, args):\n m = Model()\n m_post = ModelPostNet()\n\n m.load_state_dict(load_checkpoint(args.restore_step1, \"transformer\"))\n m_post.load_state_dict(load_checkpoint(args.restore_step2, \"postnet\"))\n\n text = np.asarray(text_to_sequence(text, [hp.cleaners]))\n text = t.LongTensor(text).unsqueeze(0)\n text = text.cuda()\n mel_input = t.zeros([1,1, 80]).cuda()\n pos_text = t.arange(1, text.size(1)+1).unsqueeze(0)\n pos_text = pos_text.cuda()\n\n m=m.cuda()\n m_post = m_post.cuda()\n m.train(False)\n m_post.train(False)\n \n pbar = tqdm(range(args.max_len))\n with t.no_grad():\n for i in pbar:\n pos_mel = t.arange(1,mel_input.size(1)+1).unsqueeze(0).cuda()\n mel_pred, postnet_pred, attn, stop_token, _, attn_dec = m.forward(text, mel_input, pos_text, pos_mel)\n mel_input = t.cat([mel_input, mel_pred[:,-1:,:]], dim=1)\n\n mag_pred = m_post.forward(postnet_pred)\n \n wav = spectrogram2wav(mag_pred.squeeze(0).cpu().numpy())\n write(hp.sample_path + \"/test.wav\", hp.sr, wav)\n \nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--restore_step1', type=int, help='Global step to restore checkpoint', default=172000)\n parser.add_argument('--restore_step2', type=int, help='Global step to restore checkpoint', default=100000)\n parser.add_argument('--max_len', type=int, help='Global step to restore checkpoint', default=400)\n\n args = parser.parse_args()\n synthesis(\"Transformer model is so fast!\",args)\n" ]
[ [ "torch.LongTensor", "scipy.io.wavfile.write", "torch.zeros", "torch.load", "torch.cat", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
IanAWatson/smu
[ "0cb07853f018b9e36cea85597b52bffde205e8d4" ]
[ "parser/smu_utils_lib.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\n\"\"\"This class provides shared utilities for parsing and writing SMU7 files.\"\"\"\n\nimport collections\nimport csv\nimport enum\nimport numpy as np\nimport pandas as pd\nfrom rdkit import Chem\nfrom rdkit import Geometry\n\nfrom tensorflow.io import gfile\n\nfrom smu import dataset_pb2\n\n# The stage1 files do not label their error fields explicitly. This is the list\n# of fields in order.\nSTAGE1_ERROR_FIELDS = [\n 'error_nstat1', 'error_nstatc', 'error_nstatt', 'error_frequencies'\n]\n\nATOM_CHARS = 'cnofh'\n\nATOM_TYPE_TO_MAX_BONDS = {\n dataset_pb2.BondTopology.AtomType.ATOM_C: 4,\n dataset_pb2.BondTopology.AtomType.ATOM_N: 3,\n dataset_pb2.BondTopology.AtomType.ATOM_NPOS: 4,\n dataset_pb2.BondTopology.AtomType.ATOM_O: 2,\n dataset_pb2.BondTopology.AtomType.ATOM_ONEG: 1,\n dataset_pb2.BondTopology.AtomType.ATOM_F: 1,\n dataset_pb2.BondTopology.AtomType.ATOM_H: 1\n}\n\n# The value is a pair of an atomic symbol and formal charge\nATOM_TYPE_TO_RDKIT = {\n dataset_pb2.BondTopology.AtomType.ATOM_C: ('C', 0),\n dataset_pb2.BondTopology.AtomType.ATOM_N: ('N', 0),\n dataset_pb2.BondTopology.AtomType.ATOM_NPOS: ('N', 1),\n dataset_pb2.BondTopology.AtomType.ATOM_O: ('O', 0),\n dataset_pb2.BondTopology.AtomType.ATOM_ONEG: ('O', -1),\n dataset_pb2.BondTopology.AtomType.ATOM_F: ('F', 0),\n dataset_pb2.BondTopology.AtomType.ATOM_H: ('H', 0),\n}\n\nATOM_TYPE_TO_CHAR = {\n dataset_pb2.BondTopology.AtomType.ATOM_C: 'c',\n dataset_pb2.BondTopology.AtomType.ATOM_N: 'n',\n dataset_pb2.BondTopology.AtomType.ATOM_NPOS: 'n',\n dataset_pb2.BondTopology.AtomType.ATOM_O: 'o',\n dataset_pb2.BondTopology.AtomType.ATOM_ONEG: 'o',\n dataset_pb2.BondTopology.AtomType.ATOM_F: 'f',\n dataset_pb2.BondTopology.AtomType.ATOM_H: 'h'\n}\nATOM_TYPE_TO_ATOMIC_NUMBER = {\n dataset_pb2.BondTopology.AtomType.ATOM_C: 6,\n dataset_pb2.BondTopology.AtomType.ATOM_N: 7,\n dataset_pb2.BondTopology.AtomType.ATOM_NPOS: 7,\n dataset_pb2.BondTopology.AtomType.ATOM_O: 8,\n dataset_pb2.BondTopology.AtomType.ATOM_ONEG: 8,\n dataset_pb2.BondTopology.AtomType.ATOM_F: 9,\n dataset_pb2.BondTopology.AtomType.ATOM_H: 1\n}\n\nBOND_TYPE_TO_RDKIT = {\n dataset_pb2.BondTopology.BondType.BOND_SINGLE: Chem.rdchem.BondType.SINGLE,\n dataset_pb2.BondTopology.BondType.BOND_DOUBLE: Chem.rdchem.BondType.DOUBLE,\n dataset_pb2.BondTopology.BondType.BOND_TRIPLE: Chem.rdchem.BondType.TRIPLE,\n}\n\nRDKIT_BOND_TO_BOND_TYPE = {\n Chem.rdchem.BondType.SINGLE: dataset_pb2.BondTopology.BondType.BOND_SINGLE,\n Chem.rdchem.BondType.DOUBLE: dataset_pb2.BondTopology.BondType.BOND_DOUBLE,\n Chem.rdchem.BondType.TRIPLE: dataset_pb2.BondTopology.BondType.BOND_TRIPLE,\n}\n\nINTEGER_TO_BOND_TYPE = [\n dataset_pb2.BondTopology.BondType.BOND_UNDEFINED,\n dataset_pb2.BondTopology.BondType.BOND_SINGLE,\n dataset_pb2.BondTopology.BondType.BOND_DOUBLE,\n dataset_pb2.BondTopology.BondType.BOND_TRIPLE,\n]\n\nERROR_CODES = 
collections.OrderedDict([\n # TODO(pfr): give the ones with just error codes better names\n ('nstat1', 'error_nstat1'),\n ('nstatc', 'error_nstatc'),\n ('nstatv', 'error_frequencies'),\n ('nstatt', 'error_nstatt'),\n ('nsvato', 'error_atomic_analysis'),\n ('nsvnsb', 'error_nmr_analysis_b3lyp_small_basis'),\n ('nsvnlb', 'error_nmr_analysis_b3lyp_large_basis'),\n ('nsvele', 'error_charge_analysis'),\n ('nsvego', 'error_nsvego'),\n ('nsveh3', 'error_energies_orbitals_pvtz'),\n ('nsveh4', 'error_energies_orbitals_pvqz'),\n ('nsvec3', 'error_energies_orbitals_pcvtz'),\n ('nsvexc', 'error_excitation_energies'),\n ('nsveca', 'error_single_point_energies'),\n ('nsvmr1', 'error_inconsistent_molecule_energy_turbomole_mrcc'),\n ('nsvmr2', 'error_inconsistent_cation_energy_turbomole_mrcc'),\n ('nsvvib', 'error_normal_modes'),\n ('nsvor1', 'error_inconsistent_molecule_energy_turbomole_orca'),\n ('nsvor2', 'error_inconsistent_cation_energy_turbomole_orca'),\n ('nsvrot', 'error_rotational_modes'),\n ('nsvnsp', 'error_nmr_analysis_pbe0_small_basis'),\n ('nsvnlp', 'error_nmr_analysis_pbe0_large_basis'),\n ('nsvg09', 'error_nsvg09'),\n ('nsvho1', 'error_nsvho1'),\n ('nsvho2', 'error_nsvho2'),\n ('nsvho3', 'error_nsvho3'),\n ('nsvneg', 'error_nsvneg'),\n])\n\n# SMU1 was not included in our original bond topology enumeration. So we stick\n# these bond topologies at the very end of the list. We also have to special\n# case them in the parsing / writing. The tuples are:\n# id provided in dat files, bond topology id we use, atom, valence\nSPECIAL_ID_CASES = [\n (999999, 899649, 'F', 1),\n (999998, 899650, 'O', 2),\n (999997, 899651, 'N', 3),\n (999996, 899652, 'C', 4),\n]\n\n# Conversion constant from Bohr to Angstroms\nBOHR_TO_ANGSTROMS = 0.529177249\n\n\ndef special_case_bt_id_from_dat_id(dat_id, smiles):\n \"\"\"Determines if dat_id is a special case.\n\n Special case handling for SMU1. 
We see special cases in two ways.\n * If dat_id is 0 (happens in stage1 files), we use the smiles string to\n determine the bt_id\n * If dat_id is listed in SPECIAL_ID_CASES (happens in stage2 files), we use\n mapping from there\n\n Args:\n dat_id: integer id from the ID line of the .dat file\n smiles: smiles string for this case\n\n Returns:\n None if this is not a special case, bond topology id otherwise\n \"\"\"\n if dat_id == 0:\n # Note that the smiles string for these special SMU1 cases is just the atom\n matched_ids = [vals[1] for vals in SPECIAL_ID_CASES if vals[2] == smiles]\n if matched_ids:\n return matched_ids[0]\n else:\n raise ValueError(f'ID from .dat is 0, but {smiles} is not a special case')\n else:\n matched_ids = [vals[1] for vals in SPECIAL_ID_CASES if vals[0] == dat_id]\n if matched_ids:\n return matched_ids[0]\n return None\n\n\ndef special_case_dat_id_from_bt_id(bt_id):\n \"\"\"Determines if bt_id is a special case.\n\n Special case handling for SMU1.\n\n Args:\n bt_id: integer bond topology id\n\n Returns:\n None if this is not a special case, id to use for .dat file otherwise\n \"\"\"\n matched_ids = [vals[0] for vals in SPECIAL_ID_CASES if vals[1] == bt_id]\n if matched_ids:\n return matched_ids[0]\n return None\n\n\ndef bohr_to_angstroms(length):\n \"\"\"Convert bohr units to angstroms.\n\n Args:\n length: float\n\n Returns:\n float\n \"\"\"\n return length * BOHR_TO_ANGSTROMS\n\n\ndef get_composition(topology):\n \"\"\"Returns the composition/stoichiometry of the molecule.\n\n The composition is returned as a 'x{num heavy atoms}_' followed by a single\n character per atom type followed by the atom count for the respective type.\n Atoms appear in order 'cnohf'. Types\n with 0-count are omitted and 1 counts are omitted.\n Example: x07_c4o2fh7\n\n Args:\n topology: A BondTopology protocol buffer message.\n\n Returns:\n composition(string)\n \"\"\"\n counts = {char: 0 for char in ATOM_CHARS}\n heavy_atom_count = 0\n for atom in topology.atoms:\n counts[ATOM_TYPE_TO_CHAR[atom]] += 1\n if atom != dataset_pb2.BondTopology.AtomType.ATOM_H:\n heavy_atom_count += 1\n components = []\n for c in ATOM_CHARS:\n if counts[c] == 0:\n continue\n elif counts[c] == 1:\n count_str = ''\n else:\n count_str = str(counts[c])\n components.append(c + count_str)\n return 'x{:02d}_{}'.format(heavy_atom_count, ''.join(components))\n\n\n_STOICHIOMETRY_WITH_HYDROGENS_COMPONENTS = ['c', 'ch', 'ch2', 'ch3', 'ch4',\n 'n', 'nh', 'nh2', 'nh3',\n 'o', 'oh', 'oh2',\n 'f', 'fh']\n\n\ndef get_canonical_stoichiometry_with_hydrogens(topology):\n \"\"\"Get stoichiometry where hydrogen count is part of the atom type.\n\n Each heavy atom is typed by the number of hydrogens it's connected to, e.g.\n * c: carbon with no hydrogens\n * ch: carbon with one hydrogen\n * ch2: carbon with two hydrogens\n\n Each atom type is then included in the output with it's count of how often it\n occurs (just like a normal stoichiometry).\n\n Atom types are in order 'cnof' then by number of hydrogens\n\n For example\n * benzene: (ch)6\n * water: (oh2) (note that the 1 is implicit)\n * ethylene: (ch2)2\n * acrylic acid: (c)(ch)(ch2)(o)(oh)\n\n Args:\n topology: A BondTopology protocol buffer message.\n\n Returns:\n string\n \"\"\"\n hydrogen_counts = compute_bonded_hydrogens(topology,\n compute_adjacency_matrix(topology))\n components = collections.defaultdict(int)\n for atom_idx, h_count in enumerate(hydrogen_counts):\n this_component = ATOM_TYPE_TO_CHAR[topology.atoms[atom_idx]]\n if h_count >= 1:\n this_component += 'h'\n 
if h_count > 1:\n this_component += str(h_count)\n components[this_component] += 1\n\n out = ''\n for got_component in _STOICHIOMETRY_WITH_HYDROGENS_COMPONENTS:\n if got_component not in components:\n continue\n out += f'({got_component})'\n if components[got_component] > 1:\n out += str(components[got_component])\n\n return out\n\n\ndef compute_adjacency_matrix(topology):\n \"\"\"Helper function to determine the adjacency matrix between heavy atoms.\n\n Only the upper diagonal of the matrix is filled, all other entries are 0.\n All values are non-negative, with positive values giving the bond order.\n\n Args:\n topology: A BondTopology protocol buffer message.\n\n Returns:\n An NxN matrix, where N equals the number of heavy atoms in a molecule.\n \"\"\"\n side_length = len([\n atom for atom in topology.atoms\n if atom != dataset_pb2.BondTopology.AtomType.ATOM_H\n ])\n adjacency_matrix = [[0] * side_length for _ in range(side_length)]\n for bond in topology.bonds:\n if topology.atoms[bond.atom_b] != dataset_pb2.BondTopology.AtomType.ATOM_H:\n if bond.bond_type == dataset_pb2.BondTopology.BondType.BOND_SINGLE:\n adjacency_matrix[bond.atom_a][bond.atom_b] = 1\n elif bond.bond_type == dataset_pb2.BondTopology.BondType.BOND_DOUBLE:\n adjacency_matrix[bond.atom_a][bond.atom_b] = 2\n elif bond.bond_type == dataset_pb2.BondTopology.BondType.BOND_TRIPLE:\n adjacency_matrix[bond.atom_a][bond.atom_b] = 3\n return adjacency_matrix\n\n\ndef compute_bonded_hydrogens(topology, adjacency_matrix):\n \"\"\"Helper function to compute number of bonded hydrogens per heavy atom.\n\n Args:\n topology: A BondTopology protocol buffer message.\n adjacency_matrix: Matrix for all heavy atoms giving covalent bond orders.\n\n Returns:\n A list of integers (one per heavy atom) with bonded hydrogen counts.\n \"\"\"\n side_length = len(adjacency_matrix)\n # Initialize with maximum number of hydrogens.\n # Only the first len(adjacency_matrix) atoms in the ordered topology are heavy\n # atoms capable of # binding protons.\n num_bonded_hydrogens = [\n ATOM_TYPE_TO_MAX_BONDS[atom] for atom in topology.atoms[:side_length]\n ]\n # Subtract paired bonds (to other heavy atoms).\n for i in range(side_length):\n for j in range(i + 1, side_length):\n num_bonded_hydrogens[i] -= adjacency_matrix[i][j]\n num_bonded_hydrogens[j] -= adjacency_matrix[i][j]\n return num_bonded_hydrogens\n\n\ndef labeled_smiles(mol):\n \"\"\"Return the smiles for `mol` with atom numbers labeled.\n\n For each atom in `mol` set the atom map number to the\n atom number.\n CCC -> C[C:1][C:2]\n because atom map 0 is never displayed.\n\n Args:\n mol: a molecule.\n\n Returns:\n A labelled smiles string.\n \"\"\"\n natoms = mol.GetNumAtoms()\n for i in range(1, natoms):\n mol.GetAtomWithIdx(i).SetAtomMapNum(i)\n\n to_be_returned: str = Chem.MolToSmiles(\n mol, kekuleSmiles=True, isomericSmiles=False)\n\n # Revert what we changed before returning.\n for i in range(1, natoms):\n mol.GetAtomWithIdx(i).SetAtomMapNum(0)\n\n return to_be_returned\n\n\ndef create_bond_topology(atoms, connectivity_matrix_string, hydrogens_string):\n \"\"\"Creates a BondTopology from a compact string representation.\n\n Any hydrogens in the atoms string will be ignored. 
The appropriate number\n will be added based on what is in the hydrogens string.\n\n Args:\n atoms: a string like 'CCCCOON' (case insensitive) for the heavy atoms\n connectivity_matrix_string: a string for the uppertriangular connectivity\n matrix with bond orders, like '010210'\n hydrogens_string: a string for the number of hydrogens conencted to each\n heavy atom\n\n Returns:\n BondTopology\n \"\"\"\n bond_topology = dataset_pb2.BondTopology()\n\n # Add the heavy atoms\n for atom_type in atoms.lower():\n if atom_type == 'c':\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_C)\n elif atom_type == 'n':\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_N)\n elif atom_type == 'o':\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_O)\n elif atom_type == 'f':\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_F)\n elif atom_type == 'h':\n pass\n else:\n raise ValueError('Unknown atom type: {}'.format(atom_type))\n\n num_heavy_atoms = len(bond_topology.atoms)\n\n # Now add the bonds between the heavy atoms\n if num_heavy_atoms > 1:\n for (i, j), bond_order in zip(\n np.nditer(np.triu_indices(num_heavy_atoms, k=1)),\n connectivity_matrix_string):\n if bond_order == '0':\n continue\n bond = bond_topology.bonds.add()\n bond.atom_a = int(i)\n bond.atom_b = int(j)\n if bond_order == '1':\n bond.bond_type = dataset_pb2.BondTopology.BondType.BOND_SINGLE\n elif bond_order == '2':\n bond.bond_type = dataset_pb2.BondTopology.BondType.BOND_DOUBLE\n elif bond_order == '3':\n bond.bond_type = dataset_pb2.BondTopology.BondType.BOND_TRIPLE\n else:\n raise ValueError('Bad bond order {}'.format(bond_order))\n\n # Now add the hydrogens, and adjust charged atoms if the total bond counts\n # indicate that.\n expected_hydrogens = compute_bonded_hydrogens(\n bond_topology, compute_adjacency_matrix(bond_topology))\n for atom_idx, (actual_h, expected_h) in enumerate(\n zip(hydrogens_string, expected_hydrogens)):\n actual_h = int(actual_h)\n diff = expected_h - actual_h\n atom_type = bond_topology.atoms[atom_idx]\n if diff == -1 and atom_type == dataset_pb2.BondTopology.AtomType.ATOM_N:\n bond_topology.atoms[\n atom_idx] = dataset_pb2.BondTopology.AtomType.ATOM_NPOS\n elif diff == 1 and atom_type == dataset_pb2.BondTopology.AtomType.ATOM_O:\n bond_topology.atoms[\n atom_idx] = dataset_pb2.BondTopology.AtomType.ATOM_ONEG\n elif diff:\n raise ValueError(\n f'Bad hydrogen count (actual={actual_h}, expected={expected_h} '\n 'for {atom_type}, index {atom_idx}')\n for _ in range(actual_h):\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_H)\n h_idx = len(bond_topology.atoms) - 1\n bond = bond_topology.bonds.add()\n bond.atom_a = atom_idx\n bond.atom_b = h_idx\n bond.bond_type = dataset_pb2.BondTopology.BondType.BOND_SINGLE\n\n return bond_topology\n\n\ndef parse_bond_topology_line(line):\n \"\"\"Parses the a line from the enumeration of bond topologies.\n\n These files are generated by a fortran program that uses fixed width\n formatting that varies by topology size.\n\n Args:\n line: string\n\n Returns:\n num atoms (int),\n atoms str (like 'N+O O O-')\n connectivity matrix str (e.g. '010110')\n hydrogen count str (e.g. 
'3000')\n \"\"\"\n line = line.rstrip()\n num_atoms = int(line[0:2])\n atoms_end = 4 + 2*num_atoms\n connectivity_end = atoms_end + 2 + num_atoms * (num_atoms - 1) // 2\n if len(line) != connectivity_end + 2 + num_atoms:\n raise ValueError('Wrong line length: \"{}\"'.format(line))\n return (num_atoms,\n line[4:atoms_end],\n line[atoms_end + 2:connectivity_end],\n line[connectivity_end + 2:connectivity_end + 2 + num_atoms])\n\n\ndef generate_bond_topologies_from_csv(filename):\n \"\"\"Generator for bond topologies stored in a csv.\n\n See merge_bond_topologies.py for the expected format.\n\n Args:\n filename: input csv\n\n Yields:\n BondTopology\n \"\"\"\n with gfile.GFile(filename, 'r') as infile:\n reader = csv.reader(iter(infile))\n next(reader) # skip the header line\n for row in reader:\n bt_id, _, atoms, connectivity, hydrogens, smiles = row\n # The atoms strings looks like 'C N N+O O-' where every atom has a space,\n # +, or - after it. create_bond_topology doesn't want the charge markings\n # (just a string like 'CNNOO') so the [::2] skips those.\n bond_topology = create_bond_topology(atoms[::2], connectivity, hydrogens)\n bond_topology.smiles = smiles\n bond_topology.bond_topology_id = int(bt_id)\n yield bond_topology\n\n\ndef parse_duplicates_file(filename):\n \"\"\"Parses duplciate file into a pandas dataframe.\n\n The duplciate file supplied by our collaborators (called\n list.equivalent_{isomers,conformers.dat) is a two column, space separated\n file of composite names like x07_n4o3h4.091404.073\n which we parse the names into columns\n * nameX: original composiite name from file\n * stoichX: string for the stoichiometry\n * btidX: bond topology id\n * shortconfidX: 3 digit conformer id\n * confidX: full conformer id that we use (btid * 1000 + shortconfid)\n (for X = 1 or 2)\n\n Args:\n filename: file to read (usually list.equivalent_isomers.dat)\n\n Returns:\n pd.DataFrame\n \"\"\"\n with gfile.GFile(filename) as f:\n df_dups = pd.read_csv(\n f, delim_whitespace=True, names=['name1', 'name2'], header=None)\n\n for idx in ['1', '2']:\n df_dups = pd.concat([\n df_dups,\n df_dups['name' +\n idx].str.extract(r'x07_([\\w\\d]+)\\.(\\d+).(\\d+)').rename(columns={\n 0: 'stoich' + idx,\n 1: 'btid' + idx,\n 2: 'shortconfid' + idx\n })\n ],\n axis=1)\n df_dups['btid' + idx] = df_dups['btid' + idx].astype(int)\n df_dups['shortconfid' + idx] = df_dups['shortconfid' + idx].astype(int)\n df_dups['confid' + idx] = (\n df_dups['btid' + idx] * 1000 + df_dups['shortconfid' + idx])\n\n return df_dups\n\n\ndef bond_topology_to_molecule(bond_topology):\n \"\"\"Converts a bond topology proto to an RDKit molecule.\n\n Args:\n bond_topology: dataset_pb2.BondTopology\n\n Returns:\n rdkit.Chem.rdchem.RWMol\n \"\"\"\n mol = Chem.rdchem.RWMol()\n for pb_atom_idx, pb_atom in enumerate(bond_topology.atoms):\n symbol, charge = ATOM_TYPE_TO_RDKIT[pb_atom]\n atom = Chem.Atom(symbol)\n atom.SetFormalCharge(charge)\n atom_idx = mol.AddAtom(atom)\n assert atom_idx == pb_atom_idx\n\n for pb_bond in bond_topology.bonds:\n mol.AddBond(pb_bond.atom_a, pb_bond.atom_b,\n BOND_TYPE_TO_RDKIT[pb_bond.bond_type])\n\n return mol\n\ndef AddAtom(atom: Chem.rdchem.Atom, bond_topology: dataset_pb2.BondTopology) -> None:\n \"\"\"Adds a new Atom to `bond_topology`.\n\n Args:\n atom: an RDKit Atom\n \"\"\"\n atomic_number = atom.GetAtomicNum()\n if atomic_number == 1:\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_H)\n elif atomic_number == 6:\n 
bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_C)\n elif atomic_number == 7:\n if atom.GetFormalCharge() == 0:\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_N)\n else:\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_NPOS)\n elif atomic_number == 8:\n if atom.GetFormalCharge() == 0:\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_O)\n else:\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_ONEG)\n elif atomic_number == 9:\n bond_topology.atoms.append(dataset_pb2.BondTopology.AtomType.ATOM_F)\n else:\n raise ValueException(f\"Unrecognized atomic number {atomic_number}\")\n\ndef AddBond(bond: Chem.rdchem.Bond, bond_topology: dataset_pb2.BondTopology) -> None:\n \"\"\"Adds a new Bond to `bond_topology`.\n\n Args:\n bond: an RDKit Bond\n \"\"\"\n smu_btype = RDKIT_BOND_TO_BOND_TYPE[bond.GetBondType()]\n bond_topology.bonds.append(dataset_pb2.BondTopology.Bond(\n atom_a=bond.GetBeginAtom().GetIdx(),\n atom_b=bond.GetEndAtom().GetIdx(),\n bond_type=smu_btype))\n\n\ndef molecule_to_bond_topology(mol: Chem.RWMol) -> dataset_pb2.BondTopology:\n \"\"\"Converts `mol` to a BondTopology.\n\n Args:\n mol: molecule\n Returns:\n BondTopology\n \"\"\"\n Chem.Kekulize(mol)\n result = dataset_pb2.BondTopology() # To be returned.\n\n for atom in mol.GetAtoms():\n AddAtom(atom, result) \n\n for bond in mol.GetBonds():\n AddBond(bond, result)\n\n result.smiles = compute_smiles_for_molecule(mol, include_hs=True)\n\n return result\n\n\ndef conformer_to_molecules(conformer,\n include_initial_geometries=True,\n include_optimized_geometry=True,\n include_all_bond_topologies=True):\n \"\"\"Converts a Conformer to RDKit molecules.\n\n Because a Conformer can include multiple bond topologies and geometries,\n multiple RDKit molecule objects can be produced\n\n The name of the molcule will be (all on one line)\n SMU <confid>\n bt=<bt_id>(<bt_idx>/<bt_count>)\n geom=[opt|init(<init_idx>/<init_count>)]\n where\n confid: conformer_id\n bt_id: bond_topology_id\n bt_idx: index in bond_topologies\n bt_count: size of bond_topologies\n init_idx: index in initial_geometries\n init_count: size of initial_geometries\n\n Args:\n conformer: dataset_pb2.Conformer\n include_initial_geometries: output molecule for each initial_geometries\n include_optimized_geometry: output molecule for optimized_geometry\n include_all_bond_topologies: if False, use only the first entry of\n bond_topologies. 
If True, output molecule for each bond_topologies.\n\n Yields:\n rdkit.Chem.rdchem.RWMol\n \"\"\"\n bt_count = len(conformer.bond_topologies)\n if include_all_bond_topologies:\n bts = conformer.bond_topologies\n else:\n bts = conformer.bond_topologies[0:1]\n requested_bond_topologies = [\n (bt, f'{bt.bond_topology_id}({i}/{bt_count})') for i, bt in enumerate(bts)\n ]\n\n # requested_geometries will be a list of tuples of\n # (goemetry, label)\n # where label is a string describing the geometry\n requested_geometries = []\n if include_initial_geometries:\n init_count = len(conformer.initial_geometries)\n requested_geometries.extend([\n (geom, f'init({i}/{init_count})')\n for i, geom in enumerate(conformer.initial_geometries)\n ])\n if include_optimized_geometry:\n requested_geometries.append((conformer.optimized_geometry, 'opt'))\n\n for bt, bt_label in requested_bond_topologies:\n for geom, geom_label in requested_geometries:\n\n mol = bond_topology_to_molecule(bt)\n mol.SetProp(\n '_Name',\n f'SMU {conformer.conformer_id} bt={bt_label} geom={geom_label}')\n\n # Add in the coordinates\n conf = Chem.Conformer(len(bt.atoms))\n conf.Set3D(True)\n for atom_idx, pos in enumerate(geom.atom_positions):\n conf.SetAtomPosition(\n atom_idx,\n Geometry.Point3D(\n bohr_to_angstroms(pos.x),\n bohr_to_angstroms(pos.y),\n bohr_to_angstroms(pos.z)))\n mol.AddConformer(conf)\n\n # TODO(pfr): put the computed properties as properties of the molecule.\n\n yield mol\n\n\ndef compute_smiles_for_bond_topology(bond_topology, include_hs, labeled_atoms=False):\n \"\"\"Calculate a canonical smiles for the given bond_topology.\n\n The bond topology may have the smiles field filled in but this method ignores\n that and calculates it directly from the atom and bond description.\n\n Args:\n bond_topology: dataset_pb2.BondTopology\n include_hs: whether to include hs in the smiles string\n labeled_atoms: whether or not to apply atom number labels.\n\n Returns:\n string\n \"\"\"\n return compute_smiles_for_molecule(\n bond_topology_to_molecule(bond_topology), include_hs, labeled_atoms=labeled_atoms)\n\n\ndef compute_smiles_for_molecule(mol, include_hs, labeled_atoms = False):\n \"\"\"Calculate a canonical smiles for the given RDKit Molecule.\n\n Note that you probably should NOT have sanitized your RDKit molecule. The\n sanitization procedure can move bonds around in funny ways where here we are\n ignoring aromaticity and keeping bond orders fixed. Because we have such\n funny molecules here, RDKit sometimes does things we don't expect during\n sanitization.\n\n This is *almost* what RDKit does by default, but because we don't want to\n deal with aromaticity at all, we can't sanitize. That produces one case\n where RDKit will produce different SMILES for what is the same molecule.\n We catch that one case here.\n\n Note that it is the caller's responsibility to make sure that any Hs intended\n are in the mol. They will NOT be added by this function even when include_hs\n is given.\n\n If labeled_atoms is True, a smiles where every atom has as its atom map number\n the atom number within the molecule.\n\n Args:\n mol: rdkit Mol\n include_hs: whether to include hs in the smiles string\n labeled_atoms: whether or not to apply atom number labels.\n\n Returns:\n string\n \"\"\"\n if not include_hs:\n mol = Chem.RemoveHs(mol, sanitize=False)\n if labeled_atoms:\n return labeled_smiles(mol)\n smiles = Chem.MolToSmiles(mol, kekuleSmiles=True, isomericSmiles=False)\n # Yep, this is weird. 
Depending on the order of the atoms presented to RDKit\n # you can get either of these two smiles back. We arbitrarily picked one of\n # them to return. Note that this one case has no hydrogens so it doesn't\n # matter whether include_hs is True\n if smiles == 'C12=C3C4=C1C4=C23':\n return 'C12=C3C1=C1C2=C31'\n return smiles\n\n\nclass SmilesCompareResult(enum.Enum):\n MISSING = 2\n MISMATCH = 3\n MATCH = 4\n\n def __str__(self):\n out = super(SmilesCompareResult, self).__str__()\n # remove the SmilesCompareResult. part\n return out[20:]\n\n\ndef bond_topology_smiles_comparison(bond_topology):\n \"\"\"Compares the given smiles string to one generated by RDKit.\n\n The atom/bond structure defines a molecule that we can then turn into a\n SMILES string. There is also a smiles string in the bond topology that was\n generated by large computational pipeline and may have some issues.\n\n There are three output states. The first applicable one is returned.\n * MISSING: bond_topology does not have a smiles field\n * MISMATCH: bond_topology.smiles does not match the one generated by RDKit\n * MATCH: bond_topology.smiles matches one from RDKit\n\n Args:\n bond_topology: dataset_pb2.BondTopology\n\n Returns:\n SmilesCompareResult, SMILES(with H), SMILES(without H)\n \"\"\"\n smiles_with_h = compute_smiles_for_bond_topology(\n bond_topology, include_hs=True)\n smiles_without_h = compute_smiles_for_bond_topology(\n bond_topology, include_hs=False)\n\n if not bond_topology.smiles:\n return SmilesCompareResult.MISSING, smiles_with_h, smiles_without_h\n\n if bond_topology.smiles == smiles_without_h:\n return SmilesCompareResult.MATCH, smiles_with_h, smiles_without_h\n else:\n return SmilesCompareResult.MISMATCH, smiles_with_h, smiles_without_h\n\n\nclass _ConformerSource(enum.Enum):\n DUPLICATE = 0\n STAGE1 = 1\n STAGE2 = 2\n\n\ndef _conformer_source(conf):\n \"\"\"Determines source of given conformer.\"\"\"\n if not conf.HasField('properties'):\n if conf.duplicated_by == 0 and not conf.duplicate_of:\n raise ValueError(\n 'Unknown conformer source, no properties or duplicates: ' +\n str(conf))\n return _ConformerSource.DUPLICATE\n # Kind of a dumb hack, but the easiest thing to look for to distinguish stage1\n # and stage 2 is that stage 1 only has timings for two computation steps.\n if len(conf.properties.calculation_statistics) == 2:\n return _ConformerSource.STAGE1\n return _ConformerSource.STAGE2\n\n\n# A list of fields that will be returned by merge_conformer on a conflict.\n# The fields for the STAGE1 conformer are first, then fields for the STAGE2\n# conformer.\nMERGE_CONFLICT_FIELDS = [\n 'conformer_id',\n 'error_nstat1_1',\n 'error_nstatc_1',\n 'error_nstatt_1',\n 'error_frequencies_1',\n 'initial_geometry_energy_1',\n 'initial_geometry_gradient_norm_1',\n 'optimized_geometry_energy_1',\n 'optimized_geometry_gradient_norm_1',\n 'has_initial_geometry_1',\n 'has_optimized_geometry_1',\n 'error_nstat1_2',\n 'error_nstatc_2',\n 'error_nstatt_2',\n 'error_frequencies_2',\n 'initial_geometry_energy_2',\n 'initial_geometry_gradient_norm_2',\n 'optimized_geometry_energy_2',\n 'optimized_geometry_gradient_norm_2',\n 'has_initial_geometry_2',\n 'has_optimized_geometry_2',\n]\n\n\ndef merge_conformer(conf1, conf2):\n \"\"\"Tries to merge information from two conformers.\n\n During the pipeline, we have partial information about conformers that we\n need to merge. 
This is the workhorse function for merging these.\n\n Only conformers with the same conformer_id should be merged.\n\n The key concept is to identify a source of each conformer:\n * STAGE2: From end of pipeline, with mostly complete info\n * STAGE1: From after geometry optimization. Except for duplicate information\n which may have been merged, mostly contains duplicate information to\n STAGE2. However, in some cases it's expected that stage2 will differ\n because of reruns in STAGE2.\n * DUPLICATE: An almost bare conformer with just duplicated_by and/or\n duplicate_of fields\n\n May modify one of the inputs.\n\n Note that this is not the most general merge that the format suggests. In\n particular, it's expected that there is at most 1 initial_geometries and\n 1 bond_topologies (and it's the same for all conformers). The final data won't\n be like this but it handles what's in the pipeline at this point we use this.\n\n While merging STAGE1 and STAGE2, conflicting values of some fields may be\n detected. If they are, then a list of all fields (from MERGE_CONFLICT_FIELDS)\n are returned in addition to doing the merge. If there is no conflict, None\n is returned as the second argument.\n\n ValueError is returned when a different error besides these expected\n differences is found.\n\n Args:\n conf1: dataset_pb2.Conformer\n conf2: dataset_pb2.Conformer\n\n Returns:\n dataset_pb2.Conformer, None or list of field values (see above)\n\n Raises:\n ValueError: if len(initial_geometries) != 1, len(bond_topologies) != 1,\n bond_topologies differ, or incompatible duplicated_by fields\n \"\"\"\n source1 = _conformer_source(conf1)\n source2 = _conformer_source(conf2)\n\n if source1 == source2:\n if source1 == _ConformerSource.STAGE1 or source1 == _ConformerSource.STAGE2:\n raise ValueError(\n 'Can not merge two conformers of source {}'.format(source1))\n conf1.MergeFrom(conf2)\n return conf1, None\n\n if source2.value < source1.value:\n conf1, conf2 = conf2, conf1\n source1, source2 = source2, source1\n\n if len(conf1.initial_geometries) > 1:\n raise ValueError('At most 1 initial_geometries allowed, got {}'.format(\n len(conf1.initial_geometries)))\n if len(conf2.initial_geometries) > 1:\n raise ValueError('At most 1 initial_geometries allowed, got {}'.format(\n len(conf2.initial_geometries)))\n\n if len(conf1.bond_topologies) > 1:\n raise ValueError('At most 1 bond_topologies allowed, got {}'.format(\n len(conf1.initial_geometries)))\n if len(conf2.bond_topologies) > 1:\n raise ValueError('At most 1 bond_topologies allowed, got {}'.format(\n len(conf2.initial_geometries)))\n\n if conf1.bond_topologies and conf2.bond_topologies:\n if conf1.bond_topologies[0] != conf2.bond_topologies[0]:\n raise ValueError(\n 'All bond topologies must be the same, got ids {} and {}'.format(\n conf1.bond_topologies[0].bond_topology_id,\n conf2.bond_topologies[0].bond_topology_id))\n\n # All the remaining cases are just moving duplicate information from\n # source1 to source2. The only non trivial version of this is merging\n # STAGE1 into STAGE2. 
In some cases, the value will differ and we want to\n # note that here.\n has_conflict = False\n if source1 == _ConformerSource.STAGE1 and source2 == _ConformerSource.STAGE2:\n if len(conf1.bond_topologies) != 1 or len(conf2.bond_topologies) != 1:\n has_conflict = True\n\n if len(conf1.initial_geometries) != len(conf2.initial_geometries):\n has_conflict = True\n if (conf1.HasField('optimized_geometry') !=\n conf2.HasField('optimized_geometry')):\n has_conflict = True\n\n for field in STAGE1_ERROR_FIELDS:\n if (getattr(conf1.properties.errors, field) !=\n getattr(conf2.properties.errors, field)):\n has_conflict = True\n\n for field, atol in [\n ('initial_geometry_energy', 2e-6),\n ('initial_geometry_gradient_norm', 1e-6),\n ('optimized_geometry_energy', 2e-6),\n ('optimized_geometry_gradient_norm', 1e-6),\n ]:\n val1 = getattr(conf1.properties, field).value\n val2 = getattr(conf2.properties, field).value\n # In some cases, stage2 files have -1 for these fields where stage1\n # doesn't. At some point, stricter error checking was done such that\n # nonsense values were not put into the .dat. So if stage2 has a -1, we\n # just leave it.\n if val2 != -1.0:\n if not np.isclose(val1, val2, atol=atol, rtol=0):\n has_conflict = True\n\n if (conf1.duplicated_by != 0 and conf2.duplicated_by != 0 and\n conf1.duplicated_by != conf2.duplicated_by):\n raise ValueError('Incompatible duplicated_by {} {}'.format(\n conf1.duplicated_by, conf2.duplicated_by))\n # max is just to get the non-zero one\n conf2.duplicated_by = max(conf1.duplicated_by, conf2.duplicated_by)\n conf2.duplicate_of.extend(conf1.duplicate_of)\n\n if not has_conflict:\n return conf2, None\n\n conflict_info = [conf1.conformer_id]\n for c in [conf1, conf2]:\n conflict_info.append(c.properties.errors.error_nstat1)\n conflict_info.append(c.properties.errors.error_nstatc)\n conflict_info.append(c.properties.errors.error_nstatt)\n conflict_info.append(c.properties.errors.error_frequencies)\n conflict_info.append(c.properties.initial_geometry_energy.value)\n conflict_info.append(c.properties.initial_geometry_gradient_norm.value)\n conflict_info.append(c.properties.optimized_geometry_energy.value)\n conflict_info.append(c.properties.optimized_geometry_gradient_norm.value)\n conflict_info.append(bool(c.initial_geometries))\n conflict_info.append(c.HasField('optimized_geometry'))\n\n return conf2, conflict_info\n\n\ndef conformer_has_calculation_errors(conformer):\n \"\"\"Checks whether error codes indicate that this conformer had errors.\n\n Args:\n conformer: dataset_pb2.Conformer\n\n Returns:\n bool\n \"\"\"\n source = _conformer_source(conformer)\n errors = conformer.properties.errors\n for field_descriptor in errors.DESCRIPTOR.fields:\n if field_descriptor.name == 'error_during_merging':\n # This is an internal field that will eventually go away.\n continue\n if (source == _ConformerSource.STAGE1 and\n field_descriptor.name not in STAGE1_ERROR_FIELDS):\n # Stage1 files only set a couple of error fields, so we just ignore the\n # others.\n continue\n value = getattr(errors, field_descriptor.name)\n if field_descriptor.name == 'error_nsvg09':\n # This field is backwards in that 0 is the success value.\n if value != 0:\n return True\n elif field_descriptor.name == 'error_nstat1':\n # Another odd case: this one value can be either 1 or 3 and still be\n # success.\n if value != 1 and value != 3:\n return True\n else:\n if value != 1:\n return True\n\n return False\n\n\ndef filter_conformer_by_availability(conformer, allowed):\n \"\"\"Filters fields 
of Conformer by availability annotations.\n\n *Modifies* the input conformer.\n\n Args:\n conformer: dataset_pb2.Conformer\n allowed: list of AvailabilityEnum\n \"\"\"\n # A bit of a hack because original_conformer_index is the only field we\n # filter in the conformer not in the properties subfield.\n if dataset_pb2.INTERNAL_ONLY not in allowed:\n conformer.ClearField('original_conformer_index')\n for descriptor, _ in conformer.properties.ListFields():\n if (descriptor.GetOptions().Extensions[dataset_pb2.availability]\n not in allowed):\n conformer.properties.ClearField(descriptor.name)\n\n\ndef conformer_to_standard(conformer):\n \"\"\"Converts a Conformer from internal to 'Standard' form.\n\n The \"Complete\" dataset has all information that anyone could reasonably use.\n The \"Standard\" dataset is a reduced form with information that we trust and\n various simplifications.\n\n *Modifies* the input conformer.\n\n Args:\n conformer: dataset_pb2.Conformer\n\n Returns:\n dataset_pb2.Conformer or None (meaning that this conformer should be\n filtered)\n \"\"\"\n if (conformer_has_calculation_errors(conformer) or\n conformer.duplicated_by > 0):\n return None\n\n filter_conformer_by_availability(conformer, [dataset_pb2.STANDARD])\n\n return conformer\n\n\ndef determine_fate(conformer):\n \"\"\"Determines the cateogrical FateCategory for conformer.\n\n Args:\n conformer: dataset_pb2.Conformer\n\n Returns:\n dataset_pb2.Conformer.FateCategory\n \"\"\"\n source = _conformer_source(conformer)\n if source == _ConformerSource.DUPLICATE:\n # This shouldn't really happen in the real set so we'll just leave it as\n # undefined.\n return dataset_pb2.Conformer.FATE_UNDEFINED\n\n elif source == _ConformerSource.STAGE1:\n if conformer.duplicated_by > 0:\n this_btid = conformer.conformer_id // 1000\n other_btid = conformer.duplicated_by // 1000\n if this_btid == other_btid:\n return dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY\n else:\n return dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY\n\n error_code = conformer.properties.errors.error_nstat1\n # These error codes looks like random numbers. 
They are the particular\n # code generated by the Fortran code.\n if error_code == 2:\n return dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM\n elif error_code == 5:\n return dataset_pb2.Conformer.FATE_DISASSOCIATED\n elif error_code == 4:\n return dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE\n elif error_code > 1:\n return dataset_pb2.Conformer.FATE_DISCARDED_OTHER\n else:\n # This means that we can find no reason this shouldn't have gone on to\n # stage2.\n return dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS\n\n elif source == _ConformerSource.STAGE2:\n if conformer_has_calculation_errors(conformer):\n return dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR\n else:\n return dataset_pb2.Conformer.FATE_SUCCESS\n\n else:\n raise ValueError(f'Got an unknown source {source}')\n\n\ndef conformer_to_bond_topology_summaries(conformer):\n \"\"\"Produces BondTopologySummary protos from Conformer.\n\n Since a conformer can be associated with many bond topologies, this can output\n potentially many summaries.\n\n Args:\n conformer: dataset_pb2.Conformer\n\n Yields:\n dataset_pb2.BondTopologySummary\n \"\"\"\n summary = dataset_pb2.BondTopologySummary()\n if (conformer.conformer_id // 1000 !=\n conformer.bond_topologies[0].bond_topology_id):\n raise ValueError('conformers_to_bond_topology_summaries assumes the '\n 'first bond topology is the one that generated this.')\n summary.bond_topology.CopyFrom(conformer.bond_topologies[0])\n summary.count_attempted_conformers = 1\n\n fate = conformer.fate\n\n if fate == dataset_pb2.Conformer.FATE_UNDEFINED:\n raise ValueError(f'Conformer {conformer.conformer_id} has undefined fate')\n elif fate == dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY:\n summary.count_duplicates_same_topology = 1\n elif fate == dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY:\n summary.count_duplicates_different_topology = 1\n elif (fate == dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM or\n fate == dataset_pb2.Conformer.FATE_DISASSOCIATED or\n fate == dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE or\n fate == dataset_pb2.Conformer.FATE_DISCARDED_OTHER):\n summary.count_failed_geometry_optimization = 1\n elif fate == dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS:\n summary.count_kept_geometry = 1\n summary.count_missing_calculation = 1\n elif fate == dataset_pb2.Conformer.FATE_CALCULATION_WITH_ERROR:\n summary.count_kept_geometry = 1\n summary.count_calculation_with_error = 1\n for bt in conformer.bond_topologies[1:]:\n other_summary = dataset_pb2.BondTopologySummary()\n other_summary.bond_topology.CopyFrom(bt)\n other_summary.count_detected_match_with_error = 1\n yield other_summary\n elif fate == dataset_pb2.Conformer.FATE_SUCCESS:\n summary.count_kept_geometry = 1\n summary.count_calculation_success = 1\n for bt in conformer.bond_topologies[1:]:\n other_summary = dataset_pb2.BondTopologySummary()\n other_summary.bond_topology.CopyFrom(bt)\n other_summary.count_detected_match_success = 1\n yield other_summary\n else:\n raise ValueError(f'Did not understand {fate}')\n\n yield summary\n" ]
[ [ "tensorflow.io.gfile.GFile", "pandas.read_csv", "numpy.isclose", "numpy.triu_indices" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
wanglifeng1022/DeepTreeAttention
[ "580e9dffbab1fb51faf1f29505897e0b5cf5693a" ]
[ "tests/test_trees.py" ]
[ "#Test main tree module, only run comet experiment locally to debug callbacks\nimport glob\nimport geopandas as gpd\nimport os\n\nis_travis = 'TRAVIS' in os.environ\nif not is_travis:\n from comet_ml import Experiment \n experiment = Experiment(project_name=\"neontrees\", workspace=\"bw4sz\")\n experiment.add_tag(\"testing\") \nelse:\n experiment = None\n\nimport numpy as np\nimport pytest\nimport pandas as pd\nimport rasterio\nimport tensorflow as tf\n\nfrom DeepTreeAttention.utils import metrics\nfrom DeepTreeAttention import trees\nfrom DeepTreeAttention.generators import boxes\nfrom matplotlib.pyplot import imshow\nfrom tensorflow.keras import metrics as keras_metrics\n\n#random label predictions just for testing\ntest_predictions = \"data/raw/2019_BART_5_320000_4881000_image_small.shp\"\n\n#Use a small rgb crop as a example tile\ntest_sensor_tile = \"data/raw/2019_BART_5_320000_4881000_image_crop.tif\"\n\ntest_sensor_hyperspec = \"data/raw/2019_BART_5_320000_4881000_image_hyperspectral_crop.tif\"\n\[email protected]()\ndef mod(tmpdir):\n mod = trees.AttentionModel(config=\"conf/tree_config.yml\") \n \n train_dir = tmpdir.mkdir(\"train\")\n predict_dir = tmpdir.mkdir(\"predict\")\n label_file = \"{}/label_file.csv\".format(train_dir)\n \n #create a fake label file\n pd.DataFrame({\"taxonID\":[\"Ben\",\"Jon\"],\"label\":[0,1]}).to_csv(label_file)\n \n config = {}\n train_config = { }\n train_config[\"tfrecords\"] = train_dir\n train_config[\"batch_size\"] = 2\n train_config[\"epochs\"] = 1\n train_config[\"steps\"] = 1\n train_config[\"gpus\"] = 1\n train_config[\"crop_size\"] = 100\n train_config[\"shuffle\"] = True\n train_config[\"weighted_sum\"] = False\n train_config[\"classes\"] = 2\n train_config[\"species_class_file\"] = label_file\n \n #evaluation\n eval_config = { }\n eval_config[\"tfrecords\"] = None\n eval_config[\"steps\"] = 1\n eval_config[\"ground_truth_path\"] = \"data/processed/test.shp\"\n \n predict_config = { }\n predict_config[\"tfrecords\"] = predict_dir\n \n config[\"train\"] = train_config\n config[\"evaluation\"] = eval_config\n config[\"predict\"] = predict_config\n \n #Replace config for testing env\n for key, value in config.items():\n for nested_key, nested_value in value.items():\n mod.config[key][nested_key] = nested_value\n \n #Update the inits\n mod.RGB_size = mod.config[\"train\"][\"RGB\"][\"crop_size\"]\n mod.HSI_size = mod.config[\"train\"][\"HSI\"][\"crop_size\"]\n mod.HSI_channels = 369\n mod.RGB_channels = 3\n mod.extend_HSI_box = mod.config[\"train\"][\"HSI\"][\"extend_box\"]\n mod.classes_file = label_file\n mod.train_shp = pd.DataFrame({\"taxonID\":[\"Jon\",\"Ben\"], \"siteID\":[0,1],\"domainID\":[0,1],\"plotID\":[0,1], \"canopyPosition\":[\"a\",\"b\"],\"scientific\":[\"genus species\",\"genus species\"]})\n mod.train_shp.index =[2,7]\n mod.sites = 23\n mod.domains = 15\n \n #Create a model with input sizes\n mod.create()\n \n return mod\n\[email protected]()\ndef tfrecords(mod, tmpdir):\n shp = gpd.read_file(test_predictions)\n \n created_records = mod.generate(shapefile=test_predictions, site=0, domain=1, elevation=100,\n heights=np.random.random(shp.shape[0]),\n HSI_sensor_path=test_sensor_hyperspec,\n RGB_sensor_path=test_sensor_tile,\n train=True,\n chunk_size=2) \n return created_records\n\n\ndef test_generate(mod):\n shp = gpd.read_file(test_predictions) \n created_records = mod.generate(\n shapefile=test_predictions,\n domain=1,\n site=0,\n heights=np.random.random(shp.shape[0]),\n elevation=100,\n 
HSI_sensor_path=test_sensor_hyperspec,\n RGB_sensor_path=test_sensor_tile,\n train=True, chunk_size=2) \n \n assert all([os.path.exists(x) for x in created_records])\n\ndef test_split_data(mod, tfrecords):\n #Create class\n mod.read_data(mode=\"RGB\", validation_split=True)\n \n assert len(mod.train_split_records) > 0\n assert len(mod.test_split_records) > 0\n \n #Assert tfrecords are split\n assert all([x not in mod.train_split_records for x in mod.test_split_records])\n \[email protected](\"submodel\",[True, False])\ndef test_AttentionModel(mod, tfrecords, submodel):\n shp = gpd.read_file(test_predictions)\n if submodel:\n mod.read_data(mode=\"RGB_submodel\", validation_split = True)\n else:\n mod.read_data(mode=\"RGB\", validation_split = True)\n \n #How many batches and ensure no overlap in data\n train_image_data = []\n test_image_data = []\n \n train_counter=0\n for data, label in mod.train_split:\n train_image_data.append(data)\n train_counter+=data.shape[0]\n \n test_counter=0\n for data, label in mod.val_split:\n test_image_data.append(data) \n test_counter+=data.shape[0]\n \n assert shp.shape[0] == train_counter + test_counter\n assert train_counter > test_counter\n \n #No test in train batches\n assert all([not np.array_equal(y,x) for x in train_image_data for y in test_image_data])\n\n#Test that the composition of the validation split is the same no matter the data\ndef test_read_data(mod, tfrecords):\n mod.read_data(mode=\"ensemble\", validation_split=True)\n before = mod.test_split_records\n mod.read_data(mode=\"ensemble\", validation_split=True)\n after = mod.test_split_records\n assert before == after\n \ndef test_train_metadata(tfrecords, mod):\n #initial weights\n initial_weight = mod.metadata_model.layers[4].get_weights()\n \n mod.read_data(mode=\"metadata\")\n mod.config[\"train\"][\"metadata\"][\"epochs\"] = 2\n mod.train(submodel=\"metadata\", experiment=experiment, class_weight=None)\n \n final_weight = mod.metadata_model.layers[4].get_weights()\n \n #assert training took place\n assert not np.array_equal(final_weight,initial_weight)\n assert \"loss\" in mod.metadata_model.history.history \n \ndef test_ensemble(tfrecords, mod): \n mod.read_data(mode=\"ensemble\")\n mod.config[\"train\"][\"ensemble\"][\"epochs\"] = 1 \n mod.config[\"train\"][\"ensemble\"][\"batch_size\"] = 2\n mod.ensemble(experiment=experiment, class_weight=None, train=True)\n \[email protected](is_travis, reason=\"Cannot load comet on TRAVIS\")\ndef test_train_callbacks(tfrecords, mod):\n mod.read_data(\"RGB_submodel\", validation_split=True)\n \n #update epoch manually\n mod.config[\"train\"][\"RGB\"][\"epochs\"] = 1\n mod.train(sensor=\"RGB\", submodel=\"spectral\",experiment=experiment)\n\n mod.read_data(mode=\"RGB\")\n mod.train(experiment=experiment, sensor=\"RGB\")\n " ]
[ [ "numpy.array_equal", "numpy.random.random", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
uditjuneja1/autokeras
[ "4770d60f343f3ed0cee689518c3ccefa263402d8" ]
[ "examples/code_reuse_example.py" ]
[ "from functools import reduce\n\nimport torch\n\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import Compose\n\nfrom autokeras.loss_function import classification_loss\nfrom autokeras.metric import Accuracy\nfrom autokeras.model_trainer import ModelTrainer\nfrom autokeras.preprocessor import OneHotEncoder, MultiTransformDataset\n\n\nclass Net(torch.nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(Net, self).__init__()\n self.fc1 = torch.nn.Linear(input_size, hidden_size)\n self.relu = torch.nn.ReLU()\n self.fc2 = torch.nn.Linear(hidden_size, num_classes)\n\n def forward(self, x):\n out = self.fc1(x)\n out = self.relu(out)\n out = self.fc2(out)\n return out\n\n\nmodel = Net(50, 100, 10)\nn_instance = 100\nbatch_size = 32\n\ntrain_x = np.random.random((n_instance, 50))\ntest_x = np.random.random((n_instance, 50))\ntrain_y = np.random.randint(0, 9, n_instance)\ntest_y = np.random.randint(0, 9, n_instance)\nprint(train_x.shape)\nprint(train_y.shape)\n\nencoder = OneHotEncoder()\nencoder.fit(train_y)\ntrain_y = encoder.transform(train_y)\ntest_y = encoder.transform(test_y)\n\ncompose_list = Compose([])\ntrain_data = DataLoader(MultiTransformDataset(torch.Tensor(train_x), torch.Tensor(train_y), compose_list), batch_size=batch_size, shuffle=False)\ntest_data = DataLoader(MultiTransformDataset(torch.Tensor(test_x), torch.Tensor(test_y), compose_list), batch_size=batch_size, shuffle=False)\n\nmodel_trainer = ModelTrainer(model,\n loss_function=classification_loss,\n metric=Accuracy,\n train_data=train_data,\n test_data=test_data,\n verbose=True)\n\nmodel_trainer.train_model(2, 1)\nmodel.eval()\n\noutputs = []\nwith torch.no_grad():\n for index, (inputs, _) in enumerate(test_data):\n outputs.append(model(inputs).numpy())\noutput = reduce(lambda x, y: np.concatenate((x, y)), outputs)\npredicted = encoder.inverse_transform(output)\nprint(predicted)\n\n" ]
[ [ "numpy.random.random", "torch.Tensor", "numpy.concatenate", "torch.nn.Linear", "torch.no_grad", "torch.nn.ReLU", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joelostblom/dash-docs
[ "7be5aed7795f61ac32375ce33a18046b8f2f5254" ]
[ "dash_docs/chapters/dash_datatable/interactivity/examples/interactivity_connected_to_graph.py" ]
[ "import dash\nfrom dash.dependencies import Input, Output\nimport dash_table\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\n\ndf = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/gapminder2007.csv')\n\napp = dash.Dash(__name__)\n\napp.layout = html.Div([\n dash_table.DataTable(\n id='datatable-interactivity',\n columns=[\n {\"name\": i, \"id\": i, \"deletable\": True, \"selectable\": True} for i in df.columns\n ],\n data=df.to_dict('records'),\n editable=True,\n filter_action=\"native\",\n sort_action=\"native\",\n sort_mode=\"multi\",\n column_selectable=\"single\",\n row_selectable=\"multi\",\n row_deletable=True,\n selected_columns=[],\n selected_rows=[],\n page_action=\"native\",\n page_current= 0,\n page_size= 10,\n ),\n html.Div(id='datatable-interactivity-container')\n])\n\[email protected](\n Output('datatable-interactivity', 'style_data_conditional'),\n Input('datatable-interactivity', 'selected_columns')\n)\ndef update_styles(selected_columns):\n return [{\n 'if': { 'column_id': i },\n 'background_color': '#D2F3FF'\n } for i in selected_columns]\n\[email protected](\n Output('datatable-interactivity-container', \"children\"),\n Input('datatable-interactivity', \"derived_virtual_data\"),\n Input('datatable-interactivity', \"derived_virtual_selected_rows\"))\ndef update_graphs(rows, derived_virtual_selected_rows):\n # When the table is first rendered, `derived_virtual_data` and\n # `derived_virtual_selected_rows` will be `None`. This is due to an\n # idiosyncrasy in Dash (unsupplied properties are always None and Dash\n # calls the dependent callbacks when the component is first rendered).\n # So, if `rows` is `None`, then the component was just rendered\n # and its value will be the same as the component's dataframe.\n # Instead of setting `None` in here, you could also set\n # `derived_virtual_data=df.to_rows('dict')` when you initialize\n # the component.\n if derived_virtual_selected_rows is None:\n derived_virtual_selected_rows = []\n\n dff = df if rows is None else pd.DataFrame(rows)\n\n colors = ['#7FDBFF' if i in derived_virtual_selected_rows else '#0074D9'\n for i in range(len(dff))]\n\n return [\n dcc.Graph(\n id=column,\n figure={\n \"data\": [\n {\n \"x\": dff[\"country\"],\n \"y\": dff[column],\n \"type\": \"bar\",\n \"marker\": {\"color\": colors},\n }\n ],\n \"layout\": {\n \"xaxis\": {\"automargin\": True},\n \"yaxis\": {\n \"automargin\": True,\n \"title\": {\"text\": column}\n },\n \"height\": 250,\n \"margin\": {\"t\": 10, \"l\": 10, \"r\": 10},\n },\n },\n )\n # check if column exists - user may have deleted it\n # If `column.deletable=False`, then you don't\n # need to do this check.\n for column in [\"pop\", \"lifeExp\", \"gdpPercap\"] if column in dff\n ]\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
tallamjr/shogun
[ "c964c9d1aab4bc1cf9133baf14d3bd5b96ba42de" ]
[ "examples/undocumented/python/kernel_top.py" ]
[ "#!/usr/bin/env python\nfrom tools.load import LoadMatrix\nimport shogun as sg\nimport numpy as np\nlm=LoadMatrix()\n\ntraindat = lm.load_dna('../data/fm_train_dna.dat')\ntestdat = lm.load_dna('../data/fm_test_dna.dat')\nlabel_traindat = lm.load_labels('../data/label_train_dna.dat')\n\nfm_hmm_pos=[traindat[i] for i in np.where([label_traindat==1])[1] ]\nfm_hmm_neg=[traindat[i] for i in np.where([label_traindat==-1])[1] ]\n\nparameter_list = [[traindat,testdat,label_traindat,1e-1,1,0,False,1], \\\n[traindat,testdat,label_traindat,1e-1,1,0,False,1 ]]\n\ndef kernel_top (fm_train_dna=traindat,fm_test_dna=testdat,label_train_dna=label_traindat,pseudo=1e-1,\n\torder=1,gap=0,reverse=False,c=1):\n\tfrom shogun import TOPFeatures\n\tfrom shogun import HMM, BW_NORMAL\n\n\tN=1 # toy HMM with 1 state\n\tM=4 # 4 observations -> DNA\n\n\t# train HMM for positive class\n\tcharfeat=sg.create_string_features(fm_hmm_pos, sg.DNA)\n\thmm_pos_train=sg.create_string_features(charfeat, order-1, order, gap, reverse)\n\tpos=HMM(hmm_pos_train, N, M, pseudo)\n\tpos.baum_welch_viterbi_train(BW_NORMAL)\n\n\t# train HMM for negative class\n\tcharfeat=sg.create_string_features(fm_hmm_neg, sg.DNA)\n\thmm_neg_train=sg.create_string_features(charfeat, order-1, order, gap, reverse)\n\tneg=HMM(hmm_neg_train, N, M, pseudo)\n\tneg.baum_welch_viterbi_train(BW_NORMAL)\n\n\t# Kernel training data\n\tcharfeat=sg.create_string_features(fm_train_dna, sg.DNA)\n\twordfeats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse)\n\n\t# Kernel testing data\n\tcharfeat=sg.create_string_features(fm_test_dna, sg.DNA)\n\twordfeats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse)\n\n\t# get kernel on training data\n\tpos.set_observations(wordfeats_train)\n\tneg.set_observations(wordfeats_train)\n\tfeats_train=TOPFeatures(10, pos, neg, False, False)\n\tkernel=sg.create_kernel(\"PolyKernel\", c=c)\n\tkernel.init(feats_train, feats_train)\n\tkm_train=kernel.get_kernel_matrix()\n\n\t# get kernel on testing data\n\tpos_clone=HMM(pos)\n\tneg_clone=HMM(neg)\n\tpos_clone.set_observations(wordfeats_test)\n\tneg_clone.set_observations(wordfeats_test)\n\tfeats_test=TOPFeatures(10, pos_clone, neg_clone, False, False)\n\tkernel.init(feats_train, feats_test)\n\tkm_test=kernel.get_kernel_matrix()\n\treturn km_train,km_test,kernel\n\nif __name__=='__main__':\n\tprint(\"TOP Kernel\")\n\tkernel_top(*parameter_list[0])\n" ]
[ [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ericjang/taichi
[ "641c4b83bcf98e7354b135964cd23759b0110c6b" ]
[ "tests/python/test_numpy.py" ]
[ "import taichi as ti\nimport numpy as np\n\[email protected]_test\ndef test_numpy():\n val = ti.var(ti.i32)\n\n n = 4\n\n @ti.layout\n def values():\n ti.root.dense(ti.i, n).place(val)\n\n @ti.kernel\n def test_numpy(arr: np.ndarray):\n for i in range(n):\n arr[i] = arr[i] ** 2\n\n a = np.array([4, 8, 1, 24], dtype=np.float32)\n \n for i in range(n):\n a[i] = i * 2\n\n test_numpy(a)\n \n for i in range(n):\n assert a[i] == i * i * 4\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dataronio/pyGPGO
[ "c628eec39d57d25929e6961b986378a3a35ffbd7" ]
[ "examples/sineGP.py" ]
[ "#######################################\n# pyGPGO examples\n# sineGP: Fits a Gaussian Process on a sine-like function.\n#######################################\n\nimport numpy as np\nfrom pyGPGO.surrogates.GaussianProcess import GaussianProcess\nfrom pyGPGO.covfunc import squaredExponential\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n # Build synthetic data (sine function)\n x = np.arange(0, 2 * np.pi + 0.01, step=np.pi / 2)\n y = np.sin(x)\n X = np.array([np.atleast_2d(u) for u in x])[:, 0]\n\n # Specify covariance function\n sexp = squaredExponential()\n\n # Instantiate GaussianProcess class\n gp = GaussianProcess(sexp)\n # Fit the model to the data\n gp.fit(X, y)\n\n # Predict on new data\n xstar = np.arange(0, 2 * np.pi, step=0.01)\n Xstar = np.array([np.atleast_2d(u) for u in xstar])[:, 0]\n ymean, ystd = gp.predict(Xstar, return_std=True)\n\n # Confidence interval bounds\n lower, upper = ymean - 1.96 * np.sqrt(ystd), ymean + 1.96 * np.sqrt(ystd)\n\n # Plot values\n plt.figure()\n plt.plot(xstar, ymean, label='Posterior mean')\n plt.plot(xstar, np.sin(xstar), label='True function')\n plt.fill_between(xstar, lower, upper, alpha=0.4, label='95% confidence band')\n plt.grid()\n plt.legend(loc=0)\n plt.show()" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.arange", "numpy.sin", "matplotlib.pyplot.plot", "numpy.atleast_2d", "matplotlib.pyplot.fill_between", "matplotlib.pyplot.grid", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shuxiaobo/Digit-image-Classify-with-one-channel
[ "ce937ac9c6b25b4aafbbdd5f6d981d96606d1e08" ]
[ "MyDateSet.py" ]
[ "from torch.utils.data.dataset import Dataset\nimport numpy as np\n\nclass MyDataSet(Dataset):\n \"\"\"Dataset wrapping images and target labels for Kaggle - Planet Amazon from Space competition.\n\n Arguments:\n A CSV file path\n Path to image folder\n Extension of images\n PIL transforms\n \"\"\"\n\n def __init__(self, npz_path = 'data.npz', train=True, transform=None, target_transform=None):\n self.train = train\n self.transform = transform\n self.target_transform = target_transform\n self.npz_path = npz_path\n npz_file = np.load(npz_path)\n\n # if self.train:\n self.train_X = np.array(npz_file['train_X'].astype(np.uint8))\n self.train_y = np.array(npz_file['train_y'].astype(np.int))\n # self.train_y = self.train_y.reshape(self.train_y.shape[0], 1)\n\n self.train_X = self.train_X.reshape(self.train_X.shape[0],1, 64, 64).transpose((0, 2, 3, 1))\n # else:\n self.test_X = np.array(npz_file['test_X'].astype(np.uint8))\n self.test_X = self.test_X.reshape(self.test_X.shape[0], 1, 64, 64).transpose((0, 2, 3, 1))\n\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n if self.train:\n img, target = self.train_X[index], self.train_y[index]\n else:\n img, target = self.test_X[index], self.train_y[index] # notice the test_y don't filled here ,you cannot use the to predict\n\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n # img = Image.fromarray(img)\n # 草拟吗啊,要转坐标轴啊 N * C * H * W\n\n\n if self.transform is not None:\n img = self.transform(img)\n else:\n img = img.transpose(2, 0, 1)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self):\n if self.train:\n return len(self.train_X)\n else:\n return len(self.test_X)" ]
[ [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cedadev/ipython_project
[ "fd9c85c20d3689435684cf4b681dfaab1c0a825b" ]
[ "seasonalmean.py" ]
[ "\"\"\"A module to compute the seasonal mean over a variable in a dataset.\n\nSee the run function. time_bounds may also be useful.\n\n\"\"\"\n\nfrom IPython.parallel import Client, interactive\nimport numpy\nfrom netCDF4 import MFDataset, num2date\n\n\ndef time_bounds (files, time_name = 'time'):\n \"\"\"Get first and last times, and length of time variable.\n\ntime_bounds(files, time_name = 'time') -> (first, last, length)\n\nfiles: as taken by netCDF4.MFDataset.\ntime_name: the name of the time variable; can actually be any one-dimensional\n variable.\n\nTimes are netCDF4.netcdftime.datetime objects. If length is 0, times are None.\n\n\"\"\"\n with MFDataset(files) as d:\n t = d.variables[time_name]\n l = len(t)\n if l == 0:\n times = (None, None)\n else:\n times = tuple(num2date((t[0], t[-1]), t.units, t.calendar))\n return times + (l,)\n\n\n@interactive\ndef _get_mean_worker (times):\n \"\"\"Used by get_mean_parallel.\"\"\"\n results = []\n index = [slice(None)] * time_index + [None, Ellipsis]\n for t0, t1 in times:\n index[time_index] = slice(t0, t1)\n arr = var[index]\n results.append(arr.mean(time_index))\n return numpy.array(results)\n\n\ndef get_mean_serial (var, time_index, times):\n \"\"\"Compute the seasonal mean.\n\nget_mean_serial(var, time_index, times) -> results\n\nvar: netCDF4 variable to average over.\ntime_index: the index of the time variable's dimension in var's dimensions.\ntimes: a list of (a, b) indices indicating sets of times to take the mean over\n (var[a:b]).\n\nresults: the var array with time now in seasons.\n\n\"\"\"\n results = []\n index = [slice(None)] * time_index + [None, Ellipsis]\n for t0, t1 in times:\n index[time_index] = slice(t0, t1)\n arr = var[index]\n results.append(arr.mean(time_index))\n return numpy.array(results)\n\n\ndef get_mean_parallel (dv, var, time_index, times):\n \"\"\"Compute the seasonal mean in parallel.\n\nget_mean_serial(dv, var, time_index, times) -> results\n\ndv: IPython DirectView to use.\nvar: netCDF4 variable to average over.\ntime_index: the index of the time variable's dimension in var's dimensions.\ntimes: a list of (a, b) indices indicating sets of times to take the mean over\n (var[a:b]).\n\nresults: the var array with time now in seasons.\n\n\"\"\"\n # transfer var to the engines\n dv.push({'var': var, 'time_index': time_index})\n # do the calculation\n results = dv.parallel(block = True)(_get_mean_worker)(times)\n # close datasets\n dv.execute('var.group().close()')\n # clean up variables\n dv.execute('del var, time_index')\n return results\n\n\ndef run (files, var_name, start_year, start_month, end_year, parallel = True,\n season_length = 3, engines = None, var_path = '/', time_path = '/',\n time_name = 'time'):\n \"\"\"Run a seasonal mean on a dataset.\n\nrun(files, var_name, start_year, end_year, start_month, parallel = True,\n season_length = 3, engines = None, var_path = '/', time_path = '/',\n time_name = 'time') -> results\n\nfiles: as taken by netCDF4.MFDataset.\nvar_name: the name of the variable to compute the mean of.\nstart_year: the year to start at (this year is included).\nend_year: the year to end at (this year is included).\nstart_month: the month to start at (this month is included).\nparallel: whether to run the computation in parallel (using IPython.parallel).\nseason_length: the length of a season in months.\nengines: a list of engines to use if running in parallel. 
The default is to\n use all available engines.\nvar_path, time_path: the path of the groups these variables are in within the\n dataset.\ntime_name: the name of the time variable. This can actually be any\n one-dimensional variable - it doesn't need to represent time.\n\nresults: the array for the var variable, with time now in seasons.\n\n\"\"\"\n if parallel:\n c = Client()\n dv = c[:]\n if engines is not None:\n dv.targets = engines\n dv.block = True\n dv.execute('import numpy')\n\n with MFDataset(files) as d:\n # find variables\n vs = []\n for path, v_name in ((time_path, time_name), (var_path, var_name)):\n g = d\n for g_name in path.strip('/').split('/'):\n if g_name:\n g = g.groups[g_name]\n vs.append(g.variables[v_name])\n time, var = vs\n time_index = var.dimensions.index(time.dimensions[0])\n # get time indices\n times = time[:]\n dates = num2date(times, time.units, time.calendar)\n started = False\n in_season = False\n time_indices = []\n n = len(times)\n i = 0\n while i < n:\n t = times[i]\n date = dates[i]\n if started:\n if in_season:\n if date.year >= season_end_year and \\\n date.month >= season_end_month:\n # season ended; (i0, i) is used as [i0:i], skipping i,\n # as we want\n in_season = False\n time_indices.append((i0, i))\n # next season might start here: don't increment counter\n continue\n elif date.year > end_year:\n # found end year (and no seasons are in progress)\n break\n elif date.month >= start_month:\n # found new season\n in_season = True\n # get end year/month (might be longer than a year)\n season_end_month = start_month + season_length\n season_end_year = date.year + season_end_month / 12\n season_end_month %= 12\n i0 = i\n elif date.year >= start_year:\n # found start year\n started = True\n # first season might start here: don't increment counter\n continue\n i += 1\n\n if parallel:\n results = get_mean_parallel(dv, var, time_index, time_indices)\n else:\n results = get_mean_serial(var, time_index, time_indices)\n return results" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ypar/cimr
[ "c82dc769965969e1868808f05947d218ff74f9b7" ]
[ "cimr/processor/convertibles.py" ]
[ "#!/usr/bin/env python3\n\"\"\"Utility functions to convert between values and units\nfor downstream analyses.\ne.g. log(OR) -> effect_size\n\"\"\"\n\n__author__ = \"yoson park\"\n\n\nimport re\nimport sys\nimport numpy\nimport pandas\nimport logging\n\nfrom scipy import stats\n\nfrom .constants import (EFFECT_SIZE, ODDS_RATIO, ZSCORE,\n PVALUE, STANDARD_ERROR, EFFECT_DIRECTION)\n\nfrom ..defaults import VERY_SMALL_P\n\n\ndef get_effect_direction(data):\n \"\"\"Distinguish datasets with absolute beta effects only.\n data is assumed to be a pandas dataframe.\n \"\"\"\n effect_direction = None\n\n if EFFECT_SIZE in data:\n logging.debug(f' checking direction of effects.')\n effect_direction = numpy.sign(data[EFFECT_SIZE])\n logging.debug(f' effect_direction[0:3]: {effect_direction[0:3]}')\n elif EFFECT_DIRECTION in data:\n logging.debug(f' checking effect_dir column.')\n effect_direction = data[EFFECT_DIRECTION]\n # Make it equivalent to numpy.sign\n effect_direction = effect_direction.apply(\n lambda x: 1.0 if (x == '+' or x == 1.0) else -1.0\n )\n logging.debug(f' effect_direction[0:3]: {effect_direction[0:3]}.')\n\n if effect_direction is None:\n logging.warning(f' effect direction is not provided.')\n\n return effect_direction\n\n\ndef _p_to_z(data, VERY_SMALL_P):\n \"\"\"Calculate z-scores from p-values.\"\"\"\n p = data[PVALUE].values\n\n if numpy.any(p == 0):\n logging.warning(f' p-value column contains zero(s).')\n logging.warning(f' This may be caused by numerical resolution limits.')\n logging.warning(f' Consider using beta/se columns or check your input data.')\n\n effect_direction = get_effect_direction(data)\n abs_z = -stats.norm.ppf(p / 2)\n\n if numpy.any(numpy.isinf(abs_z)) and VERY_SMALL_P:\n logging.warning(' thresholding zscores.')\n min_p = numpy.min(p[numpy.logical_and(numpy.isfinite(abs_z), p != 0)])\n if VERY_SMALL_P < min_p:\n min_p = VERY_SMALL_P\n fix_z = -stats.norm.ppf(min_p / 2)\n logging.warning(f' using {fix_z} to fill in divergent z-scores.')\n abs_z[numpy.isinf(abs_z)] = fix_z\n\n z = abs_z * effect_direction\n return z\n\n\ndef _or_to_beta(odd_ratio):\n \"\"\"Checks odds_ratio column values and returns beta.\"\"\"\n if numpy.any(numpy.where(odd_ratio < 0)):\n logging.error(f' odds_ratio column includes negative values.')\n sys.exit(1)\n if numpy.any(numpy.where(odd_ratio == 0)):\n logging.error(f' odds_ratio column includes zeroes.')\n sys.exit(1)\n\n return numpy.log(odd_ratio)\n\n\ndef estimate_se(data):\n \"\"\"Given z-scores and effect sizes, estimate standard errors.\"\"\"\n if STANDARD_ERROR in data.columns:\n logging.debug(f' {STANDARD_ERROR} is included.')\n elif (ZSCORE in data.columns and\n EFFECT_SIZE in data.columns and\n STANDARD_ERROR not in data.columns):\n logging.info(f' estimating {STANDARD_ERROR} from {EFFECT_SIZE} and {ZSCORE}.')\n data[STANDARD_ERROR] = data[EFFECT_SIZE] / data[ZSCORE]\n data['se_est_from_z_beta'] = 'True'\n else:\n logging.error(f' {STANDARD_ERROR} cannot be estimated based on available data.')\n sys.exit(1)\n\n return data\n\n\ndef get_z(data):\n \"\"\"Given dataset with effect_size and standard_error or\n p-value columns, calculate z-scores.\n \"\"\"\n if ZSCORE in data:\n logging.info(f' data contains z-score column.')\n z = data[ZSCORE]\n else:\n z = None\n if PVALUE in data:\n logging.info(f' calculating z-scores from p-values.')\n z = _p_to_z(data, VERY_SMALL_P)\n elif STANDARD_ERROR in data and EFFECT_SIZE in data:\n logging.info('calculating z-scores from se and beta')\n z = data[EFFECT_SIZE] / 
data[STANDARD_ERROR]\n\n if z is None:\n logging.warning(f' z-scores could not be calculated based on available data.')\n\n return data\n\n\ndef convert_z_to_p(data):\n \"\"\"Given z-scores, calculate p-values.\"\"\"\n if PVALUE in data.columns:\n logging.debug(f' {PVALUE} is included.')\n elif (ZSCORE in data.columns and\n PVALUE not in data.columns):\n data[PVALUE] = 2 * stats.norm.sf(numpy.absolute(data[ZSCORE]))\n data['p_est_from_z'] = 'True'\n else:\n logging.error(f' {PVALUE} cannot be estimated based on available data.')\n sys.exit(1)\n\n return data\n\n\ndef convert_p_to_z(data):\n if (PVALUE in data.columns and\n ZSCORE not in data.columns):\n data[ZSCORE] = _p_to_z(data, VERY_SMALL_P)\n data['z_est_from_p'] = 'True'\n else:\n logging.debug(f' {ZSCORE} is included.')\n\n return data\n\n\ndef convert_or_to_beta(data):\n \"\"\"Given odds ratio, calculate effect size.\"\"\"\n if EFFECT_SIZE in data.columns:\n logging.debug(f' {EFFECT_SIZE} is included.')\n elif (ODDS_RATIO in data.columns and\n EFFECT_SIZE not in data.columns):\n data[EFFECT_SIZE] = _or_to_beta(data[ODDS_RATIO])\n else:\n logging.warning(f' {EFFECT_SIZE} could not be estimated from available data.')\n\n return data\n\n" ]
[ [ "scipy.stats.norm.ppf", "numpy.log", "numpy.absolute", "numpy.isfinite", "numpy.sign", "numpy.any", "numpy.where", "numpy.isinf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tstirrat15/predictit_538_odds
[ "dc4ab8263ebbdd209fa03a6965c31c3a69c2cd1b" ]
[ "predictit_538_presidential.py" ]
[ "# TO DO\n# 1. Fair probability\n# 2. Hedge opportunities\n# 3. Datapane map\n# 4. Change since prior poll\n\n# Import modules\nimport json\nimport requests\nimport pandas as pd\nimport numpy as np\n\nPREDICTIT_URL = \"https://www.predictit.org/api/marketdata/all/\"\nFIVE_38_PRESIDENTIAL_POLL_URL = 'https://projects.fivethirtyeight.com/polls-page/president_polls.csv' # noqa E501\nFIVE_38_PRESIDENTIAL_AVERAGES_URL = 'https://projects.fivethirtyeight.com/2020-general-data/presidential_poll_averages_2020.csv' # noqa E501\nFIVE_38_PRESIDENTIAL_TOPLINE_URL = 'https://projects.fivethirtyeight.com/2020-general-data/presidential_state_toplines_2020.csv' # noqa E501\n\n\n# Replace null values with zero\n# TODO: this is a janky way of doing this. is it necessary?\ndef dict_clean(items):\n result = {}\n for key, value in items:\n if value is None:\n value = 0\n result[key] = value\n return result\n\n\n# TODO: put this into its own function\n# together with whatever is going on in the json logic below\nPredictit_response = requests.get(PREDICTIT_URL)\njsondata = Predictit_response.json()\n# TODO: wai\ndict_str = json.dumps(jsondata)\njsondata = json.loads(dict_str, object_pairs_hook=dict_clean)\n\n# Market data by contract/price in dataframe\n# TODO: do this with comprehensions\n# or maybe even dictionary accesses. This doesn't need to look like this.\n# Hell, we might even be able to construct a more clever URL.\ndata = []\n# TODO: why names\nfor p in jsondata['markets']:\n for k in p['contracts']:\n data.append([\n p['id'],\n p['name'],\n k['id'],\n k['name'],\n k['bestBuyYesCost'],\n k['bestBuyNoCost'],\n k['bestSellYesCost'],\n k['bestSellNoCost'],\n ])\n\n# Pandas dataframe named 'predictit_df'\npredictit_df = pd.DataFrame(data)\n\n# Update dataframe column names\n# TODO: is this just depending on t\npredictit_df.columns = [\n 'Market_ID',\n 'Market_Name',\n 'Contract_ID',\n 'Contract_Name',\n 'PredictIt_Yes',\n 'bestBuyNoCost',\n 'BestSellYesCost',\n 'BestSellNoCost'\n]\n\n# Filter PredicitIt dataframe to presidential state markets/contracts\npredictit_df = predictit_df[\n predictit_df['Market_Name'].str.contains(\"Which party will win\")\n # TODO: is this pandas magic?\n & predictit_df['Market_Name'].str.contains(\"2020 presidential election?\")\n]\n\n# Fix annoying typo (double space) in congressional district market names\npredictit_df['Market_Name'] = (\n predictit_df['Market_Name']\n .str.replace('in the 2020', 'in the 2020')\n)\n\n# Split Market_Name column into state name column\nstart_string = \"Which party will win\"\nend_string = \"in the 2020 presidential election?\"\n# TODO: what is this a/b shit?\npredictit_df['a'], predictit_df['state'] = (\n predictit_df['Market_Name']\n .str.split(start_string, 1).str\n)\npredictit_df['state'], predictit_df['b'] = (\n predictit_df['state']\n .str.split(end_string, 1).str\n)\n# TODO: yeah, gross.\ndel predictit_df['a']\ndel predictit_df['b']\n\n# Create answer column from contract names\npredictit_df['answer'] = (\n predictit_df['Contract_Name']\n .str.replace('Republican', 'Trump')\n .str.replace('Democratic', 'Biden')\n)\n\n# Strip trailing/leading whitespaces in answer and state columns\npredictit_df['state'] = predictit_df['state'].str.strip()\npredictit_df['answer'] = predictit_df['answer'].str.strip()\n\n\n# Pull in polling data from 538\npres_polling = pd.read_csv(FIVE_38_PRESIDENTIAL_POLL_URL)\npres_polling = pres_polling.dropna(subset=['state'])\n\n# Drop extraneous columns\npres_polling = pres_polling.drop([\n 'pollster_id',\n 
'sponsor_ids',\n 'sponsors',\n 'display_name',\n 'pollster_rating_id',\n 'pollster_rating_name',\n 'fte_grade',\n 'sample_size',\n 'population',\n 'population_full',\n 'methodology',\n 'seat_number',\n 'seat_name',\n 'start_date',\n 'sponsor_candidate',\n 'internal',\n 'partisan',\n 'tracking',\n 'nationwide_batch',\n 'ranked_choice_reallocated',\n 'notes',\n 'url'\n], axis=1)\n\n# Standardize congressional district names in 538 with PredictIt\npres_polling['state'] = (\n pres_polling['state']\n .str.replace('Maine CD-1', 'ME-01')\n)\npres_polling['state'] = (\n pres_polling['state']\n .str.replace('Maine CD-2', 'ME-02')\n)\npres_polling['state'] = (\n pres_polling['state']\n .str.replace('Nebraska CD-2', 'NE-02')\n)\n\n# Filter to most recent poll for Biden & Trump\n# create a count column for 'question_id' to work around\n# \"Delaware problem\": multiple matchups in same survey\npres_polling = (\n pres_polling\n .loc[pres_polling['pollster'] != 'SurveyMonkey']\n) # filter out SurveyMonkey polls\n# convert 'created_at' to datetime\npres_polling['created_at'] = pd.to_datetime(pres_polling['created_at'])\nrecent_pres_polling = (\n pres_polling[pres_polling['answer'].isin(['Biden', 'Trump'])]\n)\nrecent_pres_polling['Count'] = (\n recent_pres_polling\n .groupby('question_id')['question_id']\n .transform('count')\n)\nrecent_pres_polling = recent_pres_polling[(recent_pres_polling.Count > 1)]\nrecent_pres_polling = (\n recent_pres_polling\n .sort_values(by=['question_id'], ascending=False)\n .drop_duplicates(['state', 'candidate_name'], keep='first')\n)\n\n# Rename 538 'pct' column to '538_latest_poll'\nrecent_pres_polling = (\n recent_pres_polling\n .rename({'pct': '538_latest_poll'}, axis=1)\n)\n\n# Rename 538 'end_date' column to '538_poll_date'\nrecent_pres_polling = (\n recent_pres_polling\n .rename({'end_date': '538_poll_date'}, axis=1)\n)\n\n# Pull in polling data from 538 polling averages\npres_poll_avg = pd.read_csv(FIVE_38_PRESIDENTIAL_AVERAGES_URL)\n\n# Drop extraneous columns\npres_poll_avg = pres_poll_avg.drop(['cycle'], axis=1)\n\n# Standardize congressional district names in 538\n# polling averages with PredictIt\n# TODO: make these standardizations loop over a dictionary instead of having\n# a whole bunch of individual calls like this\n# Or see if there's a more sane way to call `replace` such that it does it all\n# at once\npres_poll_avg['state'] = (\n pres_poll_avg['state']\n .str.replace('Maine CD-1', 'ME-01')\n)\npres_poll_avg['state'] = (\n pres_poll_avg['state']\n .str.replace('Maine CD-2', 'ME-02')\n)\npres_poll_avg['state'] = (\n pres_poll_avg['state']\n .str.replace('Nebraska CD-2', 'NE-02')\n)\n\n# Standarize candidate names and column name\npres_poll_avg.replace({\n 'candidate_name': {\n 'Joseph R. Biden Jr.': 'Biden',\n 'Donald Trump': 'Trump'\n }\n})\npres_poll_avg['answer'] = pres_poll_avg['candidate_name']\n\n# Filter to most recent poll for Biden & Trump\n# NOTE: This is an example of a stupid comment. 
I want to know _why_\n# it's being done, not what is being done.\n# convert 'modeldate' to datetime\npres_poll_avg['modeldate'] = pd.to_datetime(pres_poll_avg['modeldate'])\npres_poll_avg = (\n pres_poll_avg\n .sort_values(by=['modeldate'])\n .drop_duplicates(['state', 'candidate_name'], keep='last')\n)\npres_poll_avg = pres_poll_avg[pres_poll_avg['answer'].isin(['Biden', 'Trump'])]\n\n# Round pct_estimate and pct_trend_adjusted to 2 decimal places\npres_poll_avg['pct_estimate'] = pres_poll_avg['pct_estimate'].round(2)\npres_poll_avg['pct_trend_adjusted'] = (\n pres_poll_avg['pct_trend_adjusted']\n .round(2)\n)\n\n# Merge 538 poll and 538 poll averages dataframes together\nrecent_pres_polling = pd.merge(recent_pres_polling,\n pres_poll_avg,\n on=['state', 'answer'],\n how='left')\n\n\n# Pull in most recent state-level model data from 538\npres_model = pd.read_csv(FIVE_38_PRESIDENTIAL_TOPLINE_URL)\n\n# Only keep latest models\npres_model = (\n pres_model\n .sort_values(by=['modeldate'], ascending=False)\n .drop_duplicates(['state', 'branch'], keep='first')\n)\n\n# TODO: names\n# TODO: a function to generate these? it's a lot of duplication\n# Split into 2 dataframes for Trump and Biden\npres_model_inc = pres_model[['candidate_inc',\n 'state',\n 'winstate_inc',\n 'voteshare_inc',\n 'voteshare_inc_hi',\n 'voteshare_inc_lo',\n 'win_EC_if_win_state_inc',\n 'win_state_if_win_EC_inc'\n ]]\npres_model_chal = pres_model[['candidate_chal',\n 'state',\n 'winstate_chal',\n 'voteshare_chal',\n 'voteshare_chal_hi',\n 'voteshare_chal_lo',\n 'win_EC_if_win_state_chal',\n 'win_state_if_win_EC_chal'\n ]]\n\n# TODO: is this literally just undoing the previous commands? Wat?\n# Remove _inc and _chal from column names\npres_model_inc = pres_model_inc.rename(columns={\n 'candidate_inc': 'answer',\n 'winstate_inc': 'winstate',\n 'voteshare_inc': 'voteshare',\n 'voteshare_inc_hi': 'voteshare_hi',\n 'voteshare_inc_lo': 'voteshare_lo',\n 'win_EC_if_win_state_inc': 'win_EC_if_win_state',\n 'win_state_if_win_EC_inc': 'win_state_if_win_EC'\n })\npres_model_chal = pres_model_chal.rename(columns={\n 'candidate_chal': 'answer',\n 'winstate_chal': 'winstate',\n 'voteshare_chal': 'voteshare',\n 'voteshare_chal_hi': 'voteshare_hi',\n 'voteshare_chal_lo': 'voteshare_lo',\n 'win_EC_if_win_state_chal': 'win_EC_if_win_state',\n 'win_state_if_win_EC_chal': 'win_state_if_win_EC'\n })\n\n# Concatenate Trump and Biden dataframes together\nframes = [pres_model_inc, pres_model_chal]\npres_model = pd.concat(frames)\n\n# Change 'District of Columbia' to 'DC'\npres_model['state'] = (\n pres_model['state']\n .str.replace('District of Columbia', 'DC')\n)\n\n# Standardize congressional district names\npres_model['state'] = pres_model['state'].str.replace('ME-1', 'ME-01')\npres_model['state'] = pres_model['state'].str.replace('ME-2', 'ME-02')\npres_model['state'] = pres_model['state'].str.replace('NE-1', 'NE-01')\npres_model['state'] = pres_model['state'].str.replace('NE-2', 'NE-02')\npres_model['state'] = pres_model['state'].str.replace('NE-3', 'NE-03')\n\n# Rename 538 'end_date' column to '538_poll_date'\npres_model = pres_model.rename({'winstate': '538_model'}, axis=1)\n\n# Pull in gambling odds\nodds_df = pd.read_csv(\n 'https://raw.githubusercontent.com/mauricebransfield/predictit_538_odds/master/odds_state_presidential.csv', # noqa E501\n index_col=[0])\n\n# Replace hyphen in state names with space\nodds_df['state'] = odds_df['state'].str.replace('-', ' ')\n\n# Standardize Washington DC & Washington State\nodds_df['state'] = 
odds_df['state'].str.replace('Washington Dc', 'DC')\nodds_df['state'] = (\n odds_df['state']\n .str.replace('Washington State', 'Washington')\n)\n\n# Replace party with candidate names\nodds_df['answer'] = odds_df['answer'].str.replace('Republicans', 'Trump')\nodds_df['answer'] = odds_df['answer'].str.replace('Democratic', 'Biden')\nodds_df['answer'] = odds_df['answer'].str.replace('Democrats', 'Biden')\nodds_df['answer'] = odds_df['answer'].str.replace('Democrat', 'Biden')\n\n# Drop rows with\nodds_df = odds_df[odds_df.answer != '\\n\\n']\n\n# Drop columns with all nan values\nodds_df = odds_df.dropna(axis=1, how='all')\n\n# Convert odds_df column headers to list\nodds_df_columns = list(odds_df.columns.values)\nodds_df_columns.remove('answer')\nodds_df_columns.remove('state')\nodds_df_loop = odds_df.copy()\ndel odds_df_loop['answer']\ndel odds_df_loop['state']\n\n\n# GASP! a function!\ndef split_more(x):\n return pd.Series(x.split('/'))\n\n\n# denominator / (denominator + numerator) = implied probability\n# Loop through odds columns to convert fractional\n# odds to new column of implied probability\nfor i in odds_df_columns:\n odds_df_loop['numerator'], odds_df_loop['denominator'] = (\n odds_df_loop[i].str.split('/', 1).str\n )\n odds_df_loop['denominator'] = (\n pd\n .to_numeric(odds_df_loop['denominator'], errors='coerce')\n .fillna(0).astype(np.int64)\n )\n # TODO: figure out what this workaround is supposed to be\n odds_df_loop['denominator'] = (\n odds_df_loop['denominator']\n .mask(odds_df_loop['denominator'] == 0)\n .fillna(1)\n ) # workaround\n odds_df_loop['numerator'] = (\n pd\n .to_numeric(odds_df_loop['numerator'], errors='coerce')\n .fillna(0).astype(np.int64)\n )\n odds_df_loop[str(i) + '_imp_prob'] = (\n odds_df_loop['denominator'] / (\n odds_df_loop['denominator']\n + odds_df_loop['numerator']\n )\n ).round(2)\n\n# Concatenate imp_prob columns with 'answer' and 'state' columns\nasdf = [odds_df['answer'], odds_df['state']]\nheaders = [\"answer\", \"state\"]\nas_df = pd.concat(asdf, axis=1, keys=headers)\nodds_imp_prob_df = pd.concat([odds_df_loop, as_df], axis=1)\n\n# Merge PredictIt and odds dataframes together\ndf = pd.merge(predictit_df,\n odds_imp_prob_df,\n on=['state', 'answer'],\n how='left')\n\n# Merge 538 polls into new dataframe\ndf = pd.merge(df, recent_pres_polling, on=['state', 'answer'], how='left')\n\n# Merge 538 models into new dataframe\ndf = pd.merge(df, pres_model, on=['state', 'answer'], how='left')\n\n# workaround to fix previous workaround\n# TODO: wat @ the above\n# TODO: names\nfor i in odds_df_columns:\n mask = df[i].isnull()\n column_name = str(i) + '_imp_prob'\n df.loc[mask, column_name] = np.nan\n\n# Find average of all implied probabilities\nm = df.loc[:, df.columns.str.contains('_imp_prob')]\nodds_df_columns2 = list(m.columns.values)\ndf['ari_mean_imp_prob'] = df[odds_df_columns2].mean(1).round(2)\n\n# Sort alphabetically by state and answer\ndf = df.sort_values([\"state\", \"answer\"])\n\n# Create column matching Trump Yes cost with Biden No cost, and vice versa\ntrump = (df['answer'] == 'Trump')\ndf.loc[trump, 'PredictIt_Oppo_No'] = df.loc[\n df['answer'] == 'Biden', 'bestBuyNoCost'\n].values\nbiden = (df['answer'] == 'Biden')\ndf.loc[biden, 'PredictIt_Oppo_No'] = df.loc[\n df['answer'] == 'Trump', 'bestBuyNoCost'\n].values\n\n# Create column of difference in betting odds & PredictIt\ndf['ari_mean_imp_prob-PredictIt_Yes'] = (\n df['ari_mean_imp_prob']-df['PredictIt_Yes']\n).round(2)\n\n# Create column of difference in 538 & 
PredictIt\ndf['538-PredictIt_Yes'] = (df['538_model']-df['PredictIt_Yes']).round(2)\n\n# Create column of difference in 538 & betting odds\ndf['538-ari_mean_imp_prob'] = (\n df['538_model']-df['ari_mean_imp_prob']\n).round(2)\n\n# Create column of difference in 538 & Economist\ndf['538-Econ'] = (df['538_model']-df['Econ_model']).round(2)\n\n# Print out select columns\nprint(df[['state',\n 'answer',\n '538_latest_poll',\n '538_poll_date',\n '538_model',\n 'Econ_model',\n 'PredictIt_Yes',\n 'PredictIt_Oppo_No',\n 'ari_mean_imp_prob',\n 'ari_mean_imp_prob-PredictIt_Yes',\n '538-PredictIt_Yes',\n '538-ari_mean_imp_prob',\n '538-Econ']]\n )\n" ]
[ [ "pandas.merge", "pandas.read_csv", "pandas.to_datetime", "pandas.concat", "pandas.DataFrame", "pandas.to_numeric" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
cutz-j/CS231n
[ "f8616b19a9c6ad4a8c88610a6bf3576450a25054" ]
[ "assignment2/cs231n/data_utils.py" ]
[ "import pickle\nimport numpy as np\nimport os\nfrom imageio import imread\nDIR_CS231n = '/Users/thorey/Documents/MLearning/CS231/assignment2/'\n\n\ndef load_CIFAR_batch(filename):\n \"\"\" load single batch of cifar \"\"\"\n with open(filename, 'rb') as f:\n datadict = pickle.load(f, encoding='latin1')\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype(\"float\")\n Y = np.array(Y)\n return X, Y\n\n\ndef load_CIFAR10(ROOT):\n \"\"\" load all of cifar \"\"\"\n xs = []\n ys = []\n for b in range(1, 6):\n f = os.path.join(ROOT, 'data_batch_%d' % (b, ))\n X, Y = load_CIFAR_batch(f)\n xs.append(X)\n ys.append(Y)\n Xtr = np.concatenate(xs)\n Ytr = np.concatenate(ys)\n del X, Y\n Xte, Yte = load_CIFAR_batch(os.path.join(ROOT, 'test_batch'))\n return Xtr, Ytr, Xte, Yte\n\n\ndef get_CIFAR10_data(dir_path, num_training=49000, num_validation=1000, num_test=1000):\n \"\"\"\n Load the CIFAR-10 dataset from disk and perform preprocessing to prepare\n it for classifiers. These are the same steps as we used for the SVM, but\n condensed to a single function.\n \"\"\"\n # Load the raw CIFAR-10 data\n cifar10_dir = os.path.join(dir_path, 'cs231n/datasets/cifar-10-batches-py')\n X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)\n\n # Subsample the data\n mask = range(num_training, num_training + num_validation)\n X_val = X_train[mask]\n y_val = y_train[mask]\n mask = range(num_training)\n X_train = X_train[mask]\n y_train = y_train[mask]\n mask = range(num_test)\n X_test = X_test[mask]\n y_test = y_test[mask]\n\n # Normalize the data: subtract the mean image\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_val -= mean_image\n X_test -= mean_image\n\n # Transpose so that channels come first\n X_train = X_train.transpose(0, 3, 1, 2).copy()\n X_val = X_val.transpose(0, 3, 1, 2).copy()\n X_test = X_test.transpose(0, 3, 1, 2).copy()\n\n # Package data into a dictionary\n return {\n 'X_train': X_train, 'y_train': y_train,\n 'X_val': X_val, 'y_val': y_val,\n 'X_test': X_test, 'y_test': y_test,\n }\n\n\ndef load_tiny_imagenet(path, dtype=np.float32):\n \"\"\"\n Load TinyImageNet. 
Each of TinyImageNet-100-A, TinyImageNet-100-B, and\n TinyImageNet-200 have the same directory structure, so this can be used\n to load any of them.\n\n Inputs:\n - path: String giving path to the directory to load.\n - dtype: numpy datatype used to load the data.\n\n Returns: A tuple of\n - class_names: A list where class_names[i] is a list of strings giving the\n WordNet names for class i in the loaded dataset.\n - X_train: (N_tr, 3, 64, 64) array of training images\n - y_train: (N_tr,) array of training labels\n - X_val: (N_val, 3, 64, 64) array of validation images\n - y_val: (N_val,) array of validation labels\n - X_test: (N_test, 3, 64, 64) array of testing images.\n - y_test: (N_test,) array of test labels; if test labels are not available\n (such as in student code) then y_test will be None.\n \"\"\"\n # First load wnids\n with open(os.path.join(path, 'wnids.txt'), 'r') as f:\n wnids = [x.strip() for x in f]\n\n # Map wnids to integer labels\n wnid_to_label = {wnid: i for i, wnid in enumerate(wnids)}\n\n # Use words.txt to get names for each class\n with open(os.path.join(path, 'words.txt'), 'r') as f:\n wnid_to_words = dict(line.split('\\t') for line in f)\n for wnid, words in wnid_to_words.iteritems():\n wnid_to_words[wnid] = [w.strip() for w in words.split(',')]\n class_names = [wnid_to_words[wnid] for wnid in wnids]\n\n # Next load training data.\n X_train = []\n y_train = []\n for i, wnid in enumerate(wnids):\n if (i + 1) % 20 == 0:\n print ('loading training data for synset %d / %d' % (i + 1, len(wnids)))\n # To figure out the filenames we need to open the boxes file\n boxes_file = os.path.join(path, 'train', wnid, '%s_boxes.txt' % wnid)\n with open(boxes_file, 'r') as f:\n filenames = [x.split('\\t')[0] for x in f]\n num_images = len(filenames)\n\n X_train_block = np.zeros((num_images, 3, 64, 64), dtype=dtype)\n y_train_block = wnid_to_label[wnid] * \\\n np.ones(num_images, dtype=np.int64)\n for j, img_file in enumerate(filenames):\n img_file = os.path.join(path, 'train', wnid, 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n # grayscale file\n img.shape = (64, 64, 1)\n X_train_block[j] = img.transpose(2, 0, 1)\n X_train.append(X_train_block)\n y_train.append(y_train_block)\n\n # We need to concatenate all training data\n X_train = np.concatenate(X_train, axis=0)\n y_train = np.concatenate(y_train, axis=0)\n\n # Next load validation data\n with open(os.path.join(path, 'val', 'val_annotations.txt'), 'r') as f:\n img_files = []\n val_wnids = []\n for line in f:\n img_file, wnid = line.split('\\t')[:2]\n img_files.append(img_file)\n val_wnids.append(wnid)\n num_val = len(img_files)\n y_val = np.array([wnid_to_label[wnid] for wnid in val_wnids])\n X_val = np.zeros((num_val, 3, 64, 64), dtype=dtype)\n for i, img_file in enumerate(img_files):\n img_file = os.path.join(path, 'val', 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n img.shape = (64, 64, 1)\n X_val[i] = img.transpose(2, 0, 1)\n\n # Next load test images\n # Students won't have test labels, so we need to iterate over files in the\n # images directory.\n img_files = os.listdir(os.path.join(path, 'test', 'images'))\n X_test = np.zeros((len(img_files), 3, 64, 64), dtype=dtype)\n for i, img_file in enumerate(img_files):\n img_file = os.path.join(path, 'test', 'images', img_file)\n img = imread(img_file)\n if img.ndim == 2:\n img.shape = (64, 64, 1)\n X_test[i] = img.transpose(2, 0, 1)\n\n y_test = None\n y_test_file = os.path.join(path, 'test', 'test_annotations.txt')\n if 
os.path.isfile(y_test_file):\n with open(y_test_file, 'r') as f:\n img_file_to_wnid = {}\n for line in f:\n line = line.split('\\t')\n img_file_to_wnid[line[0]] = line[1]\n y_test = [wnid_to_label[img_file_to_wnid[img_file]]\n for img_file in img_files]\n y_test = np.array(y_test)\n\n return class_names, X_train, y_train, X_val, y_val, X_test, y_test\n\n\ndef load_models(models_dir):\n \"\"\"\n Load saved models from disk. This will attempt to unpickle all files in a\n directory; any files that give errors on unpickling (such as README.txt) will\n be skipped.\n\n Inputs:\n - models_dir: String giving the path to a directory containing model files.\n Each model file is a pickled dictionary with a 'model' field.\n\n Returns:\n A dictionary mapping model file names to models.\n \"\"\"\n models = {}\n for model_file in os.listdir(models_dir):\n with open(os.path.join(models_dir, model_file), 'rb') as f:\n try:\n models[model_file] = pickle.load(f)['model']\n except pickle.UnpicklingError:\n continue\n return models\n" ]
[ [ "numpy.ones", "numpy.concatenate", "numpy.mean", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Lonely-Troubadour/IoT-Data-Mining
[ "684c201fb6278dd4f443bfed32967505cf87963d" ]
[ "lab2/NaiveBayes.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Naive Bayes python implementation.\n\nHomework of IoT Information processing Lab 2. A simple implementation\nof Naive Bayes algorithm.\n\nExample:\n $ python NaiveBayes.py\n $ python NiaveBayes.py -k num_of_iterations\n $ python NaiveBayes.py -k 25\n\nAuthor: Yongjian Hu\nLicense: MIT License\n\"\"\"\nfrom collections import defaultdict\n\nimport pandas as pd\nimport random\nimport math\nimport array\nimport argparse\n\n\ndef read_file(file_path):\n \"\"\"Read data file from disk.\n\n Args:\n file_path (str): Path to file on disk.\n\n Returns:\n df. The data frame object contains the data set.\n \"\"\"\n col_names = [\"x1\", \"x2\", \"x3\", \"x4\", \"class\"]\n df = pd.read_csv(file_path, names=col_names)\n return df\n\n\ndef bootstrap(data, length):\n \"\"\"Partition the data set to training set and testing set.\n\n Args:\n data (pandas.DataFrame): Data frame that contains the data set.\n length (int): The length of data set.\n\n Return:\n training set and testing set.\n \"\"\"\n index = random.randint(0, length - 1)\n training_set = pd.DataFrame()\n testing_set = pd.DataFrame()\n index_set = set()\n\n # Select training set\n for _ in range(length):\n index_set.add(index)\n row = data.iloc[index]\n training_set = training_set.append(row)\n index = random.randint(0, length - 1)\n\n # Let the remaining to be testing set\n for i in range(length):\n if i not in index_set:\n testing_set = testing_set.append((data.iloc[i]))\n\n return training_set, testing_set\n\n\ndef feature_scaler(x_train, x_test):\n \"\"\"Feature scaler. Standardize the features.\n\n Args:\n x_train (pandas.DataFrame): features of training set.\n x_test (pandas.DataFrame): features of testing set.\n\n Returns:\n Training set and testing set after scaling.\n \"\"\"\n mean = x_train.mean()\n std = x_train.std(ddof=0)\n x_train = (x_train - mean) / std\n x_test = (x_test - mean) / std\n return x_train, x_test\n\n\ndef calc_accuracy(y_test, prediction):\n \"\"\"Accuracy of the prediction.\n\n Args:\n y_test (pandas.DataFrame): Actual classes of test set.\n prediction (list): Predicted classes of test set.\n\n Returns:\n Accuracy of the prediction.\n \"\"\"\n count = 0\n length = len(y_test)\n for i in range(length):\n if prediction[i] == y_test.iloc[i]:\n count += 1\n\n return count / length\n\n\nclass NaiveBayesClassifier:\n \"\"\"Naive Bayes Classifier.\n\n Attributes:\n x_train (pandas.DataFrame): Training set.\n y_train (pandas.DataFrame): Classes of training set.\n feature_num (int): Feature number.\n class_num (int): Class number.\n prob_y (dict): prior probability of each class.\n feature_mean (dict): Mean of each feature of each class.\n feature_std (dict): Standard deviation of each feature of each class.\n length (int): Length of training set..\n \"\"\"\n\n def __init__(self, x_train, y_train, feature_num, class_num):\n \"\"\"Initialize naive Bayes classifier.\n\n Args:\n x_train (pandas.DataFrame): Training set.\n y_train (pandas.DataFrame): Classes of training set.\n feature_num (int): No. of features.\n class_num (int): No. 
of classes.\n \"\"\"\n self.x_train = x_train\n self.y_train = y_train\n self.feature_num = feature_num\n self.class_num = class_num\n self.prob_y = defaultdict(float)\n self.feature_mean = defaultdict(array.array)\n self.feature_std = defaultdict(array.array)\n self.length = x_train.shape[0]\n\n def train(self):\n \"\"\"Train the Gaussian Naive Bayes model.\n\n Returns:\n Prior probability of each class, \n Mean value and standard deviation of each feature of different classes.\n \"\"\"\n\n # Get Probability(c), prior prob of each class c.\n class_count = self.y_train.groupby(self.y_train).size()\n for class_, count in class_count.items():\n self.prob_y[class_] = round(count / self.length, 6)\n self.prob_y = dict(self.prob_y)\n\n # Get mean and std for each feature of each class.\n feature_sum = defaultdict(array.array)\n feature_mean = defaultdict(array.array)\n feature_std = defaultdict(array.array)\n\n # Initialize array in dict.\n for class_ in self.prob_y.keys():\n feature_sum[class_] = array.array('f', [.0 for _ in range(self.feature_num)])\n feature_mean[class_] = array.array('f', [.0 for _ in range(self.feature_num)])\n feature_std[class_] = array.array('f', [.0 for _ in range(self.feature_num)])\n\n # Sum.\n for i in range(self.length):\n for j in range(self.feature_num):\n feature_sum[self.y_train.iloc[i]][j] += self.x_train.iloc[i, j]\n\n # Mean.\n for class_, count in class_count.items():\n for j in range(self.feature_num):\n feature_mean[class_][j] = feature_sum[class_][j] / count\n\n # Standard deviation.\n for i in range(self.length):\n class_ = self.y_train.iloc[i]\n for j in range(self.feature_num):\n feature_std[class_][j] += (self.x_train.iloc[i, j] - feature_mean[class_][j]) ** 2\n\n for class_, count in class_count.items():\n for j in range(self.feature_num):\n feature_std[class_][j] = (feature_std[class_][j] / count) ** 0.5\n\n self.feature_mean = dict(feature_mean)\n self.feature_std = dict(feature_std)\n\n return self.prob_y, self.feature_mean, self.feature_std\n\n def gaussian_pdf(self, x, mean, std):\n \"\"\"Gaussian distribution, probability density function.\n\n N(x, mu, theta) = ( 1/(2pi)^0.5 theta ) * ( e^-( (x - mu)^2/2 theta^2 ) )\n\n Args:\n x (float): probability.\n mean (float): mean value.\n std (float): standard deviation.\n\n Returns:\n Probability.\n \"\"\"\n prob = math.e ** (-(x - mean) ** 2 / (2 * std ** 2)) / ((2 * math.pi) ** 0.5 * std)\n return prob\n\n def joint_prob(self, test_data):\n \"\"\"Calculate joint probability of likelihood and prior probability.\n\n Args:\n test_data (list): Test data set, contains features of the test data.\n\n Returns:\n Joint probability of each class.\n \"\"\"\n joint_probs = defaultdict(float)\n for class_ in self.prob_y.keys():\n likelihood = 1.0\n\n # Calculate likelihood first.\n for i in range(self.feature_num):\n feature = test_data[i]\n mean = self.feature_mean[class_][i]\n std = self.feature_std[class_][i]\n gaussian_prob = self.gaussian_pdf(feature, mean, std)\n likelihood += gaussian_prob\n\n # Calculate prior_prob * likelihood.\n prior_prob = self.prob_y[class_]\n joint_probs[class_] = prior_prob * likelihood\n return dict(joint_probs)\n\n def get_max(self, test_data):\n \"\"\"Get maximum probability from all joint probabilities,\n and hence predict the class.\n\n Args:\n test_data (list): Test data set, contains features of test data.\n\n Returns:\n Predicted class that has the max probability.\n \"\"\"\n joint_probs = self.joint_prob(test_data)\n max_prob = max(joint_probs, key=joint_probs.get)\n 
return max_prob\n\n def predict(self, test_set):\n \"\"\"Predict on the give test set.\n\n Args:\n test_set (pandas.DataFrame): Test data set.\n\n Returns:\n List of predictions.\n \"\"\"\n prediction = list()\n for row in test_set.values:\n max_prob = self.get_max(row)\n prediction.append(max_prob)\n return prediction\n\n\ndef bootstrap_accuracy(data_set, k=20):\n \"\"\"Calculate model accuracy using .632 bootstrap.\n\n Args:\n data_set (pandas.DataFrame): Data set.\n k (int): The number of iterations. Default is 20\n\n Returns:\n Accuracy of the model.\n \"\"\"\n acc_sum = 0\n for i in range(k):\n # Partition\n training_set, testing_set = bootstrap(data_set, data_set.shape[0])\n\n # Separates features and classes\n x_train = training_set.iloc[:, 1:5]\n y_train = training_set.iloc[:, 0]\n x_test = testing_set.iloc[:, 1:5]\n y_test = testing_set.iloc[:, 0]\n\n # Feature scaling\n x_train, x_test = feature_scaler(x_train, x_test)\n\n # Train\n classifier = NaiveBayesClassifier(x_train, y_train, 4, 3)\n classifier.train()\n\n # Predict\n prediction_test = classifier.predict(x_test)\n prediction_train = classifier.predict(x_train)\n\n # Accuracy\n acc_test = calc_accuracy(y_test, prediction_test)\n acc_train = calc_accuracy(y_train, prediction_train)\n\n print(\"Iteration \" + str(i) + \": \", end=\"\")\n print(\"Acc_test = \" + str(acc_test) + \", Acc_train = \" + str(acc_train))\n acc_sum += 0.632 * acc_test + 0.368 * acc_train\n\n return acc_sum / k\n\n\nif __name__ == '__main__':\n # parse argument\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-k\", help=\"Number of iteration, default 20\", \\\n type=int, default=20)\n args = parser.parse_args()\n\n # Check k value\n if (args.k <= 0):\n raise Exception(\"Invalid k. k should be > 0\", args.k)\n\n # Read file\n df = read_file('Iris.csv')\n\n # Using .632 bootstrap\n accuracy = bootstrap_accuracy(df, args.k)\n\n # Print model accuracy\n print(\"Accuracy \" + str(accuracy))\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
kuy04195/MaskRCNN
[ "44998883fb1c73ed1a78ad9a5abd2f2c1c1a717b" ]
[ "detect.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nsys.path.append(\"Mask_RCNN\")\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport skimage\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport PIL\n\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model import log\n\n# Root directory of the project\nROOT_DIR = os.getcwd()\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n# Download COCO trained weights from Releases if needed\nif not os.path.exists(COCO_MODEL_PATH):\n utils.download_trained_weights(COCO_MODEL_PATH)\n\n# Directory of images to run detection on\nIMAGE_DIR = os.path.join(ROOT_DIR, \"../data/leftImg8bit/test\")\n\n\nclass CityscapeConfig(Config):\n \"\"\"Configuration for training on the cityscape dataset.\n Derives from the base Config class and overrides values specific\n to the cityscape dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"cityscape\"\n\n # We use a GPU with 12GB memory.\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1 # 8\n\n # Number of training steps per epoch\n STEPS_PER_EPOCH = 6000\n\n # Number of validation steRPNps to run at the end of every training epoch.\n VALIDATION_STEPS = 300\n\n # Backbone network architecture\n # Supported values are: resnet50, resnet101\n BACKBONE = \"resnet50\"\n\n # Number of classes (including background)\n NUM_CLASSES = 1 + 6 # background + 1 shapes\n\n # Input image resing\n IMAGE_RESIZE_MODE = \"square\"\n IMAGE_MIN_DIM = 800\n IMAGE_MAX_DIM = 1024\n\n # Learning rate and momentum\n LEARNING_RATE = 0.01\n\nconfig = CityscapeConfig()\n\n\nclass InferenceConfig(CityscapeConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n IMAGE_MIN_DIM = 1024\n IMAGE_MAX_DIM = 1024\n\ninference_config = InferenceConfig()\n\n# Recreate the model in inference mode\nmodel = modellib.MaskRCNN(mode=\"inference\",\n config=inference_config,\n model_dir=MODEL_DIR)\n\n# Get path to saved weights\n# Either set a specific path or find last trained weights\n# model_path = os.path.join(ROOT_DIR, \".h5 file name here\")\nmodel_path = model.find_last()[1]\n\n# Load trained weights (fill in path to trained weights here)\n# assert model_path != \"\", \"Provide path to trained weights\"\n# print(\"Loading weights from \", model_path)\nmodel.load_weights(model_path, by_name=True)\n# model.load_weights(COCO_MODEL_PATH, by_name=True)\n\nclass_names = ['BG', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']\n\n# Load random images from the images folder\nfile_names = next(os.walk(IMAGE_DIR))[2]\nrandom_files = np.random.choice(file_names, 2)\nfor random_file in random_files:\n image = skimage.io.imread(os.path.join(IMAGE_DIR, random_file))\n\n # Run detection\n results = model.detect([image], verbose=1)\n r = results[0]\n\n # Visualize results\n visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],\n class_names, r['scores'])\n\n\n" ]
[ [ "numpy.random.choice" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fmeirinhos/pytorch-hessianfree
[ "7136c79381b1aca8d37ae8866fe185dc938f0c1e" ]
[ "hessianfree.py" ]
[ "import torch\nfrom torch.nn.utils.convert_parameters import vector_to_parameters, parameters_to_vector\nfrom functools import reduce\n\n\nclass HessianFree(torch.optim.Optimizer):\n \"\"\"\n Implements the Hessian-free algorithm presented in `Training Deep and\n Recurrent Networks with Hessian-Free Optimization`_.\n\n Arguments:\n params (iterable): iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float, optional): learning rate (default: 1)\n delta_decay (float, optional): Decay of the previous result of\n computing delta with conjugate gradient method for the\n initialization of the next conjugate gradient iteration\n damping (float, optional): Initial value of the Tikhonov damping\n coefficient. (default: 0.5)\n max_iter (int, optional): Maximum number of Conjugate-Gradient\n iterations (default: 50)\n use_gnm (bool, optional): Use the generalized Gauss-Newton matrix:\n probably solves the indefiniteness of the Hessian (Section 20.6)\n verbose (bool, optional): Print statements (debugging)\n\n .. _Training Deep and Recurrent Networks with Hessian-Free Optimization:\n https://doi.org/10.1007/978-3-642-35289-8_27\n \"\"\"\n\n def __init__(self, params,\n lr=1,\n damping=0.5,\n delta_decay=0.95,\n cg_max_iter=100,\n use_gnm=True,\n verbose=False):\n\n if not (0.0 < lr <= 1):\n raise ValueError(\"Invalid lr: {}\".format(lr))\n\n if not (0.0 < damping <= 1):\n raise ValueError(\"Invalid damping: {}\".format(damping))\n\n if not cg_max_iter > 0:\n raise ValueError(\"Invalid cg_max_iter: {}\".format(cg_max_iter))\n\n defaults = dict(alpha=lr,\n damping=damping,\n delta_decay=delta_decay,\n cg_max_iter=cg_max_iter,\n use_gnm=use_gnm,\n verbose=verbose)\n super(HessianFree, self).__init__(params, defaults)\n\n if len(self.param_groups) != 1:\n raise ValueError(\n \"HessianFree doesn't support per-parameter options (parameter groups)\")\n\n self._params = self.param_groups[0]['params']\n\n def _gather_flat_grad(self):\n views = list()\n for p in self._params:\n if p.grad is None:\n view = p.data.new(p.data.numel()).zero_()\n elif p.grad.data.is_sparse:\n view = p.grad.data.to_dense().view(-1)\n else:\n view = p.grad.contiguous().view(-1)\n views.append(view)\n return torch.cat(views, 0)\n\n def step(self, closure, b=None, M_inv=None):\n \"\"\"\n Performs a single optimization step.\n\n Arguments:\n closure (callable): A closure that re-evaluates the model\n and returns a tuple of the loss and the output.\n b (callable, optional): A closure that calculates the vector b in\n the minimization problem x^T . A . 
x + x^T b.\n M (callable, optional): The INVERSE preconditioner of A\n \"\"\"\n assert len(self.param_groups) == 1\n\n group = self.param_groups[0]\n alpha = group['alpha']\n delta_decay = group['delta_decay']\n cg_max_iter = group['cg_max_iter']\n damping = group['damping']\n use_gnm = group['use_gnm']\n verbose = group['verbose']\n\n state = self.state[self._params[0]]\n state.setdefault('func_evals', 0)\n state.setdefault('n_iter', 0)\n\n loss_before, output = closure()\n current_evals = 1\n state['func_evals'] += 1\n\n # Gather current parameters and respective gradients\n flat_params = parameters_to_vector(self._params)\n flat_grad = self._gather_flat_grad()\n\n # Define linear operator\n if use_gnm:\n # Generalized Gauss-Newton vector product\n def A(x):\n return self._Gv(loss_before, output, x, damping)\n else:\n # Hessian-vector product\n def A(x):\n return self._Hv(flat_grad, x, damping)\n\n if M_inv is not None:\n m_inv = M_inv()\n\n # Preconditioner recipe (Section 20.13)\n if m_inv.dim() == 1:\n m = (m_inv + damping) ** (-0.85)\n\n def M(x):\n return m * x\n else:\n m = torch.inverse(m_inv + damping * torch.eye(*m_inv.shape))\n\n def M(x):\n return m @ x\n else:\n M = None\n\n b = flat_grad.detach() if b is None else b().detach().flatten()\n\n # Initializing Conjugate-Gradient (Section 20.10)\n if state.get('init_delta') is not None:\n init_delta = delta_decay * state.get('init_delta')\n else:\n init_delta = torch.zeros_like(flat_params)\n\n eps = torch.finfo(b.dtype).eps\n\n # Conjugate-Gradient\n deltas, Ms = self._CG(A=A, b=b.neg(), x0=init_delta,\n M=M, max_iter=cg_max_iter,\n tol=1e1 * eps, eps=eps, martens=True)\n\n # Update parameters\n delta = state['init_delta'] = deltas[-1]\n M = Ms[-1]\n\n vector_to_parameters(flat_params + delta, self._params)\n loss_now = closure()[0]\n current_evals += 1\n state['func_evals'] += 1\n\n # Conjugate-Gradient backtracking (Section 20.8.7)\n if verbose:\n print(\"Loss before CG: {}\".format(float(loss_before)))\n print(\"Loss before BT: {}\".format(float(loss_now)))\n\n for (d, m) in zip(reversed(deltas[:-1][::2]), reversed(Ms[:-1][::2])):\n vector_to_parameters(flat_params + d, self._params)\n loss_prev = closure()[0]\n if float(loss_prev) > float(loss_now):\n break\n delta = d\n M = m\n loss_now = loss_prev\n\n if verbose:\n print(\"Loss after BT: {}\".format(float(loss_now)))\n\n # The Levenberg-Marquardt Heuristic (Section 20.8.5)\n reduction_ratio = (float(loss_now) -\n float(loss_before)) / M if M != 0 else 1\n\n if reduction_ratio < 0.25:\n group['damping'] *= 3 / 2\n elif reduction_ratio > 0.75:\n group['damping'] *= 2 / 3\n if reduction_ratio < 0:\n group['init_delta'] = 0\n\n # Line Searching (Section 20.8.8)\n beta = 0.8\n c = 1e-2\n min_improv = min(c * torch.dot(b, delta), 0)\n\n for _ in range(60):\n if float(loss_now) <= float(loss_before) + alpha * min_improv:\n break\n\n alpha *= beta\n vector_to_parameters(flat_params + alpha * delta, self._params)\n loss_now = closure()[0]\n else: # No good update found\n alpha = 0.0\n loss_now = loss_before\n\n # Update the parameters (this time fo real)\n vector_to_parameters(flat_params + alpha * delta, self._params)\n\n if verbose:\n print(\"Loss after LS: {0} (lr: {1:.3f})\".format(\n float(loss_now), alpha))\n print(\"Tikhonov damping: {0:.3f} (reduction ratio: {1:.3f})\".format(\n group['damping'], reduction_ratio), end='\\n\\n')\n\n return loss_now\n\n def _CG(self, A, b, x0, M=None, max_iter=50, tol=1.2e-6, eps=1.2e-7,\n martens=False):\n \"\"\"\n Minimizes the linear system 
x^T.A.x - x^T b using the conjugate\n gradient method\n\n Arguments:\n A (callable): An abstract linear operator implementing the\n product A.x. A must represent a hermitian, positive definite\n matrix.\n b (torch.Tensor): The vector b.\n x0 (torch.Tensor): An initial guess for x.\n M (callable, optional): An abstract linear operator implementing\n the product of the preconditioner (for A) matrix with a vector.\n tol (float, optional): Tolerance for convergence.\n martens (bool, optional): Flag for Martens' convergence criterion.\n \"\"\"\n\n x = [x0]\n r = A(x[0]) - b\n\n if M is not None:\n y = M(r)\n p = -y\n else:\n p = -r\n\n res_i_norm = r @ r\n\n if martens:\n m = [0.5 * (r - b) @ x0]\n\n for i in range(max_iter):\n Ap = A(p)\n\n alpha = res_i_norm / ((p @ Ap) + eps)\n\n x.append(x[i] + alpha * p)\n r = r + alpha * Ap\n\n if M is not None:\n y = M(r)\n res_ip1_norm = y @ r\n else:\n res_ip1_norm = r @ r\n\n beta = res_ip1_norm / (res_i_norm + eps)\n res_i_norm = res_ip1_norm\n\n # Martens' Relative Progress stopping condition (Section 20.4)\n if martens:\n m.append(0.5 * A(x[i + 1]) @ x[i + 1] - b @ x[i + 1])\n\n k = max(10, int(i / 10))\n if i > k:\n stop = (m[i] - m[i - k]) / (m[i] + eps)\n if stop < 1e-4:\n break\n\n if res_i_norm < tol or torch.isnan(res_i_norm):\n break\n\n if M is not None:\n p = - y + beta * p\n else:\n p = - r + beta * p\n\n return (x, m) if martens else (x, None)\n\n def _Hv(self, gradient, vec, damping):\n \"\"\"\n Computes the Hessian vector product.\n \"\"\"\n Hv = self._Rop(gradient, self._params, vec)\n\n # Tikhonov damping (Section 20.8.1)\n return Hv.detach() + damping * vec\n\n def _Gv(self, loss, output, vec, damping):\n \"\"\"\n Computes the generalized Gauss-Newton vector product.\n \"\"\"\n Jv = self._Rop(output, self._params, vec)\n\n gradient = torch.autograd.grad(loss, output, create_graph=True)\n HJv = self._Rop(gradient, output, Jv)\n\n JHJv = torch.autograd.grad(\n output, self._params, grad_outputs=HJv.reshape_as(output), retain_graph=True)\n\n # Tikhonov damping (Section 20.8.1)\n return parameters_to_vector(JHJv).detach() + damping * vec\n\n @staticmethod\n def _Rop(y, x, v, create_graph=False):\n \"\"\"\n Computes the product (dy_i/dx_j) v_j: R-operator\n \"\"\"\n if isinstance(y, tuple):\n ws = [torch.zeros_like(y_i, requires_grad=True) for y_i in y]\n else:\n ws = torch.zeros_like(y, requires_grad=True)\n\n jacobian = torch.autograd.grad(\n y, x, grad_outputs=ws, create_graph=True)\n\n Jv = torch.autograd.grad(parameters_to_vector(\n jacobian), ws, grad_outputs=v, create_graph=create_graph)\n\n return parameters_to_vector(Jv)\n\n\n# The empirical Fisher diagonal (Section 20.11.3)\ndef empirical_fisher_diagonal(net, xs, ys, criterion):\n grads = list()\n for (x, y) in zip(xs, ys):\n fi = criterion(net(x), y)\n grads.append(torch.autograd.grad(fi, net.parameters(),\n retain_graph=False))\n\n vec = torch.cat([(torch.stack(p) ** 2).mean(0).detach().flatten()\n for p in zip(*grads)])\n return vec\n\n\n# The empirical Fisher matrix (Section 20.11.3)\ndef empirical_fisher_matrix(net, xs, ys, criterion):\n grads = list()\n for (x, y) in zip(xs, ys):\n fi = criterion(net(x), y)\n grad = torch.autograd.grad(fi, net.parameters(),\n retain_graph=False)\n grads.append(torch.cat([g.detach().flatten() for g in grad]))\n\n grads = torch.stack(grads)\n n_batch = grads.shape[0]\n return torch.einsum('ij,ik->jk', grads, grads) / n_batch\n" ]
[ [ "torch.finfo", "torch.nn.utils.convert_parameters.parameters_to_vector", "torch.isnan", "torch.cat", "torch.einsum", "torch.zeros_like", "torch.eye", "torch.dot", "torch.stack", "torch.autograd.grad", "torch.nn.utils.convert_parameters.vector_to_parameters" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mblnk/pyeventio
[ "072dbd698f3bcd8e60eff0eeddcf828615b28db8" ]
[ "eventio/scripts/plot_hists.py" ]
[ "from eventio import EventIOFile, Histograms\nimport matplotlib.pyplot as plt\nfrom eventio.search_utils import yield_toplevel_of_type\nimport numpy as np\nfrom argparse import ArgumentParser\n\n\nparser = ArgumentParser()\nparser.add_argument('inputfile')\nparser.add_argument(\n '-o', '--output-base',\n help='if given, save plots to `output-base` + \"_{03d}.\" + `format`',\n)\nparser.add_argument(\n '-f', '--format', default='pdf', help='Format to save plots in',\n)\nparser.add_argument(\n '-r', '--dpi', default=300, help='DPI for the output', type=int,\n)\n\n\ndef main():\n args = parser.parse_args()\n inputfile = args.inputfile\n\n hists_read = 0\n with EventIOFile(inputfile) as f:\n for o in yield_toplevel_of_type(f, Histograms):\n hists = o.parse()\n\n for hist in hists:\n hists_read += 1\n plt.figure()\n\n x_bins = np.linspace(\n hist['lower_x'],\n hist['upper_x'],\n hist['n_bins_x'] + 1\n )\n\n if hist['n_bins_y'] > 0:\n y_bins = np.linspace(\n hist['lower_y'],\n hist['upper_y'],\n hist['n_bins_y'] + 1\n )\n\n plt.pcolormesh(x_bins, y_bins, hist['data'])\n\n marginal_x = np.sum(hist['data'], axis=0)\n marginal_y = np.sum(hist['data'], axis=1)\n\n non_zero_x, = np.where(marginal_x != 0)\n plt.xlim(x_bins[non_zero_x[0]], x_bins[non_zero_x[-1] + 1])\n\n non_zero_y, = np.where(marginal_y != 0)\n plt.ylim(y_bins[non_zero_y[0]], y_bins[non_zero_y[-1] + 1])\n plt.colorbar(label='Number of Events')\n\n else:\n centers = 0.5 * (x_bins[:-1] + x_bins[1:])\n plt.hist(centers, bins=x_bins, weights=hist['data'])\n\n non_zero_x, = np.where(hist['data'] != 0)\n plt.xlim(x_bins[non_zero_x[0]], x_bins[non_zero_x[-1] + 1])\n\n plt.title(hist['title'])\n plt.tight_layout()\n\n if not args.output_base:\n plt.show()\n else:\n plt.savefig(\n args.output_base\n + '_{:03d}.'.format(hists_read)\n + args.format,\n dpi=args.dpi,\n )\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.ylim", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.xlim", "matplotlib.pyplot.hist", "matplotlib.pyplot.pcolormesh", "matplotlib.pyplot.show", "numpy.where", "numpy.sum", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wmkouw/da-mrinv
[ "582ffc6fec59cf792e1086c1cd81a05433fd0184" ]
[ "mrainet/demos/demo_mraicnn.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mrainet.mraicnn import MRAIConvolutionalNeuralNetwork\nfrom mrainet.util import extract_all_patches\nfrom mrainet.viz import viz_embedding\n\n'''Load data set'''\n\n# Load source MRI-scan and corresponding segmentation\nX = np.load('./data/subject01_GE2D_1.5T.npy')\nY = np.load('./data/subject01_segmentation.npy')\n\n# Load target MRI-scan and corresponding segmentation\nZ = np.load('./data/subject02_GE2D_3.0T.npy')\nU = np.load('./data/subject02_segmentation.npy')\n\n# Note that U is missing a lot of labels\nprint('Proportion missing labels = ' + str(np.mean(~np.isnan(U.ravel()))))\n\n'''Set up MRAI network'''\n\n# Initialize and compile a small neural network\nN = MRAIConvolutionalNeuralNetwork(patch_size=(31, 31),\n num_kernels=[8],\n kernel_size=[(3, 3)],\n dense_size=[16, 8],\n batch_size=128,\n num_epochs=4,\n num_draw=10,\n margin=10)\n\n'''Train the net'''\n\n# Call training procedure on source and target data\nN.train(X, Y, Z, U, num_targets=1)\n\n'''Map images to MRAI representation.'''\n\n# Extract all source patches and feed them through network.\nPX = extract_all_patches(X[0], patch_size=(31, 31), add_4d=True)\nHX = N.feedforward(PX, scan_ID=0)\n\n# Map label image to sparse array format\nsY = N.matrix2sparse(Y[0])\n\n# Extract all target patches and feed them through network.\nPZ = extract_all_patches(Z[0], patch_size=(31, 31), add_4d=True)\nHZ = N.feedforward(PZ, scan_ID=1)\n\n# Map label image to sparse array format\nsU = N.matrix2sparse(U[0], remove_nans=False)\n\n# Filter out missing target labels\nHZ = HZ[~np.isnan(sU[:, 2]), :]\nsU = sU[~np.isnan(sU[:, 2]), :]\n\n'''Visualize results'''\n\n# Create figure\nfig, ax = plt.subplots(figsize=(15, 10))\n\n# Call visualizer\nviz_embedding(HX, sY[:, 2], marker='o', ax=ax)\nviz_embedding(HZ, sU[:, 2], marker='x', ax=ax)\n" ]
[ [ "numpy.isnan", "numpy.load", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sgg10/arsp_solver_api
[ "ad1d2f52eea58338d4f26128d5130eb326d529fb" ]
[ "app/linear_equations/factorization/partial_lu.py" ]
[ "from app.utils.methods import BaseMethod\nimport numpy as np\nfrom numpy import size, eye, zeros\n\n\nclass PartialLU(BaseMethod):\n def __init__(self, A, b):\n self.A = np.array(A)\n self.b = np.array(b)\n self.n = len(A)\n self.L = eye(self.n)\n self.U = zeros(self.n)\n self.P = eye(self.n)\n self.M = np.array(A)\n\n def proggresive_subst(self, M):\n n = len(M)\n x = zeros([n, 1])\n\n x[0] = M[0, n] / M[0, 0]\n array = [[1]]\n for i in range(1, n):\n aux = np.concatenate((array, np.transpose(x[0:i])), axis=1)\n array_aux = [M[i - 1, n]]\n aux_ = np.concatenate((array_aux, -M[i, 0:i]), axis=0)\n x[i] = np.dot(aux, aux_) / M[i, i]\n return x\n\n def back_subst(self, M):\n n = len(M)\n x = np.ones([n, 1])\n for i in range(n - 1, -1, -1):\n value = 0\n for j in range(i + 1, n):\n value += M[i, j] * x[j]\n x[i] = (M[i, n] - value) / M[i, i]\n return x\n\n def run(self):\n for i in range(1, self.n - 1):\n aux0, aux = max(abs(self.M[i + 1:self.n, i]))\n if aux0 > abs(self.M(i, i)):\n aux2 = self.M[i + aux, i:self.n]\n aux3 = self.P[i + aux, i:self.n]\n self.M[aux + i, i:self.n] = self.M[i, i:self.n]\n self.P[aux + i, :] = self.P[i, :]\n self.M[i, i:self.n] = self.P[i, :]\n self.P[i, :] = aux3\n if i > 1:\n aux4 = self.L[i + aux, 1:i - 1]\n self.L[i + aux, 1:i - 1] = self.L[i, 1:i - 1]\n self.L[i, 1:i - 1] = aux4\n for j in range(i + 1, self.n):\n if self.M[j, i] != 0:\n self.L[j, i] = self.M[j, i] / self.M[i, i]\n self.M[j, i:self.n] = self.M[j, i:self.n] - self.M[j, i] / self.M[i, i] * self.M[i, i:self.n]\n self.U[i, i:self.n] = self.M[i, i:self.n]\n self.U[i + 1, i + 1:self.n] = self.M[i + 1, i + 1:self.n]\n\n return {\"result\": {\"x\": self.back_subst(self.U), \"z\": self.proggresive_subst(self.L)}}\n" ]
[ [ "numpy.dot", "numpy.eye", "numpy.ones", "numpy.concatenate", "numpy.transpose", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RuichunWang/ModelArts-Lab
[ "cfa9a853e3a76a21eac2818f055b36978ac2bb69" ]
[ "official_examples/Using_Hetero_Cluster_Framework_to_train_a_Pong_Player_with_Rllib/learner_algorithm_dir/customize_service.py" ]
[ "import os\r\nimport numpy as np\r\nimport time\r\nimport pickle\r\nimport traceback\r\nimport logging\r\nfrom http.server import SimpleHTTPRequestHandler, HTTPServer\r\nfrom socketserver import ThreadingMixIn\r\nfrom ray.rllib.agents.ppo import PPOTrainer\r\nfrom ray.rllib.env import ExternalEnv\r\nimport sys\r\nimport ray\r\n\r\nsys.path.append(os.path.abspath(os.path.dirname(__file__)))\r\nfrom rl_config import ACTION_SPACE, OBSERVATION_SPACE, CONFIG_PPO, CONFIG_DQN\r\n\r\n\r\nclass ExternalAtari(ExternalEnv):\r\n def __init__(self, config):\r\n ExternalEnv.__init__(self, action_space=ACTION_SPACE, observation_space=OBSERVATION_SPACE)\r\n\r\n def run(self):\r\n print('start to run fake eternal env...')\r\n time.sleep(999999)\r\n\r\n\r\ndef build_bot():\r\n ray.init(local_mode=True)\r\n trainer = PPOTrainer(env=ExternalAtari, config=dict(**CONFIG_PPO))\r\n model_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ckpts')\r\n last_iter = 0\r\n for name in os.listdir(model_dir):\r\n print(name)\r\n it = int(name.split('_')[1])\r\n if it > last_iter:\r\n last_iter = it\r\n print(os.listdir(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ckpts/checkpoint_{}'.format(last_iter))))\r\n trainer.restore(os.path.join(os.path.abspath(os.path.dirname(__file__)),\r\n 'ckpts/checkpoint_{}/checkpoint-{}'.format(last_iter, last_iter)))\r\n return trainer\r\n\r\n\r\nbot = build_bot()\r\n\r\n\r\nclass PolicyServer(ThreadingMixIn, HTTPServer):\r\n def __init__(self, address, port):\r\n handler = _make_handler()\r\n HTTPServer.__init__(self, (address, port), handler)\r\n\r\n\r\ndef _make_handler():\r\n class Handler(SimpleHTTPRequestHandler):\r\n\r\n def do_POST(self):\r\n content_len = int(self.headers.get(\"Content-Length\"), 0)\r\n raw_body = self.rfile.read(content_len)\r\n parsed_input = pickle.loads(raw_body)\r\n obs = np.array(parsed_input['obs'])\r\n\r\n try:\r\n act = bot.compute_action(obs, explore=True)\r\n response = {'action': act}\r\n self.send_response_only(200)\r\n self.end_headers()\r\n self.wfile.write(pickle.dumps(response, protocol=0))\r\n except Exception:\r\n logging.info(\"error\")\r\n self.send_error(500, traceback.format_exc())\r\n\r\n return Handler\r\n\r\n\r\nif __name__ == \"__main__\":\r\n server = PolicyServer(address=\"0.0.0.0\", port=8080)\r\n print(\"server start...\")\r\n server.serve_forever()\r\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vjf/LoopStructural-1
[ "de13fa3f734e21621d3ae93c6d5dab59be4f6a12" ]
[ "LoopStructural/modelling/fault/fault_segment.py" ]
[ "import logging\n\nfrom LoopStructural.modelling.fault.fault_function_feature import FaultDisplacementFeature\nfrom LoopStructural.modelling.fault.fault_function import BaseFault\nlogger = logging.getLogger(__name__)\nfrom concurrent.futures import ThreadPoolExecutor\nimport numpy as np\n\n\nclass FaultSegment:\n \"\"\"\n Class for representing a slip event of a fault\n \"\"\"\n\n def __init__(self, faultframe,\n faultfunction = None,\n steps = 3,\n displacement=1.,\n **kwargs):\n \"\"\"\n A slip event of a fault\n\n Parameters\n ----------\n faultframe : FaultFrame\n the fault frame defining the faut geometry\n faultfunction : function/lambda function\n optional displacement function for spatially variable fault displacement\n steps : int\n how many integration steps for faults\n kwargs\n \"\"\"\n self.faultframe = faultframe\n self.type = 'fault'\n self.name = kwargs.get('name', self.faultframe.name)\n self.displacement = displacement\n self.faultfunction = faultfunction\n if faultfunction == 'BaseFault':\n self.faultfunction = BaseFault.fault_displacement\n self.steps = steps\n self.regions = []\n self.faults_enabled = True\n self.displacementfeature = None\n self.model = None\n if self.faultframe is not None:\n self.displacementfeature = FaultDisplacementFeature(\n self.faultframe, self.faultfunction, name = self.name)\n\n def __getitem__(self, item):\n \"\"\"\n\n Parameters\n ----------\n item\n\n Returns\n -------\n\n \"\"\"\n return self.faultframe[item]\n\n def set_model(self, model):\n \"\"\"\n Link a geological model to the feature\n\n Parameters\n ----------\n model - GeologicalModel\n\n Returns\n -------\n\n \"\"\"\n self.model = model\n\n def set_displacement(self, displacement, scale = True):\n \"\"\"\n Set the fault displacement to a new value\n\n Parameters\n ----------\n displacement - double\n scale - boolean\n\n Returns\n -------\n\n \"\"\"\n if scale and self.model is not None:\n self.displacement = displacement / self.model.scale_factor\n elif not scale:\n self.displacement = displacement\n else:\n logger.warning(\"Displacement not updated\")\n\n def toggle_faults(self):\n \"\"\"\n Toggle faults that affect this fault segment\n\n Returns\n -------\n\n \"\"\"\n self.faults_enabled = ~self.faults_enabled\n for i in range(3):\n self.faultframe[i].toggle_faults()\n\n def add_region(self, region):\n \"\"\"\n\n Parameters\n ----------\n region : boolean function(x,y,z)\n A function that returns true if inside region, false if outside\n can be passed as a lambda function e.g.\n lambda pos : feature.evaluate_value(pos) > 0\n Returns\n -------\n\n \"\"\"\n\n self.regions.append(region)\n\n def evaluate(self, locations):\n \"\"\"\n Evaluate which side of fault\n\n Parameters\n ----------\n locations numpy array\n location to evaluate\n\n Returns\n -------\n boolean array true if on hanging wall, false if on footwall\n\n \"\"\"\n\n return self.faultframe.features[0].evaluate_value(locations) > 0\n\n def inside_volume(self,locations):\n v = self.faultframe.evaluate_value(locations)\n return np.all(np.logical_and(v > -1,v<1),axis=1)\n\n def evaluate_value(self, locations):\n \"\"\"\n Return the value of the fault surface scalar field\n\n Parameters\n ----------\n locations - numpy array\n location to evaluate scalar field\n\n Returns\n -------\n\n \"\"\"\n v = np.zeros(locations.shape[0])\n v[:] = np.nan\n mask = np.zeros(locations.shape[0]).astype(bool)\n mask[:] = True\n # check regions\n for r in self.regions:\n mask = np.logical_and(mask, r(locations))\n return 
self.faultframe[0].evaluate_value(locations[mask, :])\n\n def mean(self):\n return self.faultframe[0].mean()\n\n def max(self):\n return self.faultframe[0].max()\n\n def min(self):\n return self.faultframe[0].min()\n\n def evaluate_gradient(self, locations):\n \"\"\"\n Return the fault slip direction at the location\n\n Parameters\n ----------\n locations - numpy array Nx3\n\n\n Returns\n -------\n\n \"\"\"\n v = np.zeros(locations.shape[0])\n v[:] = np.nan\n mask = np.zeros(locations.shape[0]).astype(bool)\n mask[:] = True\n # check regions\n for r in self.regions:\n mask = np.logical_and(mask, r(locations))\n # need to scale with fault displacement\n return self.faultframe[1].evaluate_gradient(locations[mask, :])\n\n def evaluate_displacement(self, points):\n newp = np.copy(points).astype(float)\n # evaluate fault function for all points then define mask for only points affected by fault\n with ThreadPoolExecutor(max_workers=8) as executor:\n # all of these operations should be independent so just run as different threads\n gx_future = executor.submit(self.faultframe.features[0].evaluate_value, newp)\n gy_future = executor.submit(self.faultframe.features[1].evaluate_value, newp)\n gz_future = executor.submit(self.faultframe.features[2].evaluate_value, newp)\n gx = gx_future.result()\n gy = gy_future.result()\n gz = gz_future.result()\n d = np.zeros(gx.shape)\n mask = np.logical_and(~np.isnan(gx),~np.isnan(gy))\n mask = np.logical_and(mask,~np.isnan(gz))\n d[~mask] = 0\n gx_mask = np.zeros_like(mask,dtype=bool)\n gx_mask[mask] = gx[mask] > 0\n d[gx_mask] = 1.\n if self.faultfunction is not None:\n d[mask] = self.faultfunction(gx[mask], gy[mask], gz[mask])\n return d\n def apply_to_points(self, points):\n \"\"\"\n Unfault the array of points\n\n Parameters\n ----------\n points - numpy array Nx3\n\n Returns\n -------\n\n \"\"\"\n steps = self.steps\n newp = np.copy(points).astype(float)\n # evaluate fault function for all points then define mask for only points affected by fault\n with ThreadPoolExecutor(max_workers=8) as executor:\n # all of these operations should be independent so just run as different threads\n gx_future = executor.submit(self.faultframe.features[0].evaluate_value, newp)\n gy_future = executor.submit(self.faultframe.features[1].evaluate_value, newp)\n gz_future = executor.submit(self.faultframe.features[2].evaluate_value, newp)\n gx = gx_future.result()\n gy = gy_future.result()\n gz = gz_future.result()\n d = np.zeros(gx.shape)\n mask = np.logical_and(~np.isnan(gx),~np.isnan(gy))\n mask = np.logical_and(mask,~np.isnan(gz))\n d[~mask] = 0\n gx_mask = np.zeros_like(mask,dtype=bool)\n gx_mask[mask] = gx[mask] > 0\n d[gx_mask] = 1.\n if self.faultfunction is not None:\n d[mask] = self.faultfunction(gx[mask], gy[mask], gz[mask])\n mask = np.abs(d) > 0.\n\n d *= self.displacement\n # calculate the fault frame for the evaluation points\n for i in range(steps):\n with ThreadPoolExecutor(max_workers=8) as executor:\n # all of these operations should be independent so just run as different threads\n gx_future = executor.submit(self.faultframe.features[0].evaluate_value, newp[mask, :])\n g_future = executor.submit(self.faultframe.features[1].evaluate_gradient, newp[mask, :])\n gy_future = executor.submit(self.faultframe.features[1].evaluate_value, newp[mask, :])\n gz_future = executor.submit(self.faultframe.features[2].evaluate_value, newp[mask, :])\n gx = gx_future.result()\n g = g_future.result()\n gy = gy_future.result()\n gz = gz_future.result()\n # # get the fault frame 
val/grad for the points\n # determine displacement magnitude, for constant displacement\n # hanging wall should be > 0\n d = np.zeros(gx.shape)\n mask2 = np.logical_and(~np.isnan(gx), ~np.isnan(gy))\n mask2 = np.logical_and(mask2, ~np.isnan(gz))\n d[~mask2] = 0\n gx_mask2 = np.zeros_like(mask2,dtype=bool)\n gx_mask2[mask2] = gx[mask2] > 0\n # d[~np.isnan(gx)][gx[~np.isnan(gx)]>0] = 1\n d[gx_mask2] = 1.\n # d[mask2][gx[mask2] < 0] = 0.\n # d[gx < 0] = 0.\n if self.faultfunction is not None:\n d[mask2] = self.faultfunction(gx[mask2], gy[mask2], gz[mask2])\n d *= self.displacement\n # normalise when length is >0\n g_mag = np.zeros(g.shape[0])\n g_mag[mask2] = np.linalg.norm(g[mask2], axis=1)\n # g_mag = np.linalg.norm(g[mask2], axis=1)\n g[g_mag > 0.] /= g_mag[g_mag > 0, None]\n # multiply displacement vector by the displacement magnitude for\n # step\n g *= (1. / steps) * d[:, None]\n\n # apply displacement\n newp[mask, :] += g\n return newp\n\n" ]
[ [ "numpy.abs", "numpy.isnan", "numpy.linalg.norm", "numpy.copy", "numpy.zeros_like", "numpy.logical_and", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
goodenou/decisionengine_modules
[ "cf4311949d3c5fd991d1c5c4a6a190ba53a2eb71", "cf4311949d3c5fd991d1c5c4a6a190ba53a2eb71" ]
[ "src/decisionengine_modules/glideinwms/resource_dist_plugins.py", "src/decisionengine_modules/GCE/sources/GCEInstancePerformance.py" ]
[ "import pandas as pd\n\n_RESOURCE_FROM_COLUMN_MAP = {\n \"Grid_Figure_Of_Merit\": \"Grid_Figure_Of_Merit\",\n \"GCE_Figure_Of_Merit\": \"FigureOfMerit\",\n \"AWS_Figure_Of_Merit\": \"AWS_Figure_Of_Merit\",\n \"Nersc_Figure_Of_Merit\": \"FigureOfMerit\",\n}\n\n\ndef order_resources(resources, logger=None):\n ordered_resources = []\n rss_foms = pd.DataFrame()\n\n for rss, column_name in _RESOURCE_FROM_COLUMN_MAP.items():\n fom_df = resources.get(rss)\n if logger is not None:\n logger.info(f\"Ordering resources based on {rss}\")\n if (fom_df is not None) and (fom_df.empty is False):\n # Create a new dataframe with just EntryName and FOM\n df = fom_df[[\"EntryName\", column_name]]\n # Rename the entry type specific FOM columns to just 'fom'\n df = df.rename(columns={column_name: \"FOM\"})\n # Append the results\n rss_foms = rss_foms.append(df)\n elif logger is not None:\n logger.info(f\"{rss} does not have any entries to order\")\n try:\n ordered_resources = rss_foms.sort_values(by=[\"FOM\", \"EntryName\"], ascending=True).reset_index(drop=True)\n except KeyError:\n if logger is not None:\n logger.exception(\n f'Unable to find Figure of Merrit \"FOM\" in the dataframe columns {list(resources.columns)}'\n )\n return ordered_resources\n\n\ndef fom_eligible_resources(resources, constraint=None, limit=None, logger=None):\n ordered_resources = order_resources(resources, logger)\n if constraint is None:\n return ordered_resources.head(limit)\n return ordered_resources.query(constraint).head(limit)\n", "\"\"\"\nThis source takes input from instance_performance_gce.csv\nand adds it to data block\n\"\"\"\nimport pandas as pd\n\nfrom decisionengine.framework.modules import Source\nfrom decisionengine.framework.modules.Source import Parameter\n\n\[email protected]_config(Parameter(\"csv_file\", type=str, comment=\"path to CSV file\"))\[email protected](GCE_Instance_Performance=pd.DataFrame)\nclass GCEInstancePerformance(Source.Source):\n def __init__(self, config):\n super().__init__(config)\n self.csv_file = config.get(\"csv_file\")\n if not self.csv_file:\n raise RuntimeError(\"No csv file found in configuration\")\n\n def acquire(self):\n self.logger.debug(\"in GCEInstancePerformance acquire\")\n return {\"GCE_Instance_Performance\": pd.read_csv(self.csv_file)}\n\n\nSource.describe(GCEInstancePerformance)\n" ]
[ [ "pandas.DataFrame" ], [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
zhiiker/distribuuuu
[ "eaaa9229ab66a1c5f24c2a07aced21adf9af7895" ]
[ "tutorial/imagenet.py" ]
[ "\"\"\"\n(MNMC) Multiple Nodes Multi-GPU Cards Training\nMinimal ImageNet training code powered by DDP\n\"\"\"\n\nimport os\nimport subprocess\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nBATCH_SIZE = 256\nEPOCHS = 1\nIMAGE_DIR = \"./data/ILSVRC/\"\n\n\ndef setup_distributed(backend=\"nccl\", port=None):\n \"\"\"\n Initialize distributed training environment.\n support both slurm and torch.distributed.launch\n \"\"\"\n num_gpus = torch.cuda.device_count()\n\n if \"SLURM_JOB_ID\" in os.environ:\n rank = int(os.environ[\"SLURM_PROCID\"])\n world_size = int(os.environ[\"SLURM_NTASKS\"])\n node_list = os.environ[\"SLURM_NODELIST\"]\n addr = subprocess.getoutput(f\"scontrol show hostname {node_list} | head -n1\")\n # specify master port\n if port is not None:\n os.environ[\"MASTER_PORT\"] = str(port)\n elif \"MASTER_PORT\" in os.environ:\n pass # use MASTER_PORT in the environment variable\n else:\n os.environ[\"MASTER_PORT\"] = \"29500\"\n if \"MASTER_ADDR\" not in os.environ:\n os.environ[\"MASTER_ADDR\"] = addr\n os.environ[\"WORLD_SIZE\"] = str(world_size)\n os.environ[\"LOCAL_RANK\"] = str(rank % num_gpus)\n os.environ[\"RANK\"] = str(rank)\n else:\n rank = int(os.environ[\"RANK\"])\n world_size = int(os.environ[\"WORLD_SIZE\"])\n\n torch.cuda.set_device(rank % num_gpus)\n\n dist.init_process_group(\n backend=backend,\n world_size=world_size,\n rank=rank,\n )\n\n\nif __name__ == \"__main__\":\n\n # 0. set up distributed device\n setup_distributed()\n\n rank = int(os.environ[\"RANK\"])\n local_rank = int(os.environ[\"LOCAL_RANK\"])\n device = torch.device(\"cuda\", local_rank)\n\n print(f\"[init] == local rank: {local_rank}, global rank: {rank} ==\")\n\n # 1. define network\n net = torchvision.models.resnet18(pretrained=False, num_classes=1000)\n net = net.to(device)\n # DistributedDataParallel\n net = DDP(net, device_ids=[local_rank], output_device=local_rank)\n # 2. define dataloader\n traindir = os.path.join(IMAGE_DIR, \"train\")\n trainset = torchvision.datasets.ImageFolder(\n root=traindir,\n transform=transforms.Compose(\n [\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n ),\n ]\n ),\n )\n # DistributedSampler\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n trainset, shuffle=True\n )\n train_loader = torch.utils.data.DataLoader(\n trainset,\n batch_size=BATCH_SIZE,\n num_workers=4,\n pin_memory=True,\n sampler=train_sampler,\n )\n\n # 3. define loss and optimizer\n criterion = nn.CrossEntropyLoss().to(device)\n optimizer = torch.optim.SGD(\n net.parameters(),\n lr=0.1,\n momentum=0.9,\n weight_decay=0.0001,\n nesterov=True,\n )\n\n if rank == 0:\n print(\" ======= Training ======= \\n\")\n\n # 4. 
start to train\n net.train()\n for ep in range(0, EPOCHS):\n train_loss = correct = total = 0\n # set sampler\n train_loader.sampler.set_epoch(ep)\n\n for idx, (inputs, targets) in enumerate(train_loader):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs = net(inputs)\n\n loss = criterion(outputs, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n total += targets.size(0)\n correct += torch.eq(outputs.argmax(dim=1), targets).sum().item()\n\n if rank == 0 and ((idx + 1) % 40 == 0 or (idx + 1) == len(train_loader)):\n print(\n \" == step: [{:3}/{}] [{}/{}] | loss: {:.3f} | acc: {:6.3f}%\".format(\n idx + 1,\n len(train_loader),\n ep,\n EPOCHS,\n train_loss / (idx + 1),\n 100.0 * correct / total,\n )\n )\n\n # 5. save model (only in rank0)\n checkpoint_file = \"./ckpt.pth.tar\"\n if rank == 0:\n checkpoint = {\n \"state_dict\": net.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n }\n torch.save(checkpoint, checkpoint_file)\n print(f\"(rank: {rank}) == Saved: {checkpoint_file}\")\n\n # 6. load model (all ranks)\n # use a barrier() to make sure that other ranks loads the model after rank0 saves it.\n # see https://github.com/pytorch/examples/blob/master/distributed/ddp/main.py\n dist.barrier()\n map_location = f\"cuda:{local_rank}\"\n # map model to be loaded to specified single gpu.\n checkpoint = torch.load(checkpoint_file, map_location=map_location)\n net.load_state_dict(checkpoint[\"state_dict\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n print(f\"(rank: {rank}) == Loaded: {checkpoint_file}\")\n\n # method 2: net.module & map_location=\"cpu\"\n # checkpoint_file = \"./ckpt.pth.tar\"\n # if rank == 0:\n # checkpoint = {\n # \"state_dict\": net.module.state_dict(),\n # \"optimizer\": optimizer.state_dict(),\n # }\n # torch.save(checkpoint, checkpoint_file)\n # print(f\"(rank: {rank}) == Saved: {checkpoint_file}\")\n # dist.barrier()\n # # map model to be loaded to specified single gpu.\n # checkpoint = torch.load(checkpoint_file, map_location=\"cpu\")\n # net.module.load_state_dict(checkpoint[\"state_dict\"])\n # optimizer.load_state_dict(checkpoint[\"optimizer\"])\n # print(f\"(rank: {rank}) == Loaded: {checkpoint_file}\")\n\n\"\"\"\ndistributed.launch example: \n 8GPUs (batch size: 2048)\n 128k / (256*8) -> 626 itertaion\n\n>>> python -m torch.distributed.launch \\\n --nproc_per_node=8 \\\n --nnodes=1 \\\n --node_rank=0 \\\n --master_addr=localhost \\\n --master_port=22222 \\\n imagenet.py\n\n[init] == local rank: 0, global rank: 0 ==\n[init] == local rank: 2, global rank: 2 ==\n[init] == local rank: 6, global rank: 6 ==\n[init] == local rank: 5, global rank: 5 ==\n[init] == local rank: 7, global rank: 7 ==\n[init] == local rank: 4, global rank: 4 ==\n[init] == local rank: 3, global rank: 3 ==\n[init] == local rank: 1, global rank: 1 ==\n\n ======= Training ======= \n == step: [ 40/626] [0/1] | loss: 6.821 | acc: 0.498%\n == step: [ 80/626] [0/1] | loss: 6.616 | acc: 0.869%\n == step: [120/626] [0/1] | loss: 6.448 | acc: 1.351%\n == step: [160/626] [0/1] | loss: 6.294 | acc: 1.868%\n == step: [200/626] [0/1] | loss: 6.167 | acc: 2.443%\n == step: [240/626] [0/1] | loss: 6.051 | acc: 3.003%\n == step: [280/626] [0/1] | loss: 5.952 | acc: 3.457%\n == step: [320/626] [0/1] | loss: 5.860 | acc: 3.983%\n == step: [360/626] [0/1] | loss: 5.778 | acc: 4.492%\n == step: [400/626] [0/1] | loss: 5.700 | acc: 4.960%\n == step: [440/626] [0/1] | loss: 5.627 | acc: 5.488%\n == step: [480/626] [0/1] | loss: 5.559 
| acc: 6.013%\n == step: [520/626] [0/1] | loss: 5.495 | acc: 6.520%\n == step: [560/626] [0/1] | loss: 5.429 | acc: 7.117%\n == step: [600/626] [0/1] | loss: 5.371 | acc: 7.580%\n == step: [626/626] [0/1] | loss: 5.332 | acc: 7.907%\n\n(rank: 0) == Saved: ./ckpt.pth.tar\n(rank: 0) == Loaded: ./ckpt.pth.tar\n(rank: 1) == Loaded: ./ckpt.pth.tar\n(rank: 6) == Loaded: ./ckpt.pth.tar\n(rank: 7) == Loaded: ./ckpt.pth.tar\n(rank: 4) == Loaded: ./ckpt.pth.tar\n(rank: 5) == Loaded: ./ckpt.pth.tar\n(rank: 3) == Loaded: ./ckpt.pth.tar\n(rank: 2) == Loaded: ./ckpt.pth.tar\n\n\nslurm example: \n 32GPUs (batch size: 8192)\n 128k / (256*32) -> 157 itertaion\n>>> srun --partition=openai -n32 --gres=gpu:8 --ntasks-per-node=8 --job-name=slrum_test \\\n python -u imagenet.py\n\n[init] == local rank: 7, global rank: 7 ==\n[init] == local rank: 1, global rank: 1 ==\n[init] == local rank: 4, global rank: 4 ==\n[init] == local rank: 2, global rank: 2 ==\n[init] == local rank: 6, global rank: 6 ==\n[init] == local rank: 3, global rank: 3 ==\n[init] == local rank: 5, global rank: 5 ==\n[init] == local rank: 4, global rank: 12 ==\n[init] == local rank: 1, global rank: 25 ==\n[init] == local rank: 5, global rank: 13 ==\n[init] == local rank: 6, global rank: 14 ==\n[init] == local rank: 0, global rank: 8 ==\n[init] == local rank: 1, global rank: 9 ==\n[init] == local rank: 2, global rank: 10 ==\n[init] == local rank: 3, global rank: 11 ==\n[init] == local rank: 7, global rank: 15 ==\n[init] == local rank: 5, global rank: 29 ==\n[init] == local rank: 2, global rank: 26 ==\n[init] == local rank: 3, global rank: 27 ==\n[init] == local rank: 0, global rank: 24 ==\n[init] == local rank: 7, global rank: 31 ==\n[init] == local rank: 6, global rank: 30 ==\n[init] == local rank: 4, global rank: 28 ==\n[init] == local rank: 0, global rank: 16 ==\n[init] == local rank: 5, global rank: 21 ==\n[init] == local rank: 7, global rank: 23 ==\n[init] == local rank: 1, global rank: 17 ==\n[init] == local rank: 6, global rank: 22 ==\n[init] == local rank: 3, global rank: 19 ==\n[init] == local rank: 2, global rank: 18 ==\n[init] == local rank: 4, global rank: 20 ==\n[init] == local rank: 0, global rank: 0 ==\n ======= Training ======= \n\n == step: [ 40/157] [0/1] | loss: 6.781 | acc: 0.703%\n == step: [ 80/157] [0/1] | loss: 6.536 | acc: 1.260%\n == step: [120/157] [0/1] | loss: 6.353 | acc: 1.875%\n == step: [157/157] [0/1] | loss: 6.207 | acc: 2.465%\n\n(rank: 0) == Saved: ./ckpt.pth.tar\n(rank: 0) == Loaded: ./ckpt.pth.tar\n(rank: 6) == Loaded: ./ckpt.pth.tar\n(rank: 2) == Loaded: ./ckpt.pth.tar\n(rank: 1) == Loaded: ./ckpt.pth.tar\n(rank: 5) == Loaded: ./ckpt.pth.tar\n(rank: 7) == Loaded: ./ckpt.pth.tar\n(rank: 3) == Loaded: ./ckpt.pth.tar\n(rank: 4) == Loaded: ./ckpt.pth.tar\n(rank: 11) == Loaded: ./ckpt.pth.tar\n(rank: 9) == Loaded: ./ckpt.pth.tar\n(rank: 8) == Loaded: ./ckpt.pth.tar\n(rank: 14) == Loaded: ./ckpt.pth.tar\n(rank: 12) == Loaded: ./ckpt.pth.tar\n(rank: 15) == Loaded: ./ckpt.pth.tar\n(rank: 13) == Loaded: ./ckpt.pth.tar\n(rank: 10) == Loaded: ./ckpt.pth.tar\n(rank: 21) == Loaded: ./ckpt.pth.tar\n(rank: 23) == Loaded: ./ckpt.pth.tar\n(rank: 20) == Loaded: ./ckpt.pth.tar\n(rank: 17) == Loaded: ./ckpt.pth.tar\n(rank: 19) == Loaded: ./ckpt.pth.tar\n(rank: 16) == Loaded: ./ckpt.pth.tar\n(rank: 18) == Loaded: ./ckpt.pth.tar\n(rank: 22) == Loaded: ./ckpt.pth.tar\n(rank: 29) == Loaded: ./ckpt.pth.tar\n(rank: 31) == Loaded: ./ckpt.pth.tar\n(rank: 24) == Loaded: ./ckpt.pth.tar\n(rank: 27) == Loaded: ./ckpt.pth.tar\n(rank: 30) 
== Loaded: ./ckpt.pth.tar\n(rank: 28) == Loaded: ./ckpt.pth.tar\n(rank: 26) == Loaded: ./ckpt.pth.tar\n(rank: 25) == Loaded: ./ckpt.pth.tar\n\n\"\"\"\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.utils.data.distributed.DistributedSampler", "torch.load", "torch.utils.data.DataLoader", "torch.distributed.barrier", "torch.device", "torch.cuda.device_count", "torch.nn.parallel.DistributedDataParallel", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ClementNguyen/slt
[ "20ee90349d1ed0655b99612ffcfae6d079116db6" ]
[ "signjoey/initialization.py" ]
[ "# coding: utf-8\n\n\"\"\"\nImplements custom initialization\n\"\"\"\n\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.nn.init import _calculate_fan_in_and_fan_out\n\n\ndef orthogonal_rnn_init_(cell: nn.RNNBase, gain: float = 1.0):\n \"\"\"\n Orthogonal initialization of recurrent weights\n RNN parameters contain 3 or 4 matrices in one parameter, so we slice it.\n \"\"\"\n with torch.no_grad():\n for _, hh, _, _ in cell.all_weights:\n for i in range(0, hh.size(0), cell.hidden_size):\n nn.init.orthogonal_(hh.data[i : i + cell.hidden_size], gain=gain)\n\n\ndef lstm_forget_gate_init_(cell: nn.RNNBase, value: float = 1.0) -> None:\n \"\"\"\n Initialize LSTM forget gates with `value`.\n\n :param cell: LSTM cell\n :param value: initial value, default: 1\n \"\"\"\n with torch.no_grad():\n for _, _, ih_b, hh_b in cell.all_weights:\n l = len(ih_b)\n ih_b.data[l // 4 : l // 2].fill_(value)\n hh_b.data[l // 4 : l // 2].fill_(value)\n\n\ndef xavier_uniform_n_(w: Tensor, gain: float = 1.0, n: int = 4) -> None:\n \"\"\"\n Xavier initializer for parameters that combine multiple matrices in one\n parameter for efficiency. This is e.g. used for GRU and LSTM parameters,\n where e.g. all gates are computed at the same time by 1 big matrix.\n\n :param w: parameter\n :param gain: default 1\n :param n: default 4\n \"\"\"\n with torch.no_grad():\n fan_in, fan_out = _calculate_fan_in_and_fan_out(w)\n assert fan_out % n == 0, \"fan_out should be divisible by n\"\n fan_out //= n\n std = gain * math.sqrt(2.0 / (fan_in + fan_out))\n a = math.sqrt(3.0) * std\n nn.init.uniform_(w, -a, a)\n\n\n# pylint: disable=too-many-branches\ndef initialize_model(model: nn.Module, cfg: dict, txt_padding_idx: int) -> None:\n \"\"\"\n This initializes a model based on the provided config.\n\n All initializer configuration is part of the `model` section of the\n configuration file.\n For an example, see e.g. `https://github.com/joeynmt/joeynmt/\n blob/master/configs/iwslt_envi_xnmt.yaml#L47`\n\n The main initializer is set using the `initializer` key.\n Possible values are `xavier`, `uniform`, `normal` or `zeros`.\n (`xavier` is the default).\n\n When an initializer is set to `uniform`, then `init_weight` sets the\n range for the values (-init_weight, init_weight).\n\n When an initializer is set to `normal`, then `init_weight` sets the\n standard deviation for the weights (with mean 0).\n\n The word embedding initializer is set using `embed_initializer` and takes\n the same values. The default is `normal` with `embed_init_weight = 0.01`.\n\n Biases are initialized separately using `bias_initializer`.\n The default is `zeros`, but you can use the same initializers as\n the main initializer.\n\n Set `init_rnn_orthogonal` to True if you want RNN orthogonal initialization\n (for recurrent matrices). 
Default is False.\n\n `lstm_forget_gate` controls how the LSTM forget gate is initialized.\n Default is `1`.\n\n :param model: model to initialize\n :param cfg: the model configuration\n :param txt_padding_idx: index of spoken language text padding token\n \"\"\"\n\n # defaults: xavier, embeddings: normal 0.01, biases: zeros, no orthogonal\n gain = float(cfg.get(\"init_gain\", 1.0)) # for xavier\n init = cfg.get(\"initializer\", \"xavier\")\n init_weight = float(cfg.get(\"init_weight\", 0.01))\n\n embed_init = cfg.get(\"embed_initializer\", \"normal\")\n embed_init_weight = float(cfg.get(\"embed_init_weight\", 0.01))\n embed_gain = float(cfg.get(\"embed_init_gain\", 1.0)) # for xavier\n\n bias_init = cfg.get(\"bias_initializer\", \"zeros\")\n bias_init_weight = float(cfg.get(\"bias_init_weight\", 0.01))\n\n # pylint: disable=unnecessary-lambda, no-else-return\n def _parse_init(s, scale, _gain):\n scale = float(scale)\n assert scale > 0.0, \"incorrect init_weight\"\n if s.lower() == \"xavier\":\n return lambda p: nn.init.xavier_uniform_(p, gain=_gain)\n elif s.lower() == \"uniform\":\n return lambda p: nn.init.uniform_(p, a=-scale, b=scale)\n elif s.lower() == \"normal\":\n return lambda p: nn.init.normal_(p, mean=0.0, std=scale)\n elif s.lower() == \"zeros\":\n return lambda p: nn.init.zeros_(p)\n else:\n raise ValueError(\"unknown initializer\")\n\n init_fn_ = _parse_init(init, init_weight, gain)\n embed_init_fn_ = _parse_init(embed_init, embed_init_weight, embed_gain)\n bias_init_fn_ = _parse_init(bias_init, bias_init_weight, gain)\n\n with torch.no_grad():\n for name, p in model.named_parameters():\n if \"txt_embed\" in name:\n if \"lut\" in name:\n embed_init_fn_(p)\n\n elif \"bias\" in name:\n bias_init_fn_(p)\n\n elif len(p.size()) > 1:\n\n # RNNs combine multiple matrices is one, which messes up\n # xavier initialization\n if init == \"xavier\" and \"rnn\" in name:\n n = 1\n if \"encoder\" in name:\n n = 4 if isinstance(model.encoder.rnn, nn.LSTM) else 3\n elif \"decoder\" in name:\n n = 4 if isinstance(model.decoder.rnn, nn.LSTM) else 3\n xavier_uniform_n_(p.data, gain=gain, n=n)\n else:\n init_fn_(p)\n\n # zero out paddings\n if model.txt_embed is not None:\n model.txt_embed.lut.weight.data[txt_padding_idx].zero_()\n\n orthogonal = cfg.get(\"init_rnn_orthogonal\", False)\n lstm_forget_gate = cfg.get(\"lstm_forget_gate\", 1.0)\n\n # encoder rnn orthogonal initialization & LSTM forget gate\n if hasattr(model.encoder, \"rnn\"):\n\n if orthogonal:\n orthogonal_rnn_init_(model.encoder.rnn)\n\n if isinstance(model.encoder.rnn, nn.LSTM):\n lstm_forget_gate_init_(model.encoder.rnn, lstm_forget_gate)\n\n # decoder rnn orthogonal initialization & LSTM forget gate\n if hasattr(model.decoder, \"rnn\"):\n\n if orthogonal:\n orthogonal_rnn_init_(model.decoder.rnn)\n\n if isinstance(model.decoder.rnn, nn.LSTM):\n lstm_forget_gate_init_(model.decoder.rnn, lstm_forget_gate)\n\n\n\n\ndef initialize_feat_model(model: nn.Module, cfg: dict, txt_padding_idx: int) -> None:\n \"\"\"\n This initializes a model based on the provided config.\n\n All initializer configuration is part of the `model` section of the\n configuration file.\n For an example, see e.g. 
`https://github.com/joeynmt/joeynmt/\n blob/master/configs/iwslt_envi_xnmt.yaml#L47`\n\n The main initializer is set using the `initializer` key.\n Possible values are `xavier`, `uniform`, `normal` or `zeros`.\n (`xavier` is the default).\n\n When an initializer is set to `uniform`, then `init_weight` sets the\n range for the values (-init_weight, init_weight).\n\n When an initializer is set to `normal`, then `init_weight` sets the\n standard deviation for the weights (with mean 0).\n\n The word embedding initializer is set using `embed_initializer` and takes\n the same values. The default is `normal` with `embed_init_weight = 0.01`.\n\n Biases are initialized separately using `bias_initializer`.\n The default is `zeros`, but you can use the same initializers as\n the main initializer.\n\n Set `init_rnn_orthogonal` to True if you want RNN orthogonal initialization\n (for recurrent matrices). Default is False.\n\n `lstm_forget_gate` controls how the LSTM forget gate is initialized.\n Default is `1`.\n\n :param model: model to initialize\n :param cfg: the model configuration\n :param txt_padding_idx: index of spoken language text padding token\n \"\"\"\n\n # defaults: xavier, embeddings: normal 0.01, biases: zeros, no orthogonal\n gain = float(cfg.get(\"init_gain\", 1.0)) # for xavier\n init = cfg.get(\"initializer\", \"xavier\")\n init_weight = float(cfg.get(\"init_weight\", 0.01))\n\n embed_init = cfg.get(\"embed_initializer\", \"normal\")\n embed_init_weight = float(cfg.get(\"embed_init_weight\", 0.01))\n embed_gain = float(cfg.get(\"embed_init_gain\", 1.0)) # for xavier\n\n bias_init = cfg.get(\"bias_initializer\", \"zeros\")\n bias_init_weight = float(cfg.get(\"bias_init_weight\", 0.01))\n\n # pylint: disable=unnecessary-lambda, no-else-return\n def _parse_init(s, scale, _gain):\n scale = float(scale)\n assert scale > 0.0, \"incorrect init_weight\"\n if s.lower() == \"xavier\":\n return lambda p: nn.init.xavier_uniform_(p, gain=_gain)\n elif s.lower() == \"uniform\":\n return lambda p: nn.init.uniform_(p, a=-scale, b=scale)\n elif s.lower() == \"normal\":\n return lambda p: nn.init.normal_(p, mean=0.0, std=scale)\n elif s.lower() == \"zeros\":\n return lambda p: nn.init.zeros_(p)\n else:\n raise ValueError(\"unknown initializer\")\n\n init_fn_ = _parse_init(init, init_weight, gain)\n embed_init_fn_ = _parse_init(embed_init, embed_init_weight, embed_gain)\n bias_init_fn_ = _parse_init(bias_init, bias_init_weight, gain)\n\n with torch.no_grad():\n for name, p in model.named_parameters():\n if \"txt_embed\" in name:\n if \"lut\" in name:\n embed_init_fn_(p)\n\n elif \"bias\" in name:\n bias_init_fn_(p)\n\n elif len(p.size()) > 1:\n\n # RNNs combine multiple matrices is one, which messes up\n # xavier initialization\n if init == \"xavier\" and \"rnn\" in name:\n n = 1\n if \"encoder\" in name:\n n = 4 if isinstance(model.encoder.rnn, nn.LSTM) else 3\n elif \"decoder\" in name:\n n = 4 if isinstance(model.decoder.rnn, nn.LSTM) else 3\n xavier_uniform_n_(p.data, gain=gain, n=n)\n else:\n init_fn_(p)\n\n # zero out paddings\n if model.txt_embed is not None:\n model.txt_embed.lut.weight.data[txt_padding_idx].zero_()\n\n orthogonal = cfg.get(\"init_rnn_orthogonal\", False)\n lstm_forget_gate = cfg.get(\"lstm_forget_gate\", 1.0)\n\n # # encoder rnn orthogonal initialization & LSTM forget gate\n # if hasattr(model.encoder, \"rnn\"):\n\n # if orthogonal:\n # orthogonal_rnn_init_(model.encoder.rnn)\n\n # if isinstance(model.encoder.rnn, nn.LSTM):\n # lstm_forget_gate_init_(model.encoder.rnn, 
lstm_forget_gate)\n\n # # decoder rnn orthogonal initialization & LSTM forget gate\n # if hasattr(model.decoder, \"rnn\"):\n\n # if orthogonal:\n # orthogonal_rnn_init_(model.decoder.rnn)\n\n # if isinstance(model.decoder.rnn, nn.LSTM):\n # lstm_forget_gate_init_(model.decoder.rnn, lstm_forget_gate)\n" ]
[ [ "torch.nn.init.uniform_", "torch.no_grad", "torch.nn.init.orthogonal_", "torch.nn.init.normal_", "torch.nn.init.xavier_uniform_", "torch.nn.init._calculate_fan_in_and_fan_out", "torch.nn.init.zeros_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Phil1108/transformers
[ "a48f183f1f23568caed1cc4f9db71d25e17e91f1" ]
[ "src/transformers/modeling_reformer.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch REFORMER model. \"\"\"\n\nimport logging\nimport sys\nfrom collections import namedtuple\nfrom functools import reduce\nfrom operator import mul\n\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd.function import Function\nfrom torch.nn import CrossEntropyLoss, MSELoss\n\nfrom .activations import gelu, gelu_fast, gelu_new, swish\nfrom .configuration_reformer import ReformerConfig\nfrom .file_utils import (\n DUMMY_INPUTS,\n DUMMY_MASK,\n add_code_sample_docstrings,\n add_start_docstrings,\n add_start_docstrings_to_callable,\n)\nfrom .modeling_outputs import (\n BaseModelOutput,\n CausalLMOutput,\n MaskedLMOutput,\n QuestionAnsweringModelOutput,\n SequenceClassifierOutput,\n)\nfrom .modeling_utils import PreTrainedModel, apply_chunking_to_forward\n\n\nlogger = logging.getLogger(__name__)\n\n_CONFIG_FOR_DOC = \"ReformerConfig\"\n_TOKENIZER_FOR_DOC = \"ReformerTokenizer\"\n\nREFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [\n \"google/reformer-crime-and-punishment\",\n \"google/reformer-enwik8\",\n # See all Reformer models at https://huggingface.co/models?filter=reformer\n]\n\n\ndef mish(x):\n return x * torch.tanh(nn.functional.softplus(x))\n\n\nACT2FN = {\n \"gelu\": gelu,\n \"relu\": torch.nn.functional.relu,\n \"swish\": swish,\n \"gelu_new\": gelu_new,\n \"gelu_fast\": gelu_fast,\n \"mish\": mish,\n}\n\n\n# Define named tuples for nn.Modules here\nLSHSelfAttentionOutput = namedtuple(\"LSHSelfAttentionOutput\", [\"hidden_states\", \"attention_probs\", \"buckets\"])\nLocalSelfAttentionOutput = namedtuple(\"LocalSelfAttentionOutput\", [\"hidden_states\", \"attention_probs\"])\nAttentionOutput = namedtuple(\"AttentionOutput\", [\"hidden_states\", \"attention_probs\", \"buckets\"])\nReformerOutput = namedtuple(\"ReformerOutput\", [\"hidden_states\", \"attn_output\", \"attention_probs\", \"buckets\"])\nReformerBackwardOutput = namedtuple(\n \"ReformerBackwardOutput\", [\"attn_output\", \"hidden_states\", \"grad_attn_output\", \"grad_hidden_states\"]\n)\nReformerEncoderOutput = namedtuple(\"ReformerEncoderOutput\", [\"hidden_states\", \"all_hidden_states\", \"all_attentions\"])\n\n\ndef _get_least_common_mult_chunk_len(config):\n attn_types = config.attn_layers\n attn_types_set = set(attn_types)\n if len(attn_types_set) == 1 and attn_types[0] == \"lsh\":\n return config.lsh_attn_chunk_length\n elif len(attn_types_set) == 1 and attn_types[0] == \"local\":\n return config.local_attn_chunk_length\n elif len(attn_types_set) == 2 and attn_types_set == set([\"lsh\", \"local\"]):\n return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length)\n else:\n raise NotImplementedError(\n \"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {}. 
Select attn layer types from ['lsh', 'local'] only.\".format(\n config.attn_layers\n )\n )\n\n\nclass AxialPositionEmbeddings(nn.Module):\n \"\"\"Constructs axial position embeddings. Useful for very long input\n sequences to save memory and time.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.axial_pos_shape = config.axial_pos_shape\n self.axial_pos_embds_dim = config.axial_pos_embds_dim\n self.dropout = config.hidden_dropout_prob\n\n self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config)\n self.weights = nn.ParameterList()\n\n assert (\n sum(self.axial_pos_embds_dim) == config.hidden_size\n ), \"Make sure that config.axial_pos_embds factors: {} sum to config.hidden_size: {}\".format(\n self.axial_pos_embds_dim, config.hidden_size\n )\n\n # create weights\n for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim):\n # create expanded shapes\n ax_shape = [1] * len(self.axial_pos_shape)\n ax_shape[axis] = self.axial_pos_shape[axis]\n ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,)\n\n # create tensor and init\n self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32)))\n\n def forward(self, position_ids):\n # broadcast weights to correct shape\n batch_size = position_ids.shape[0]\n sequence_length = position_ids.shape[1]\n\n broadcasted_weights = [\n weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights\n ]\n\n if self.training is True:\n assert (\n reduce(mul, self.axial_pos_shape) == sequence_length\n ), \"If training, make sure that config.axial_pos_shape factors: {} multiply to sequence length. Got prod({}) != sequence_length: {}. You might want to consider padding your sequence length to {} or changing config.axial_pos_shape.\".format(\n self.axial_pos_shape, self.axial_pos_shape, sequence_length, reduce(mul, self.axial_pos_shape)\n )\n if self.dropout > 0:\n weights = torch.cat(broadcasted_weights, dim=-1)\n # permute weights so that 2D correctly drops dims 1 and 2\n transposed_weights = weights.transpose(2, 1)\n # drop entire matrix of last two dims (prev dims 1 and 2)\n dropped_transposed_weights = nn.functional.dropout2d(\n transposed_weights, p=self.dropout, training=self.training\n )\n dropped_weights = dropped_transposed_weights.transpose(2, 1)\n\n position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1))\n\n else:\n position_encodings = torch.cat(\n [torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights],\n dim=-1,\n )\n\n else:\n assert (\n reduce(mul, self.axial_pos_shape) >= sequence_length\n ), \"Make sure that config.axial_pos_shape factors: {} multiply at least to max(sequence_length, least_common_mult_chunk_length): max({}, {})\".format(\n self.axial_pos_shape, sequence_length, self.least_common_mult_chunk_length,\n )\n\n # compute how many columns are needed\n required_pos_encodings_columns = -(-sequence_length // self.axial_pos_shape[1])\n\n # cut to columns that are needed\n position_encodings = torch.cat(\n [weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1\n )\n position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1]))[\n :, :sequence_length\n ]\n\n return position_encodings\n\n\nclass PositionEmbeddings(nn.Module):\n \"\"\"Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dropout = 
config.hidden_dropout_prob\n self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)\n\n def forward(self, position_ids):\n position_embeddings = self.embedding(position_ids)\n position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training)\n return position_embeddings\n\n\nclass ReformerEmbeddings(nn.Module):\n \"\"\"Construct the embeddings from word, position and token_type embeddings.\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.max_position_embeddings = config.max_position_embeddings\n self.dropout = config.hidden_dropout_prob\n\n self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)\n self.position_embeddings = (\n AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config)\n )\n\n def forward(self, input_ids=None, position_ids=None, inputs_embeds=None):\n if input_ids is not None:\n input_shape = input_ids.size()\n device = input_ids.device\n else:\n input_shape = inputs_embeds.size()[:-1]\n device = inputs_embeds.device\n\n seq_length = input_shape[1]\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n\n assert (\n position_ids.shape[-1] <= self.max_position_embeddings\n ), \"Sequence Length: {} has to be larger equal than config.max_position_embeddings: {}\".format(\n position_ids.shape[-1], self.max_position_embeddings\n )\n\n # dropout\n embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training)\n\n # add positional embeddings\n position_embeddings = self.position_embeddings(position_ids)\n embeddings = embeddings + position_embeddings\n return embeddings\n\n\nclass EfficientAttentionMixin:\n \"\"\"\n A few utilities for nn.Modules in Reformer, to be used as a mixin.\n \"\"\"\n\n def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):\n \"\"\" Used to implement attention between consecutive chunks.\n\n Args:\n vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]\n num_chunks_before: chunks before current chunk to include in attention\n num_chunks_after: chunks after current chunk to include in attention\n\n Returns:\n tensor of shape [num_chunks, N * chunk_length, ...], where\n N = (1 + num_chunks_before + num_chunks_after).\n \"\"\"\n if num_chunks_before == 0 and num_chunks_after == 0:\n return vectors\n\n slices = []\n for i in range(-num_chunks_before, num_chunks_after + 1):\n if i == 0:\n slices.append(vectors)\n else:\n slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))\n return torch.cat(slices, dim=3)\n\n def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):\n \"\"\"\n splits hidden_size dim into attn_head_size and num_attn_heads\n \"\"\"\n new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)\n x = x.view(*new_x_shape)\n return x.transpose(2, 1)\n\n def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):\n \"\"\"\n merges attn_head_size dim and num_attn_heads dim into hidden_size\n \"\"\"\n x = x.permute(0, 2, 1, 3)\n return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size))\n\n def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):\n \"\"\"\n splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` 
dims\n \"\"\"\n batch_size = vectors.shape[0]\n split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)\n\n if len(vectors.shape) == 4:\n return torch.reshape(vectors, split_dim_shape + (attn_head_size,))\n elif len(vectors.shape) == 3:\n return torch.reshape(vectors, split_dim_shape)\n else:\n raise ValueError(\"Input vector rank should be one of [3, 4], but is: {}\".format(len(vectors.shape)))\n\n\nclass LSHSelfAttention(nn.Module, EfficientAttentionMixin):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.chunk_length = config.lsh_attn_chunk_length\n self.num_hashes = config.num_hashes\n self.num_buckets = config.num_buckets\n self.num_chunks_before = config.lsh_num_chunks_before\n self.num_chunks_after = config.lsh_num_chunks_after\n self.hash_seed = config.hash_seed\n self.is_decoder = config.is_decoder\n self.max_position_embeddings = config.max_position_embeddings\n\n self.dropout = config.lsh_attention_probs_dropout_prob\n\n self.num_attention_heads = config.num_attention_heads\n self.attention_head_size = config.attention_head_size\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.hidden_size = config.hidden_size\n\n # projection matrices\n self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n\n # save mask value here. Need fp32 and fp16 mask values\n self.register_buffer(\"self_mask_value_float16\", torch.tensor(-1e3))\n self.register_buffer(\"self_mask_value_float32\", torch.tensor(-1e5))\n self.register_buffer(\"mask_value_float16\", torch.tensor(-1e4))\n self.register_buffer(\"mask_value_float32\", torch.tensor(-1e9))\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n num_hashes=None,\n output_attentions=False,\n buckets=None,\n **kwargs\n ):\n sequence_length = hidden_states.shape[1]\n batch_size = hidden_states.shape[0]\n\n # num hashes can optionally be overwritten by user\n num_hashes = num_hashes if num_hashes is not None else self.num_hashes\n\n # project hidden_states to query_key and value\n query_key_vectors = self.query_key(hidden_states)\n value_vectors = self.value(hidden_states)\n\n # free memory\n del hidden_states\n\n query_key_vectors = self._split_hidden_size_dim(\n query_key_vectors, self.num_attention_heads, self.attention_head_size\n )\n value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size)\n\n assert (\n query_key_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of query_key_vectors is {} but should be {}.\".format(\n query_key_vectors.shape[-1], self.attention_head_size\n )\n assert (\n value_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of value_vectors is {} but should be {}.\".format(\n value_vectors.shape[-1], self.attention_head_size\n )\n\n # LSH attention only makes sense if chunked attention should be performed\n if self.chunk_length < sequence_length:\n # set `num_buckets` on the fly, recommended way to do it\n if self.num_buckets is None:\n self._set_num_buckets(sequence_length)\n\n # use cached buckets for backprop only\n if buckets is None:\n # hash query key vectors into buckets\n buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)\n\n assert (\n int(buckets.shape[-1]) == num_hashes * sequence_length\n ), \"last dim of buckets is {}, but should be {}\".format(buckets.shape[-1], num_hashes * sequence_length)\n\n 
sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(\n sequence_length, buckets, num_hashes\n )\n\n # make sure bucket idx is not longer then sequence length\n sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length\n\n # cluster query key value vectors according to hashed buckets\n query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)\n value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)\n\n query_key_vectors = self._split_seq_length_dim_to(\n query_key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,\n )\n value_vectors = self._split_seq_length_dim_to(\n value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,\n )\n\n if self.chunk_length is None:\n assert (\n self.num_chunks_before == 0 and self.num_chunks_after == 0\n ), \"If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0.\"\n else:\n # get sequence length indices\n sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(\n batch_size, self.num_attention_heads, 1\n )\n\n # scale key vectors\n key_vectors = self._len_and_dim_norm(query_key_vectors)\n\n # get attention probs\n out_vectors, logits, attention_probs = self._attend(\n query_vectors=query_key_vectors,\n key_vectors=key_vectors,\n value_vectors=value_vectors,\n sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,\n attention_mask=attention_mask,\n head_mask=head_mask,\n sequence_length=sequence_length,\n )\n\n # free memory\n del query_key_vectors, key_vectors, value_vectors\n\n # re-order out_vectors and logits\n if self.chunk_length < sequence_length:\n # sort clusters back to correct ordering\n out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)\n\n # sum up all hash rounds\n if num_hashes > 1:\n out_vectors = self._split_seq_length_dim_to(\n out_vectors, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size,\n )\n logits = self._split_seq_length_dim_to(\n logits, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size,\n ).unsqueeze(-1)\n\n probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))\n out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)\n # free memory\n del probs_vectors\n\n # free memory\n del logits\n\n assert out_vectors.shape == (\n batch_size,\n self.num_attention_heads,\n sequence_length,\n self.attention_head_size,\n ), \"out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`.\"\n\n out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)\n\n if output_attentions is False:\n attention_probs = ()\n\n return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)\n\n def _hash_vectors(self, vectors, num_hashes, attention_mask):\n batch_size = vectors.shape[0]\n\n # See https://arxiv.org/pdf/1509.02897.pdf\n # We sample a different random rotation for each round of hashing to\n # decrease the probability of hash misses.\n if isinstance(self.num_buckets, int):\n assert (\n self.num_buckets % 2 == 0\n ), \"There should be an even number of bucktes, but `self.num_bucktes`: {}\".format(self.num_buckets)\n rotation_size = self.num_buckets\n 
num_buckets = self.num_buckets\n else:\n # Factorize the hash if self.num_buckets is a list or tuple\n rotation_size, num_buckets = 0, 1\n for bucket_factor in self.num_buckets:\n assert bucket_factor % 2 == 0, \"The number of buckets should be even, but `num_bucket`: {}\".format(\n bucket_factor\n )\n rotation_size = rotation_size + bucket_factor\n num_buckets = num_buckets * bucket_factor\n\n # remove gradient\n vectors = vectors.detach()\n\n if self.hash_seed is not None:\n # for determinism\n torch.manual_seed(self.hash_seed)\n\n rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2)\n # create a random self.attention_head_size x num_hashes x num_buckets/2\n random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)\n\n # Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2\n rotated_vectors = torch.einsum(\"bmtd,mdhr->bmhtr\", vectors, random_rotations)\n\n if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:\n rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)\n buckets = torch.argmax(rotated_vectors, dim=-1)\n else:\n # Get the buckets for them and combine.\n buckets, cur_sum, cur_product = None, 0, 1\n for bucket_factor in self.num_buckets:\n rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)]\n cur_sum = cur_sum + bucket_factor // 2\n rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1)\n\n if buckets is None:\n buckets = torch.argmax(rotated_vectors_factor, dim=-1)\n else:\n buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1))\n\n cur_product = cur_product * bucket_factor\n\n if attention_mask is not None:\n # add an extra bucket for padding tokens only\n num_buckets = num_buckets + 1\n # assign padding tokens extra bucket\n buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape)\n buckets = torch.where(\n buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device)\n )\n\n # buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len).\n # Next we add offsets so that bucket numbers from different hashing rounds don't overlap.\n offsets = torch.arange(num_hashes, device=vectors.device)\n offsets = (offsets * num_buckets).view((1, 1, -1, 1))\n\n # expand to batch size and num attention heads\n offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:])\n offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)\n\n return offset_buckets\n\n def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):\n # no gradients are needed\n with torch.no_grad():\n batch_size = buckets.shape[0]\n\n # arange and expand\n orig_indices = torch.arange(num_hashes * sequence_length, device=buckets.device).view(1, 1, -1)\n orig_indices = orig_indices.expand(batch_size, self.num_attention_heads, orig_indices.shape[-1])\n\n # scale buckets\n scaled_buckets = sequence_length * buckets + (orig_indices % sequence_length)\n\n # remove gradient\n scaled_buckets = scaled_buckets.detach()\n\n # Hash-based sort\n sorted_bucket_idx = torch.argsort(scaled_buckets, dim=-1)\n\n # create simple indices to scatter to, to have undo sort\n indices = (\n torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)\n .view(1, 1, -1)\n .expand(sorted_bucket_idx.shape)\n )\n\n # get undo sort\n undo_sorted_bucket_idx = 
sorted_bucket_idx.new(*sorted_bucket_idx.size())\n undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)\n\n return sorted_bucket_idx, undo_sorted_bucket_idx\n\n def _set_num_buckets(self, sequence_length):\n # `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper\n num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1\n # make sure buckets are power of 2\n num_buckets = 2 ** num_buckets_pow_2\n\n # factorize `num_buckets` if `num_buckets` becomes too large\n num_buckets_limit = 2 * max(\n int((self.max_position_embeddings // self.chunk_length) ** (0.5)), self.chunk_length,\n )\n if num_buckets > num_buckets_limit:\n num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)]\n\n logger.warning(\"config.num_buckets is not set. Setting config.num_buckets to {}...\".format(num_buckets))\n\n # set num buckets in config to be properly saved\n self.config.num_buckets = num_buckets\n self.num_buckets = num_buckets\n\n def _attend(\n self,\n query_vectors,\n key_vectors,\n value_vectors,\n sorted_bucket_idx_per_hash,\n attention_mask,\n head_mask,\n sequence_length,\n ):\n\n # look at previous and following chunks if chunked attention\n if self.chunk_length < sequence_length:\n key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)\n value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)\n\n # get logits and dots\n query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))\n\n # free memory\n del query_vectors, key_vectors\n\n # if chunked attention split bucket idxs to query and key\n if self.chunk_length < sequence_length:\n query_bucket_idx = self._split_seq_length_dim_to(\n sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads\n )\n key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)\n else:\n query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash\n\n # get correct mask values depending on precision\n if query_key_dots.dtype == torch.float16:\n self_mask_value = self.self_mask_value_float16.half()\n mask_value = self.mask_value_float16.half()\n else:\n self_mask_value = self.self_mask_value_float32\n mask_value = self.mask_value_float32\n\n mask = self._compute_attn_mask(\n query_bucket_idx, key_value_bucket_idx, attention_mask, query_key_dots.shape, sequence_length\n )\n\n if mask is not None:\n query_key_dots = torch.where(mask, query_key_dots, mask_value)\n\n # free memory\n del mask\n\n # Self mask is ALWAYS applied.\n # From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf):\n # \" While attention to the future is not allowed, typical implementations of the\n # Transformer do allow a position to attend to itself.\n # Such behavior is undesirable in a shared-QK formulation because the dot-product\n # of a query vector with itself will almost always be greater than the dot product of a\n # query vector with a vector at another position. We therefore modify the masking\n # to forbid a token from attending to itself, except in situations\n # where a token has no other valid attention targets (e.g. 
the first token in a sequence) \"\n\n self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(\n query_bucket_idx.device\n )\n\n # apply self_mask\n query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)\n\n # free memory\n del self_mask\n\n logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)\n # dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]`\n attention_probs = torch.exp(query_key_dots - logits)\n\n # free memory\n del query_key_dots\n\n # dropout\n attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n # attend values\n out_vectors = torch.matmul(attention_probs, value_vectors)\n\n # free memory\n del value_vectors\n\n # merge chunk length\n if self.chunk_length < sequence_length:\n logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)\n out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)\n\n return out_vectors, logits, attention_probs\n\n def _compute_attn_mask(self, query_indices, key_indices, attention_mask, query_key_dot_shape, sequence_length):\n\n # attention mask for LSH\n if attention_mask is not None:\n # if chunked attention, the attention mask has to correspond to LSH order\n attention_mask = attention_mask.to(torch.uint8)[:, None, :]\n if sequence_length > self.chunk_length:\n # expand attn_mask to fit with key_value_bucket_idx shape\n attention_mask = attention_mask[:, None, :]\n attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,))\n # extract attention mask from LSH sorted key_indices\n attention_mask = torch.gather(attention_mask, -1, key_indices)\n\n attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape)\n\n # Causal mask\n if self.is_decoder is True:\n causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)\n\n # add attention mask if not None\n if attention_mask is not None:\n attention_mask = causal_mask * attention_mask\n else:\n attention_mask = causal_mask\n\n return attention_mask\n\n def _len_and_dim_norm(self, vectors):\n \"\"\"\n length and attention head size dim normalization\n \"\"\"\n vectors = self._len_norm(vectors)\n vectors = vectors * torch.rsqrt(\n torch.tensor(self.attention_head_size, device=vectors.device, dtype=vectors.dtype)\n )\n return vectors\n\n def _len_norm(self, x, epsilon=1e-6):\n \"\"\"\n length normalization\n \"\"\"\n variance = torch.mean(x ** 2, -1, keepdim=True)\n norm_x = x * torch.rsqrt(variance + epsilon)\n return norm_x\n\n def _gather_by_expansion(self, vectors, idxs, num_hashes):\n \"\"\"\n expand dims of idxs and vectors for all hashes and gather\n \"\"\"\n expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)\n vectors = vectors.repeat(1, 1, num_hashes, 1)\n return torch.gather(vectors, 2, expanded_idxs)\n\n\nclass ReverseSort(Function):\n \"\"\"\n After chunked attention is applied which sorted clusters,\n original ordering has to be restored.\n Since customized backward function is used for Reformer,\n the gradients of the output vectors have to be explicitely\n sorted here.\n \"\"\"\n\n @staticmethod\n def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):\n # save sorted_bucket_idx for backprop\n with torch.no_grad():\n ctx.sorted_bucket_idx = 
sorted_bucket_idx\n\n # undo sort to have correct order for next layer\n expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)\n out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices)\n logits = torch.gather(logits, 2, undo_sorted_bucket_idx)\n return out_vectors, logits\n\n @staticmethod\n def backward(ctx, grad_out_vectors, grad_logits):\n # get parameters saved in ctx\n sorted_bucket_idx = ctx.sorted_bucket_idx\n\n expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape)\n # reverse sort of forward\n grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices)\n grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx)\n\n # return grad and `None` fillers for last 2 forward args\n return grad_out_vectors, grad_logits, None, None\n\n\nclass LocalSelfAttention(nn.Module, EfficientAttentionMixin):\n def __init__(self, config):\n super().__init__()\n\n self.num_attention_heads = config.num_attention_heads\n self.chunk_length = config.local_attn_chunk_length\n self.num_chunks_before = config.local_num_chunks_before\n self.num_chunks_after = config.local_num_chunks_after\n self.is_decoder = config.is_decoder\n self.pad_token_id = config.pad_token_id\n\n self.attention_head_size = config.attention_head_size\n self.all_head_size = self.num_attention_heads * self.attention_head_size\n self.hidden_size = config.hidden_size\n\n # projection matrices\n self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)\n\n self.dropout = config.local_attention_probs_dropout_prob\n\n # save mask value here\n self.register_buffer(\"mask_value_float16\", torch.tensor(-1e4))\n self.register_buffer(\"mask_value_float32\", torch.tensor(-1e9))\n\n def forward(self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, **kwargs):\n sequence_length = hidden_states.shape[1]\n batch_size = hidden_states.shape[0]\n\n # project hidden_states to query, key and value\n query_vectors = self.query(hidden_states)\n key_vectors = self.key(hidden_states)\n value_vectors = self.value(hidden_states)\n\n # split last dim into `config.num_attention_heads` and `config.attention_head_size`\n query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size)\n key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size)\n value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size)\n\n assert (\n query_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of query_key_vectors is {} but should be {}.\".format(\n query_vectors.shape[-1], self.attention_head_size\n )\n assert (\n key_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of query_key_vectors is {} but should be {}.\".format(\n key_vectors.shape[-1], self.attention_head_size\n )\n assert (\n value_vectors.shape[-1] == self.attention_head_size\n ), \"last dim of query_key_vectors is {} but should be {}.\".format(\n value_vectors.shape[-1], self.attention_head_size\n )\n\n if self.chunk_length is None:\n assert (\n self.num_chunks_before == 0 and self.num_chunks_after == 0\n ), \"If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0.\"\n\n # normalize key vectors\n key_vectors 
= key_vectors / torch.sqrt(\n torch.tensor(self.attention_head_size, device=key_vectors.device, dtype=key_vectors.dtype)\n )\n\n # get sequence length indices\n indices = torch.arange(sequence_length, device=query_vectors.device).repeat(\n batch_size, self.num_attention_heads, 1\n )\n\n # if input should be chunked\n if self.chunk_length < sequence_length:\n # chunk vectors\n # B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size\n query_vectors = self._split_seq_length_dim_to(\n query_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,\n )\n key_vectors = self._split_seq_length_dim_to(\n key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,\n )\n value_vectors = self._split_seq_length_dim_to(\n value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size,\n )\n\n # chunk indices\n query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)\n key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)\n\n # append chunks before and after\n key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)\n value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)\n key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after)\n else:\n query_indices = key_indices = indices\n\n # query-key matmul: QK^T\n query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))\n\n # free memory\n del query_vectors, key_vectors\n\n mask = self._compute_attn_mask(\n query_indices, key_indices, attention_mask, query_key_dots.shape, sequence_length\n )\n\n if mask is not None:\n # get mask tensor depending on half precision or not\n if query_key_dots.dtype == torch.float16:\n mask_value = self.mask_value_float16.half()\n else:\n mask_value = self.mask_value_float32\n\n query_key_dots = torch.where(mask, query_key_dots, mask_value)\n\n # free memory\n del mask\n\n # softmax\n logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)\n attention_probs = torch.exp(query_key_dots - logits)\n\n # free memory\n del logits\n\n # dropout\n attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)\n\n # Mask heads if we want to\n if head_mask is not None:\n attention_probs = attention_probs * head_mask\n\n # attend values\n out_vectors = torch.matmul(attention_probs, value_vectors)\n\n # free memory\n del value_vectors\n\n # merge chunk length\n if self.chunk_length < sequence_length:\n out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)\n\n assert out_vectors.shape == (batch_size, self.num_attention_heads, sequence_length, self.attention_head_size,)\n\n out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)\n\n if output_attentions is False:\n attention_probs = ()\n\n return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs)\n\n def _compute_attn_mask(self, query_indices, key_indices, attention_mask, query_key_dots_shape, sequence_length):\n\n # chunk attention mask and look before and after\n if attention_mask is not None:\n attention_mask = attention_mask.to(torch.uint8)[:, None, :]\n\n if self.chunk_length < sequence_length:\n attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1)\n attention_mask = self._look_adjacent(attention_mask, 
self.num_chunks_before, self.num_chunks_after)\n # create attn_mask\n attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dots_shape)\n\n # Causal mask\n if self.is_decoder is True:\n causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)\n\n # add attention mask if not None\n if attention_mask is not None:\n attention_mask = causal_mask * attention_mask\n else:\n attention_mask = causal_mask\n\n return attention_mask\n\n\nclass ReformerSelfOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n all_head_size = config.num_attention_heads * config.attention_head_size\n self.dropout = config.hidden_dropout_prob\n\n self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n return hidden_states\n\n\nclass ReformerAttention(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.layer_id = layer_id\n self.attn_layers = config.attn_layers\n\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n\n if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == \"lsh\":\n self.self_attention = LSHSelfAttention(config)\n elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == \"local\":\n self.self_attention = LocalSelfAttention(config)\n elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == set([\"lsh\", \"local\"]):\n # get correct attn layers\n if self.attn_layers[self.layer_id] == \"lsh\":\n self.self_attention = LSHSelfAttention(config)\n else:\n self.self_attention = LocalSelfAttention(config)\n else:\n raise NotImplementedError(\n \"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {}. 
Select attn layer types from ['lsh', 'local'] only.\".format(\n self.attn_layers\n )\n )\n self.output = ReformerSelfOutput(config)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n num_hashes=None,\n output_attentions=False,\n buckets=None,\n ):\n hidden_states = self.layer_norm(hidden_states)\n\n # use cached buckets for backprob if buckets not None for LSHSelfAttention\n self_attention_outputs = self.self_attention(\n hidden_states=hidden_states,\n head_mask=head_mask,\n attention_mask=attention_mask,\n num_hashes=num_hashes,\n output_attentions=output_attentions,\n buckets=buckets,\n )\n attention_output = self.output(self_attention_outputs.hidden_states)\n\n # add buckets if necessary\n if hasattr(self_attention_outputs, \"buckets\"):\n buckets = self_attention_outputs.buckets\n else:\n buckets = None\n\n return AttentionOutput(\n hidden_states=attention_output, attention_probs=self_attention_outputs.attention_probs, buckets=buckets,\n )\n\n\nclass ReformerFeedForwardDense(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dropout = config.hidden_dropout_prob\n\n if isinstance(config.hidden_act, str):\n self.act_fn = ACT2FN[config.hidden_act]\n else:\n self.act_fn = config.hidden_act\n\n self.dense = nn.Linear(config.hidden_size, config.feed_forward_size)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n hidden_states = self.act_fn(hidden_states)\n return hidden_states\n\n\nclass ReformerFeedForwardOutput(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dropout = config.hidden_dropout_prob\n\n self.dense = nn.Linear(config.feed_forward_size, config.hidden_size)\n\n def forward(self, hidden_states):\n hidden_states = self.dense(hidden_states)\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n return hidden_states\n\n\nclass ChunkReformerFeedForward(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.chunk_size_feed_forward = config.chunk_size_feed_forward\n self.seq_len_dim = 1\n\n self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.dense = ReformerFeedForwardDense(config)\n self.output = ReformerFeedForwardOutput(config)\n\n def forward(self, attention_output):\n return apply_chunking_to_forward(\n self.chunk_size_feed_forward, self.seq_len_dim, self.forward_chunk, attention_output,\n )\n\n def forward_chunk(self, hidden_states):\n hidden_states = self.layer_norm(hidden_states)\n hidden_states = self.dense(hidden_states)\n return self.output(hidden_states)\n\n\nclass ReformerLayer(nn.Module):\n def __init__(self, config, layer_id=0):\n super().__init__()\n self.attention = ReformerAttention(config, layer_id)\n # dropout requires to have the same\n # seed for forward and backward pass\n self.attention_seed = None\n self.feed_forward_seed = None\n\n self.feed_forward = ChunkReformerFeedForward(config)\n\n def _init_attention_seed(self):\n \"\"\"\n This function sets a new seed for the\n attention layer to make dropout deterministic\n for both forward calls: 1 normal forward\n call and 1 forward call in backward\n to recalculate activations.\n \"\"\"\n\n # randomize seeds\n if next(self.parameters()).device.type == \"cuda\":\n # GPU\n device_idx = torch.cuda.current_device()\n self.attention_seed = torch.cuda.default_generators[device_idx].seed()\n 
torch.cuda.manual_seed(self.attention_seed)\n else:\n # CPU\n self.attention_seed = int(torch.seed() % sys.maxsize)\n torch.manual_seed(self.attention_seed)\n\n def _init_feed_forward_seed(self):\n \"\"\"\n This function sets a new seed for the\n feed forward layer to make dropout deterministic\n for both forward calls: 1 normal forward\n call and 1 forward call in backward\n to recalculate activations.\n \"\"\"\n\n # randomize seeds\n if next(self.parameters()).device.type == \"cuda\":\n # GPU\n device_idx = torch.cuda.current_device()\n self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()\n torch.cuda.manual_seed(self.feed_forward_seed)\n else:\n # CPU\n self.feed_forward_seed = int(torch.seed() % sys.maxsize)\n torch.manual_seed(self.feed_forward_seed)\n\n def forward(\n self,\n prev_attn_output,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n num_hashes=None,\n output_attentions=False,\n ):\n with torch.no_grad():\n # every forward pass we sample a different seed\n # for dropout and save for forward fn in backward pass\n # to have correct dropout\n self._init_attention_seed()\n attn_outputs = self.attention(\n hidden_states=hidden_states,\n head_mask=head_mask,\n attention_mask=attention_mask,\n num_hashes=num_hashes,\n output_attentions=output_attentions,\n )\n attn_output = attn_outputs.hidden_states\n\n # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)\n # Y_1 = X_1 + f(X_2)\n attn_output = prev_attn_output + attn_output\n\n # free memory\n del prev_attn_output\n\n # every forward pass we sample a different seed\n # for dropout and save seed for forward fn in backward\n # to have correct dropout\n self._init_feed_forward_seed()\n # Y_2 = X_2 + g(Y_1)\n hidden_states = hidden_states + self.feed_forward(attn_output)\n\n return ReformerOutput(\n attn_output=attn_output,\n hidden_states=hidden_states,\n attention_probs=attn_outputs.attention_probs,\n buckets=attn_outputs.buckets,\n )\n\n def backward_pass(\n self,\n next_attn_output,\n hidden_states,\n grad_attn_output,\n grad_hidden_states,\n attention_mask=None,\n head_mask=None,\n buckets=None,\n ):\n # Implements the backward pass for reversible ResNets.\n # A good blog post on how this works can be found here:\n # Implementation of RevNet (see Fig. 
6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)\n # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py\n\n with torch.enable_grad():\n next_attn_output.requires_grad = True\n\n # set seed to have correct dropout\n torch.manual_seed(self.feed_forward_seed)\n # g(Y_1)\n res_hidden_states = self.feed_forward(next_attn_output)\n res_hidden_states.backward(grad_hidden_states, retain_graph=True)\n\n with torch.no_grad():\n # X_2 = Y_2 - g(Y_1)\n hidden_states = hidden_states - res_hidden_states\n del res_hidden_states\n\n grad_attn_output = grad_attn_output + next_attn_output.grad\n next_attn_output.grad = None\n\n with torch.enable_grad():\n hidden_states.requires_grad = True\n\n # set seed to have correct dropout\n torch.manual_seed(self.attention_seed)\n # f(X_2)\n # use cached buckets for backprob if buckets not None for LSHSelfAttention\n output = self.attention(\n hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, buckets=buckets,\n ).hidden_states\n output.backward(grad_attn_output, retain_graph=True)\n\n with torch.no_grad():\n # X_1 = Y_1 - f(X_2)\n attn_output = next_attn_output - output\n del output, next_attn_output\n\n grad_hidden_states = grad_hidden_states + hidden_states.grad\n hidden_states.grad = None\n hidden_states = hidden_states.detach()\n\n return ReformerBackwardOutput(\n attn_output=attn_output,\n hidden_states=hidden_states,\n grad_attn_output=grad_attn_output,\n grad_hidden_states=grad_hidden_states,\n )\n\n\nclass _ReversibleFunction(Function):\n \"\"\"\n To prevent PyTorch from performing the usual backpropagation,\n a customized backward function is implemented here. This way\n it is made sure that no memory expensive activations are\n saved during the forward pass.\n This function is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py\n \"\"\"\n\n @staticmethod\n def forward(\n ctx,\n hidden_states,\n layers,\n attention_mask,\n head_mask,\n num_hashes,\n all_hidden_states,\n all_attentions,\n output_hidden_states,\n output_attentions,\n ):\n all_buckets = ()\n\n # split duplicated tensor\n hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1)\n\n for layer, layer_head_mask in zip(layers, head_mask):\n if output_hidden_states is True:\n all_hidden_states.append(hidden_states)\n\n layer_outputs = layer(\n prev_attn_output=attn_output,\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n head_mask=layer_head_mask,\n num_hashes=num_hashes,\n output_attentions=output_attentions,\n )\n attn_output = layer_outputs.attn_output\n hidden_states = layer_outputs.hidden_states\n all_buckets = all_buckets + (layer_outputs.buckets,)\n\n if output_attentions:\n all_attentions.append(layer_outputs.attention_probs)\n\n # Add last layer\n if output_hidden_states is True:\n all_hidden_states.append(hidden_states)\n\n # attach params to ctx for backward\n ctx.save_for_backward(attn_output.detach(), hidden_states.detach())\n ctx.layers = layers\n ctx.all_buckets = all_buckets\n ctx.head_mask = head_mask\n ctx.attention_mask = attention_mask\n\n # Concatenate 2 RevNet outputs\n return torch.cat([attn_output, hidden_states], dim=-1)\n\n @staticmethod\n def backward(ctx, grad_hidden_states):\n grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)\n\n # retrieve params from ctx for backward\n attn_output, hidden_states = ctx.saved_tensors\n\n # create 
tuple\n output = ReformerBackwardOutput(\n attn_output=attn_output,\n hidden_states=hidden_states,\n grad_attn_output=grad_attn_output,\n grad_hidden_states=grad_hidden_states,\n )\n\n # free memory\n del grad_attn_output, grad_hidden_states, attn_output, hidden_states\n\n layers = ctx.layers\n all_buckets = ctx.all_buckets\n head_mask = ctx.head_mask\n attention_mask = ctx.attention_mask\n\n for idx, layer in enumerate(layers[::-1]):\n # pop last buckets from stack\n buckets = all_buckets[-1]\n all_buckets = all_buckets[:-1]\n\n # backprop\n output = layer.backward_pass(\n next_attn_output=output.attn_output,\n hidden_states=output.hidden_states,\n grad_attn_output=output.grad_attn_output,\n grad_hidden_states=output.grad_hidden_states,\n head_mask=head_mask[len(layers) - idx - 1],\n attention_mask=attention_mask,\n buckets=buckets,\n )\n\n assert all_buckets == (), \"buckets have to be empty after backpropagation\"\n grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1)\n\n # num of return vars has to match num of forward() args\n # return gradient for hidden_states arg and None for other args\n return grad_hidden_states, None, None, None, None, None, None, None, None\n\n\nclass ReformerEncoder(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dropout = config.hidden_dropout_prob\n\n self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)])\n # Reformer is using Rev Nets, thus last layer outputs are concatenated and\n # Layer Norm is done over 2 * hidden_size\n self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps)\n\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n num_hashes=None,\n output_hidden_states=False,\n output_attentions=False,\n ):\n # hidden_states and attention lists to be filled if wished\n all_hidden_states = []\n all_attentions = []\n\n # concat same tensor for reversible ResNet\n hidden_states = torch.cat([hidden_states, hidden_states], dim=-1)\n hidden_states = _ReversibleFunction.apply(\n hidden_states,\n self.layers,\n attention_mask,\n head_mask,\n num_hashes,\n all_hidden_states,\n all_attentions,\n output_hidden_states,\n output_attentions,\n )\n\n # Apply layer norm to concatenated hidden states\n hidden_states = self.layer_norm(hidden_states)\n\n # Apply dropout\n hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)\n\n return ReformerEncoderOutput(\n hidden_states=hidden_states, all_hidden_states=all_hidden_states, all_attentions=all_attentions\n )\n\n\nclass ReformerOnlyLMHead(nn.Module):\n def __init__(self, config):\n super().__init__()\n # Reformer is using Rev Nets, thus last layer outputs are concatenated and\n # Layer Norm is done over 2 * hidden_size\n self.seq_len_dim = 1\n self.chunk_size_lm_head = config.chunk_size_lm_head\n self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False)\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n return apply_chunking_to_forward(self.chunk_size_lm_head, self.seq_len_dim, self.forward_chunk, hidden_states)\n\n def forward_chunk(self, hidden_states):\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n\n\nclass ReformerPreTrainedModel(PreTrainedModel):\n \"\"\" An abstract class to handle 
weights initialization and\n a simple interface for downloading and loading pretrained models.\n \"\"\"\n\n config_class = ReformerConfig\n base_model_prefix = \"reformer\"\n\n @property\n def dummy_inputs(self):\n input_ids = torch.tensor(DUMMY_INPUTS)\n input_mask = torch.tensor(DUMMY_MASK)\n dummy_inputs = {\n \"input_ids\": input_ids,\n \"attention_mask\": input_mask,\n }\n return dummy_inputs\n\n def _init_weights(self, module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, AxialPositionEmbeddings):\n for weight in module.weights:\n torch.nn.init.normal_(weight, std=self.config.axial_norm_std)\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n elif isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n\n\nREFORMER_START_DOCSTRING = r\"\"\"\n Reformer was proposed in `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.0445>`__\n by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.\n\n This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ sub-class.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n usage and behavior.\n\n Parameters:\n config (:class:`~transformers.ReformerConfig`): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the configuration.\n Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\n\nREFORMER_INPUTS_DOCSTRING = r\"\"\"\n Args:\n input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n During training the input_ids sequence_length has to be a multiple of the relevant model's\n chunk lengths (lsh's, local's or both). During evaluation, the indices are automatically\n padded to be a multiple of the chunk length.\n\n Indices can be obtained using :class:`transformers.ReformerTokenizer`.\n See :func:`transformers.PreTrainedTokenizer.encode` and\n :func:`transformers.PreTrainedTokenizer.__call__` for details.\n\n `What are input IDs? <../glossary.html#input-ids>`__\n attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Indices of positions of each input sequence tokens in the position embeddings.\n Selected in the range ``[0, config.max_position_embeddings - 1]``.\n\n `What are position IDs? 
<../glossary.html#position-ids>`_\n head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n Mask to nullify selected heads of the self-attention modules.\n Mask values selected in ``[0, 1]``:\n :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `input_ids` indices into associated vectors\n than the model's internal embedding lookup matrix.\n num_hashes (:obj:`int`, `optional`, defaults to :obj:`None`):\n `num_hashes` is the number of hashing rounds that should be performed during\n bucketing. Setting `num_hashes` overwrites the default `num_hashes` defined\n in `config.num_hashes`.\n For more information, see `num_hashes` in :class:`transformers.ReformerConfig`.\n output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.\n output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned tensors for more detail.\n return_tuple (:obj:`bool`, `optional`, defaults to :obj:`None`):\n If set to ``True``, the output of the model will be a plain tuple instead of a ``dataclass``.\n\"\"\"\n\n\n@add_start_docstrings(\n \"The bare Reformer Model transformer outputting raw hidden-states\" \"without any specific head on top.\",\n REFORMER_START_DOCSTRING,\n)\nclass ReformerModel(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n assert (\n self.config.num_hidden_layers > 0\n ), \"`config.attn_layers` is empty. 
Select at least one attn layer form ['lsh', 'local']\"\n\n self.embeddings = ReformerEmbeddings(config)\n self.encoder = ReformerEncoder(config)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, value):\n self.embeddings.word_embeddings = value\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=BaseModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n output_hidden_states=None,\n output_attentions=None,\n return_tuple=None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_tuple = return_tuple if return_tuple is not None else self.config.use_return_tuple\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size() # noqa: F841\n device = input_ids.device\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1] # noqa: F841\n device = inputs_embeds.device\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n assert (\n len(input_shape) == 2\n ), \"`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {}\".format(input_shape)\n\n # prepare head mask\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True)\n\n # original sequence length for padding\n orig_sequence_length = input_shape[-1]\n\n # if needs padding\n least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)\n must_pad_to_match_chunk_length = (\n input_shape[-1] % least_common_mult_chunk_length != 0 and input_shape[-1] > least_common_mult_chunk_length\n )\n\n if must_pad_to_match_chunk_length:\n padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length\n\n if self.training is True:\n raise ValueError(\n \"If training, sequence Length {} has to be a multiple of least common multiple chunk_length {}. 
Please consider padding the input to a length of {}.\".format(\n input_shape[-1], least_common_mult_chunk_length, input_shape[-1] + padding_length\n )\n )\n\n # pad input\n input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length(\n input_ids,\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n position_ids=position_ids,\n input_shape=input_shape,\n padding_length=padding_length,\n padded_seq_length=least_common_mult_chunk_length,\n device=device,\n )\n\n embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds)\n\n encoder_outputs = self.encoder(\n hidden_states=embedding_output,\n head_mask=head_mask,\n attention_mask=attention_mask,\n num_hashes=num_hashes,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n )\n sequence_output = encoder_outputs.hidden_states\n\n # if padding was applied\n if must_pad_to_match_chunk_length:\n sequence_output = sequence_output[:, :orig_sequence_length]\n\n hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None\n attentions = encoder_outputs.all_attentions if output_attentions else None\n\n if return_tuple:\n return tuple(v for v in [sequence_output, hidden_states, attentions] if v is not None)\n return BaseModelOutput(last_hidden_state=sequence_output, hidden_states=hidden_states, attentions=attentions)\n\n def _pad_to_mult_of_chunk_length(\n self,\n input_ids,\n inputs_embeds=None,\n attention_mask=None,\n position_ids=None,\n input_shape=None,\n padding_length=None,\n padded_seq_length=None,\n device=None,\n ):\n logger.info(\n \"Input ids are automatically padded from {} to {} to be a multiple of `config.chunk_length`: {}\".format(\n input_shape[-1], input_shape[-1] + padding_length, padded_seq_length\n )\n )\n\n padded_input_ids = torch.full(\n (input_shape[0], padding_length), self.config.pad_token_id, device=device, dtype=torch.long,\n )\n\n # Extend `attention_mask`\n if attention_mask is not None:\n attention_mask = torch.cat(\n [\n attention_mask,\n torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype,),\n ],\n dim=-1,\n )\n else:\n attention_mask = torch.cat(\n [\n torch.ones(input_shape, device=device, dtype=torch.uint8),\n torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.uint8),\n ],\n dim=-1,\n )\n\n # Extend `input_ids` with padding to match least common multiple chunk_length\n if input_ids is not None:\n input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)\n input_shape = input_ids.size()\n\n # Pad position ids if given\n if position_ids is not None:\n padded_position_ids = torch.arange(input_shape[-1], padded_seq_length, dtype=torch.long, device=device)\n padded_position_ids = position_ids.unsqueeze(0).expand(input_shape[0], padding_length)\n position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)\n\n # Extend `inputs_embeds` with padding to match least common multiple chunk_length\n if inputs_embeds is not None:\n padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids)\n inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)\n input_shape = inputs_embeds.size()\n return input_ids, inputs_embeds, attention_mask, position_ids, input_shape\n\n\n@add_start_docstrings(\"\"\"Reformer Model with a `language modeling` head on top. 
\"\"\", REFORMER_START_DOCSTRING)\nclass ReformerModelWithLMHead(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n assert config.is_decoder, \"If you want to use `ReformerLMHeadModel` make sure that `is_decoder=True`.\"\n self.reformer = ReformerModel(config)\n self.lm_head = ReformerOnlyLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def tie_weights(self):\n # word embeddings are not tied in Reformer\n pass\n\n @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=CausalLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n labels=None,\n output_hidden_states=None,\n output_attentions=None,\n return_tuple=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`.\n All labels set to ``-100`` are ignored (masked), the loss is only\n computed for labels in ``[0, ..., config.vocab_size]``\n \"\"\"\n return_tuple = return_tuple if return_tuple is not None else self.config.use_return_tuple\n\n reformer_outputs = self.reformer(\n input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n num_hashes=num_hashes,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_tuple=return_tuple,\n )\n\n sequence_output = reformer_outputs[0]\n logits = self.lm_head(sequence_output)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))\n\n if return_tuple:\n output = (logits,) + reformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutput(\n loss=loss,\n logits=logits,\n hidden_states=reformer_outputs.hidden_states,\n attentions=reformer_outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self, input_ids, past, **kwargs):\n # TODO(PVP): Add smart caching\n inputs_dict = {\"input_ids\": input_ids}\n\n if \"num_hashes\" in kwargs:\n inputs_dict[\"num_hashes\"] = kwargs[\"num_hashes\"]\n\n return inputs_dict\n\n\n@add_start_docstrings(\"\"\"Reformer Model with a `language modeling` head on top. 
\"\"\", REFORMER_START_DOCSTRING)\nclass ReformerForMaskedLM(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n assert (\n not config.is_decoder\n ), \"If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention.\"\n self.reformer = ReformerModel(config)\n self.lm_head = ReformerOnlyLMHead(config)\n\n self.init_weights()\n\n def get_output_embeddings(self):\n return self.lm_head.decoder\n\n def tie_weights(self):\n # word embeddings are not tied in Reformer\n pass\n\n @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=MaskedLMOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n labels=None,\n output_hidden_states=None,\n output_attentions=None,\n return_tuple=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n Labels for computing the masked language modeling loss.\n Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)\n Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels\n \"\"\"\n return_tuple = return_tuple if return_tuple is not None else self.config.use_return_tuple\n\n reformer_outputs = self.reformer(\n input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n num_hashes=num_hashes,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_tuple=return_tuple,\n )\n\n sequence_output = reformer_outputs[0]\n logits = self.lm_head(sequence_output)\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss() # -100 index = padding token\n masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if return_tuple:\n output = (logits,) + reformer_outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return MaskedLMOutput(\n loss=masked_lm_loss,\n logits=logits,\n hidden_states=reformer_outputs.hidden_states,\n attentions=reformer_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n \"\"\"Reformer Model transformer with a sequence classification/regression head on top (a linear layer\n on top of the pooled output) e.g. for GLUE tasks. 
\"\"\",\n REFORMER_START_DOCSTRING,\n)\nclass ReformerForSequenceClassification(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.reformer = ReformerModel(config)\n self.classifier = ReformerClassificationHead(config)\n if config.is_decoder is True:\n logger.warning(\"You might want to disable causal masking for sequence classification\")\n\n self.init_weights()\n\n def tie_weights(self):\n # word embeddings are not tied in Reformer\n pass\n\n @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=SequenceClassifierOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n labels=None,\n output_hidden_states=None,\n output_attentions=None,\n return_tuple=None,\n ):\n r\"\"\"\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for computing the sequence classification/regression loss.\n Indices should be in :obj:`[0, ..., config.num_labels - 1]`.\n If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),\n If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n \"\"\"\n\n outputs = self.reformer(\n input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n num_hashes=num_hashes,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_tuple=return_tuple,\n )\n\n sequence_output = outputs[0]\n logits = self.classifier(sequence_output)\n\n loss = None\n if labels is not None:\n if self.num_labels == 1:\n # We are doing regression\n loss_fct = MSELoss()\n loss = loss_fct(logits.view(-1), labels.view(-1))\n else:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if return_tuple:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions,\n )\n\n\nclass ReformerClassificationHead(nn.Module):\n \"\"\"Head for sentence-level classification tasks.\"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size)\n self.dropout = nn.Dropout(config.hidden_dropout_prob)\n self.out_proj = nn.Linear(config.hidden_size, config.num_labels)\n\n def forward(self, hidden_states, **kwargs):\n hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.dense(hidden_states)\n hidden_states = torch.tanh(hidden_states)\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.out_proj(hidden_states)\n return hidden_states\n\n\n@add_start_docstrings(\n \"\"\"Reformer Model with a span classification head on top for\n extractive question-answering tasks like SQuAD / TriviaQA ( a linear layer on\n top of hidden-states output to compute `span start logits` and `span end logits`. 
\"\"\",\n REFORMER_START_DOCSTRING,\n)\nclass ReformerForQuestionAnswering(ReformerPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.num_labels = config.num_labels\n\n self.reformer = ReformerModel(config)\n # 2 * config.hidden_size because we use reversible residual layers\n self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels)\n\n self.init_weights()\n\n def tie_weights(self):\n # word embeddings are not tied in Reformer\n pass\n\n @add_start_docstrings_to_callable(REFORMER_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n tokenizer_class=_TOKENIZER_FOR_DOC,\n checkpoint=\"google/reformer-crime-and-punishment\",\n output_type=QuestionAnsweringModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n head_mask=None,\n inputs_embeds=None,\n num_hashes=None,\n start_positions=None,\n end_positions=None,\n output_hidden_states=None,\n output_attentions=None,\n return_tuple=None,\n ):\n r\"\"\"\n start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`).\n Position outside of the sequence are not taken into account for computing the loss.\n \"\"\"\n return_tuple = return_tuple if return_tuple is not None else self.config.use_return_tuple\n\n reformer_outputs = self.reformer(\n input_ids,\n position_ids=position_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n num_hashes=num_hashes,\n output_hidden_states=output_hidden_states,\n output_attentions=output_attentions,\n return_tuple=return_tuple,\n )\n\n sequence_output = reformer_outputs[0]\n\n logits = self.qa_outputs(sequence_output)\n start_logits, end_logits = logits.split(1, dim=-1)\n start_logits = start_logits.squeeze(-1)\n end_logits = end_logits.squeeze(-1)\n\n total_loss = None\n if start_positions is not None and end_positions is not None:\n # If we are on multi-GPU, split add a dimension\n if len(start_positions.size()) > 1:\n start_positions = start_positions.squeeze(-1)\n if len(end_positions.size()) > 1:\n end_positions = end_positions.squeeze(-1)\n # sometimes the start/end positions are outside our model inputs, we ignore these terms\n ignored_index = start_logits.size(1)\n start_positions.clamp_(0, ignored_index)\n end_positions.clamp_(0, ignored_index)\n\n loss_fct = CrossEntropyLoss(ignore_index=ignored_index)\n start_loss = loss_fct(start_logits, start_positions)\n end_loss = loss_fct(end_logits, end_positions)\n total_loss = (start_loss + end_loss) / 2\n\n if return_tuple:\n output = (start_logits, end_logits) + reformer_outputs[1:]\n return ((total_loss,) + output) if total_loss is not None else output\n\n return QuestionAnsweringModelOutput(\n loss=total_loss,\n start_logits=start_logits,\n end_logits=end_logits,\n hidden_states=reformer_outputs.hidden_states,\n attentions=reformer_outputs.attentions,\n )\n" ]
[ [ "torch.mean", "torch.nn.functional.dropout2d", "torch.cat", "torch.nn.functional.dropout", "torch.zeros", "torch.sum", "torch.nn.Embedding", "torch.tanh", "torch.rsqrt", "torch.no_grad", "torch.where", "torch.logsumexp", "torch.nn.Dropout", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.randn", "torch.einsum", "torch.reshape", "torch.tensor", "torch.arange", "torch.argsort", "torch.nn.functional.softplus", "torch.enable_grad", "torch.full", "torch.cuda.current_device", "numpy.lcm", "torch.exp", "torch.nn.Linear", "torch.nn.init.normal_", "torch.nn.ParameterList", "torch.seed", "torch.cuda.manual_seed", "torch.manual_seed", "torch.gather", "torch.nn.LayerNorm", "torch.matmul", "torch.chunk", "torch.nn.MSELoss", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rmm286/backman_smooth_turning_path_generation
[ "ab4d55bc348f6dab2fc573762febc2e405af08f3" ]
[ "BackmanAlgorithm/testIterations.py" ]
[ "#!/usr/bin/env python3\nfrom SmoothPlannerClass import SmoothPathPlanner, planShortest\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\ndef testSingleSourceGoal():\n dT = 0.0005\n initialState = [0.0, 0, 0.5*np.pi, 1, 0]\n finalState = [1.0, 1.0, -0.5*np.pi, 1, 0]\n vConstraints = [1.0, 0.0, 2.0, -2.0, 5.0, -5.0]\n kConstraints = [0.785, -0.785, 30.0, -30.0, 30.0, -30.0]\n headlandSpeed = vConstraints[0]\n headlandSpeedReverse = vConstraints[1]\n\n path = planShortest(kConstraints, vConstraints, headlandSpeed, headlandSpeedReverse, initialState, finalState, dT, 0)\n \n plt.figure()\n plt.clf()\n plt.title(\"Final Path\")\n plt.xlabel(\"x (m)\")\n plt.ylabel(\"y (m)\")\n plt.arrow(path.poses[0][0], path.poses[0][1], 0.1*np.cos(path.poses[0][2]), 0.1*np.sin(path.poses[0][2]), length_includes_head = True, width = 0.01, head_width = 0.03, color = 'r', alpha = 0.5)\n plt.arrow(path.poses[-1][0], path.poses[-1][1], 0.1*np.cos(path.poses[-1][2]), 0.1*np.sin(path.poses[-1][2]), length_includes_head = True, width = 0.01, head_width = 0.03, color = 'b', alpha = 0.5)\n plt.plot([i[0] for i in path.poses], [i[1] for i in path.poses], 'k')\n plt.savefig(\"./logs/singleSourceGoal.png\")\n\ndef goalStateIterationCircle():\n #circle\n dT = 0.001\n nIteration = 6\n initialState = [0.0, 0, 0.5*np.pi, 1, 0]\n finalStates = [[2*(np.cos(i)), 2*(np.sin(i)), i, 1, 0] for i in np.linspace(0,(2-1/nIteration)*np.pi,nIteration-1)]\n vConstraints = [1.0, -1.0, 2.0, -2.0, 5.0, -5.0]\n kConstraints = [0.785, -0.785, 30.0, -30.0, 30.0, -30.0]\n headlandSpeed = vConstraints[0]\n headlandSpeedReverse = vConstraints[1]\n plt.figure()\n plt.clf()\n plt.title(\"Goal State Iteration\")\n plt.xlabel(\"x (m)\")\n plt.ylabel(\"y (m)\")\n\n for finalState in finalStates:\n path = planShortest(kConstraints, vConstraints, headlandSpeed, headlandSpeedReverse, initialState, finalState, dT)\n plt.plot([i[0] for i in path.poses], [i[1] for i in path.poses])\n\n plt.savefig(\"./logs/goalStateIterationCircle.png\")\n\ndef goalStateIterationLine():\n #line\n dT = 0.001\n nIteration = 6\n initialState = [0.0, -1.0, 0.5*np.pi, 1, 0]\n finalStates = [[1+0.5*i, 2.0, 0.5*np.pi, 1, 0] for i in range(nIteration)]\n vConstraints = [1.0, -1.0, 2.0, -2.0, 5.0, -5.0]\n kConstraints = [0.785, -0.785, 30.0, -30.0, 30.0, -30.0]\n headlandSpeed = vConstraints[0]\n headlandSpeedReverse = vConstraints[1]\n plt.figure()\n plt.clf()\n plt.title(\"Goal State Iteration\")\n plt.xlabel(\"x (m)\")\n plt.ylabel(\"y (m)\")\n\n for finalState in finalStates:\n path = planShortest(kConstraints, vConstraints, headlandSpeed, headlandSpeedReverse, initialState, finalState, dT)\n plt.plot([i[0] for i in path.poses], [i[1] for i in path.poses])\n\n plt.savefig(\"./logs/goalStateIterationLine.png\")\n\ndef goalStateIterationOrient():\n #final orientation\n\n dT = 0.001\n nIteration = 12\n initialState = [0.0, -1.0, 0.5*np.pi, 1, 0]\n finalStates = [[1, 0.0, i, 1, 0] for i in np.linspace(0,(2-1/nIteration)*np.pi,nIteration-1)]\n vConstraints = [1.0, -1.0, 2.0, -2.0, 5.0, -5.0]\n kConstraints = [0.785, -0.785, 30.0, -30.0, 30.0, -30.0]\n headlandSpeed = vConstraints[0]\n headlandSpeedReverse = vConstraints[1]\n plt.figure()\n plt.clf()\n plt.title(\"Goal State Iteration\")\n plt.xlabel(\"x (m)\")\n plt.ylabel(\"y (m)\")\n\n for finalState in finalStates:\n path = planShortest(kConstraints, vConstraints, headlandSpeed, headlandSpeedReverse, initialState, finalState, dT)\n plt.plot([i[0] for i in path.poses], [i[1] for i in path.poses])\n\n 
plt.savefig(\"./logs/goalStateIterationOrient.png\")\n\n\ndef goalStateIterationCurvature():\n #final orientation\n\n dT = 0.001\n initialState = [0.0, -1.0, 0.5*np.pi, 1, 0]\n finalStates = [[1, 0.0, -0.5*np.pi, 1, 0] for i in np.linspace(-0.785,0.785,5)]\n vConstraints = [1.0, -1.0, 2.0, -2.0, 5.0, -5.0]\n kConstraints = [0.785, -0.785, 30.0, -30.0, 30.0, -30.0]\n headlandSpeed = vConstraints[0]\n headlandSpeedReverse = vConstraints[1]\n plt.figure()\n plt.clf()\n plt.title(\"Goal State Iteration\")\n plt.xlabel(\"x (m)\")\n plt.ylabel(\"y (m)\")\n\n for finalState in finalStates:\n path = planShortest(kConstraints, vConstraints, headlandSpeed, headlandSpeedReverse, initialState, finalState, dT)\n plt.plot([i[0] for i in path.poses], [i[1] for i in path.poses])\n\n plt.savefig(\"./logs/goalStateIterationCurvature.png\")\n\ndef main():\n\n #goalStateIterationLine()\n #goalStateIterationCircle()\n #goalStateIterationOrient()\n #goalStateIterationCurvature()\n testSingleSourceGoal()\n\nmain()" ]
[ [ "matplotlib.pyplot.title", "numpy.linspace", "matplotlib.pyplot.figure", "numpy.cos", "matplotlib.pyplot.savefig", "numpy.sin", "matplotlib.pyplot.plot", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kirakira666/ZENNKU
[ "a62905e21838967af9f16e55d954e130f596f9e3" ]
[ "examples/run_pre_train.py" ]
[ "# coding: utf-8\n# Copyright 2019 Sinovation Ventures AI Institute\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PyTorch pretrain for ZEN model.\"\"\"\nimport sys\nsys.path.append(\"..\")\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nimport os\nimport torch\nimport logging\nimport json\nimport random\nimport numpy as np\nfrom collections import namedtuple\nimport time\nimport datetime\n\nfrom torch.utils.data import DataLoader, Dataset, RandomSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm\n\nfrom ZEN import WEIGHTS_NAME, CONFIG_NAME\nfrom ZEN import ZenConfig, ZenForPreTraining\nfrom ZEN import BertTokenizer\nfrom ZEN import BertAdam, WarmupLinearSchedule\n\nInputFeatures = namedtuple(\n \"InputFeatures\",\n \"input_ids input_mask segment_ids lm_label_ids is_next ngram_ids ngram_masks ngram_positions ngram_starts \"\n \"ngram_lengths ngram_segment_ids\")\n\nlog_format = '%(asctime)-10s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=log_format)\n\n\ndef convert_example_to_features(example, tokenizer, max_seq_length, max_ngram_in_sequence):\n tokens = example[\"tokens\"]\n segment_ids = example[\"segment_ids\"]\n is_random_next = example[\"is_random_next\"]\n masked_lm_positions = example[\"masked_lm_positions\"]\n masked_lm_labels = example[\"masked_lm_labels\"]\n\n # add ngram level information\n ngram_ids = example[\"ngram_ids\"]\n ngram_positions = example[\"ngram_positions\"]\n ngram_lengths = example[\"ngram_lengths\"]\n ngram_segment_ids = example[\"ngram_segment_ids\"]\n\n assert len(tokens) == len(segment_ids) <= max_seq_length # The preprocessed data should be already truncated\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n # print(input_ids)\n masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)\n\n input_array = np.zeros(max_seq_length, dtype=np.int)\n input_array[:len(input_ids)] = input_ids\n\n mask_array = np.zeros(max_seq_length, dtype=np.bool)\n mask_array[:len(input_ids)] = 1\n\n segment_array = np.zeros(max_seq_length, dtype=np.bool)\n segment_array[:len(segment_ids)] = segment_ids\n\n lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)\n lm_label_array[masked_lm_positions] = masked_label_ids\n\n # add ngram pads\n ngram_id_array = np.zeros(max_ngram_in_sequence, dtype=np.int)\n ngram_id_array[:len(ngram_ids)] = ngram_ids\n\n # record the masked positions\n\n # The matrix here take too much space either in disk or in memory, so the usage have to be lazily convert the\n # the start position and length to the matrix at training time.\n\n ngram_positions_matrix = np.zeros(shape=(max_seq_length, max_ngram_in_sequence), dtype=np.bool)\n for i in range(len(ngram_ids)):\n ngram_positions_matrix[ngram_positions[i]:ngram_positions[i]+ngram_lengths[i], i] = 1\n\n ngram_start_array = np.zeros(max_ngram_in_sequence, dtype=np.int32)\n ngram_start_array[:len(ngram_ids)] = ngram_positions\n\n ngram_length_array = np.zeros(max_ngram_in_sequence, 
dtype=np.int32)\n ngram_length_array[:len(ngram_ids)] = ngram_lengths\n\n ngram_mask_array = np.zeros(max_ngram_in_sequence, dtype=np.bool)\n ngram_mask_array[:len(ngram_ids)] = 1\n\n ngram_segment_array = np.zeros(max_ngram_in_sequence, dtype=np.bool)\n ngram_segment_array[:len(ngram_ids)] = ngram_segment_ids\n features = InputFeatures(input_ids=input_array,\n input_mask=mask_array,\n segment_ids=segment_array,\n lm_label_ids=lm_label_array,\n is_next=is_random_next,\n ngram_ids=ngram_id_array,\n ngram_masks=ngram_mask_array,\n ngram_positions=ngram_positions_matrix,\n ngram_starts=ngram_start_array,\n ngram_lengths=ngram_length_array,\n ngram_segment_ids=ngram_segment_array)\n return features\n\n\nclass PregeneratedDataset(Dataset):\n def __init__(self, training_path, epoch, tokenizer, num_data_epochs, reduce_memory=False, fp16=False):\n self.vocab = tokenizer.vocab\n self.tokenizer = tokenizer\n self.epoch = epoch\n self.data_epoch = epoch % num_data_epochs\n data_file = training_path / f\"epoch_{self.data_epoch}.json\"\n metrics_file = training_path / f\"epoch_{self.data_epoch}_metrics.json\"\n assert data_file.is_file() and metrics_file.is_file()\n metrics = json.loads(metrics_file.read_text())\n num_samples = metrics['num_training_examples']\n seq_len = metrics['max_seq_len']\n max_ngram_in_sequence = metrics['max_ngram_in_sequence']\n self.temp_dir = None\n self.working_dir = None\n self.fp16 = fp16\n if reduce_memory:\n self.temp_dir = \"/tmp\"\n # TemporaryDirectory()\n self.working_dir = Path(self.temp_dir)\n input_ids = np.memmap(filename=self.working_dir / 'input_ids.memmap',\n mode='w+', dtype=np.int32, shape=(num_samples, seq_len))\n input_masks = np.memmap(filename=self.working_dir / 'input_masks.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.bool)\n segment_ids = np.memmap(filename=self.working_dir / 'segment_ids.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.bool)\n lm_label_ids = np.memmap(filename=self.working_dir / 'lm_label_ids.memmap',\n shape=(num_samples, seq_len), mode='w+', dtype=np.int32)\n lm_label_ids[:] = -1\n is_nexts = np.memmap(filename=self.working_dir / 'is_nexts.memmap',\n shape=(num_samples,), mode='w+', dtype=np.bool)\n # add ngram level features\n ngram_ids = np.memmap(filename=self.working_dir / 'ngram_ids.memmap',\n mode='w+', dtype=np.int32, shape=(num_samples, max_ngram_in_sequence))\n\n ngram_masks = np.memmap(filename=self.working_dir / 'ngram_masks.memmap',\n mode='w+', dtype=np.bool, shape=(num_samples, max_ngram_in_sequence))\n\n ngram_positions = np.memmap(filename=self.working_dir / 'ngram_positions.memmap',\n mode='w+', dtype=np.bool, shape=(num_samples, seq_len, max_ngram_in_sequence))\n\n ngram_starts = np.memmap(filename=self.working_dir / 'ngram_starts.memmap',\n mode='w+', dtype=np.int32, shape=(num_samples, max_ngram_in_sequence))\n\n ngram_lengths = np.memmap(filename=self.working_dir / 'ngram_lengths.memmap',\n mode='w+', dtype=np.int32, shape=(num_samples, max_ngram_in_sequence))\n\n ngram_segment_ids = np.memmap(filename=self.working_dir / 'ngram_segment_ids.memmap',\n mode='w+', dtype=np.bool, shape=(num_samples, max_ngram_in_sequence))\n\n else:\n input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)\n input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)\n segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)\n lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)\n is_nexts = np.zeros(shape=(num_samples,), dtype=np.bool)\n # add 
ngram level features\n\n ngram_ids = np.zeros(shape=(num_samples, max_ngram_in_sequence), dtype=np.int32)\n ngram_masks = np.zeros(shape=(num_samples, max_ngram_in_sequence), dtype=np.bool)\n\n ngram_positions = np.zeros(shape=(num_samples, seq_len, max_ngram_in_sequence), dtype=np.bool)\n ngram_starts = np.zeros(shape=(num_samples, max_ngram_in_sequence), dtype=np.int32)\n ngram_lengths = np.zeros(shape=(num_samples, max_ngram_in_sequence), dtype=np.int32)\n\n ngram_segment_ids = np.zeros(shape=(num_samples, max_ngram_in_sequence), dtype=np.bool)\n\n logging.info(f\"Loading training examples for epoch {epoch}\")\n with data_file.open() as f:\n for i, line in enumerate(tqdm(f, total=num_samples, desc=\"Training examples\")):\n line = line.strip()\n example = json.loads(line)\n features = convert_example_to_features(example, tokenizer, seq_len, max_ngram_in_sequence)\n input_ids[i] = features.input_ids\n segment_ids[i] = features.segment_ids\n input_masks[i] = features.input_mask\n lm_label_ids[i] = features.lm_label_ids\n is_nexts[i] = features.is_next\n # add ngram related ids\n ngram_ids[i] = features.ngram_ids\n ngram_masks[i] = features.ngram_masks\n ngram_positions[i] = features.ngram_positions\n ngram_starts[i] = features.ngram_starts\n ngram_lengths[i] = features.ngram_lengths\n ngram_segment_ids[i] = features.ngram_segment_ids\n\n assert i == num_samples - 1 # Assert that the sample count metric was true\n logging.info(\"Loading complete!\")\n self.num_samples = num_samples\n self.seq_len = seq_len\n self.input_ids = input_ids\n self.input_masks = input_masks\n self.segment_ids = segment_ids\n self.lm_label_ids = lm_label_ids\n self.is_nexts = is_nexts\n self.ngram_ids = ngram_ids\n self.ngram_masks = ngram_masks\n self.ngram_positions = ngram_positions\n self.ngram_segment_ids = ngram_segment_ids\n self.ngram_starts = ngram_starts\n self.ngram_lengths = ngram_lengths\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, item):\n\n position = torch.tensor(self.ngram_positions[item].astype(np.double))\n if self.fp16:\n position = position.half()\n else:\n position = position.float()\n\n return (torch.tensor(self.input_ids[item].astype(np.int64)),\n torch.tensor(self.input_masks[item].astype(np.int64)),\n torch.tensor(self.segment_ids[item].astype(np.int64)),\n torch.tensor(self.lm_label_ids[item].astype(np.int64)),\n torch.tensor(self.is_nexts[item].astype(np.int64)),\n torch.tensor(self.ngram_ids[item].astype(np.int64)),\n torch.tensor(self.ngram_masks[item].astype(np.int64)),\n position,\n torch.tensor(self.ngram_starts[item].astype(np.int64)),\n torch.tensor(self.ngram_lengths[item].astype(np.int64)),\n torch.tensor(self.ngram_segment_ids[item].astype(np.int64)))\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('--pregenerated_data', type=Path, required=True)\n parser.add_argument('--output_dir', type=Path, required=True)\n parser.add_argument(\"--bert_model\", type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.\")\n parser.add_argument(\"--do_lower_case\", action=\"store_true\")\n parser.add_argument(\"--reduce_memory\", action=\"store_true\",\n help=\"Store training data as on-disc memmaps to massively reduce memory usage\")\n\n parser.add_argument(\"--epochs\", type=int, default=3, help=\"Number of epochs to train for\")\n parser.add_argument(\"--local_rank\",\n type=int,\n default=-1,\n help=\"local_rank 
for distributed training on gpus\")\n parser.add_argument(\"--no_cuda\",\n action='store_true',\n help=\"Whether not to use CUDA when available\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--train_batch_size\",\n default=32,\n type=int,\n help=\"Total batch size for training.\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead of 32-bit\")\n parser.add_argument('--scratch',\n action='store_true',\n help=\"Whether to train from scratch\")\n parser.add_argument('--loss_scale',\n type=float, default=0,\n help=\"Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\\n\"\n \"0 (default value): dynamic loss scaling.\\n\"\n \"Positive power of 2: static loss scaling value.\\n\")\n parser.add_argument(\"--warmup_proportion\",\n default=0.1,\n type=float,\n help=\"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10%% of training.\")\n parser.add_argument(\"--learning_rate\",\n default=3e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--save_name',\n type=str,\n default=\"zen\",\n help=\"The prefix used for saving the remote model\")\n parser.add_argument(\"--already_trained_epoch\",\n default=0,\n type=int)\n\n args = parser.parse_args()\n\n assert args.pregenerated_data.is_dir(), \\\n \"--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!\"\n\n samples_per_epoch = []\n for i in range(args.epochs):\n epoch_file = args.pregenerated_data / f\"epoch_{i}.json\"\n metrics_file = args.pregenerated_data / f\"epoch_{i}_metrics.json\"\n if epoch_file.is_file() and metrics_file.is_file():\n metrics = json.loads(metrics_file.read_text()) # 将字符串转化为字典\n samples_per_epoch.append(metrics['num_training_examples']) # 训练实例的数目\n else:\n if i == 0:\n exit(\"No training data was found!\")\n print(f\"Warning! 
There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).\")\n            print(\"This script will loop over the available data, but training diversity may be negatively impacted.\")\n            num_data_epochs = i\n            break\n    else:\n        num_data_epochs = args.epochs # number of data epochs\n\n    if args.local_rank == -1 or args.no_cuda:\n        device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n        n_gpu = torch.cuda.device_count()\n    else:\n        torch.cuda.set_device(args.local_rank)\n        device = torch.device(\"cuda\", args.local_rank)\n        n_gpu = 1\n        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs\n        torch.distributed.init_process_group(backend='nccl')\n    logging.info(\"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}\".format(\n        device, n_gpu, bool(args.local_rank != -1), args.fp16))\n\n    if args.gradient_accumulation_steps < 1:\n        raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, should be >= 1\".format(\n            args.gradient_accumulation_steps))\n\n    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps\n\n    random.seed(args.seed)\n    np.random.seed(args.seed)\n    torch.manual_seed(args.seed)\n    if n_gpu > 0:\n        torch.cuda.manual_seed_all(args.seed)\n    print(args.bert_model)\n    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)\n\n    total_train_examples = 0 # total number of training examples summed over all epochs\n    for i in range(args.epochs):\n        # The modulo takes into account the fact that we may loop over limited epochs of data\n        total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]\n\n    num_train_optimization_steps = int(\n        total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)\n    if args.local_rank != -1:\n        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()\n\n    if args.scratch:\n        config = ZenConfig(21128, 104089)\n        model = ZenForPreTraining(config)\n    else:\n        model = ZenForPreTraining.from_pretrained(args.bert_model)\n\n    if args.fp16:\n        model.half()\n    model.to(device)\n    if args.local_rank != -1:\n        try:\n            from apex.parallel import DistributedDataParallel as DDP\n        except ImportError:\n            raise ImportError(\n                \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n        model = DDP(model)\n    elif n_gpu > 1:\n        model = torch.nn.DataParallel(model)\n\n    # Prepare optimizer\n    param_optimizer = list(model.named_parameters())\n    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n    optimizer_grouped_parameters = [\n        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n         'weight_decay': 0.01},\n        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n    ]\n\n    if args.fp16:\n        try:\n            from apex.optimizers import FP16_Optimizer\n            from apex.optimizers import FusedAdam\n        except ImportError:\n            raise ImportError(\n                \"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.\")\n\n        optimizer = FusedAdam(optimizer_grouped_parameters,\n                              lr=args.learning_rate,\n                              bias_correction=False,\n                              max_grad_norm=1.0)\n        if args.loss_scale == 0:\n            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)\n        else:\n            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)\n        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,\n                                             t_total=num_train_optimization_steps)\n    else:\n        optimizer = BertAdam(optimizer_grouped_parameters,\n                             
lr=args.learning_rate,\n warmup=args.warmup_proportion,\n t_total=num_train_optimization_steps)\n\n global_step = 0\n logging.info(\"***** Running training *****\")\n logging.info(\" Num examples = %d\", total_train_examples)\n logging.info(\" Batch size = %d\", args.train_batch_size)\n logging.info(\" Num steps = %d\", num_train_optimization_steps)\n model.train()\n for epoch in range(args.epochs):\n\n epoch_dataset = PregeneratedDataset(epoch=epoch,\n training_path=args.pregenerated_data,\n tokenizer=tokenizer,\n num_data_epochs=num_data_epochs,\n reduce_memory=args.reduce_memory,\n fp16=args.fp16)\n if args.local_rank == -1:\n train_sampler = RandomSampler(epoch_dataset)\n else:\n train_sampler = DistributedSampler(epoch_dataset)\n train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n tr_loss = 0\n nb_tr_examples, nb_tr_steps = 0, 0\n with tqdm(total=len(train_dataloader), desc=f\"Epoch {epoch}\") as pbar:\n for step, batch in enumerate(train_dataloader):\n batch = tuple(t.to(device) for t in batch)\n input_ids, input_mask, segment_ids, lm_label_ids, is_next, ngram_ids, ngram_masks, ngram_positions, \\\n ngram_starts, \\\n ngram_lengths, ngram_segment_ids = batch\n\n loss = model(input_ids,\n ngram_ids,\n ngram_positions,\n segment_ids,\n ngram_segment_ids,\n input_mask,\n ngram_masks,\n lm_label_ids,\n is_next)\n\n if n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n if args.fp16:\n optimizer.backward(loss)\n else:\n loss.backward()\n tr_loss += loss.item()\n nb_tr_examples += input_ids.size(0)\n nb_tr_steps += 1\n pbar.update(1)\n mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps\n pbar.set_postfix_str(f\"Loss: {mean_loss:.5f}\")\n if (step + 1) % args.gradient_accumulation_steps == 0:\n if args.fp16:\n # modify learning rate with special warm up BERT uses\n # if args.fp16 is False, BertAdam is used that handles this automatically\n lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step, args.warmup_proportion)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n optimizer.step()\n optimizer.zero_grad()\n global_step += 1\n\n # Save a trained model\n ts = time.time()\n st = datetime.datetime.fromtimestamp(ts).strftime('%m%d%H%M%S')\n\n saving_path = args.output_dir\n\n saving_path = Path(os.path.join(saving_path, args.save_name + st + \"_epoch_\" + str(epoch + args.already_trained_epoch)))\n\n if saving_path.is_dir() and list(saving_path.iterdir()):\n logging.warning(f\"Output directory ({ saving_path }) already exists and is not empty!\")\n saving_path.mkdir(parents=True, exist_ok=True)\n\n logging.info(\"** ** * Saving fine-tuned model ** ** * \")\n model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\n output_model_file = os.path.join(saving_path, WEIGHTS_NAME)\n output_config_file = os.path.join(saving_path, CONFIG_NAME)\n\n torch.save(model_to_save.state_dict(), output_model_file)\n model_to_save.config.to_json_file(output_config_file)\n tokenizer.save_vocabulary(saving_path)\n\nif __name__ == '__main__':\n main()\n" ]
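Note on the training loop in the pretraining script above: it uses plain gradient accumulation, dividing each micro-batch loss by gradient_accumulation_steps and calling optimizer.step() only every N micro-batches (the fp16 branch delegates the backward pass to apex's FP16_Optimizer). A minimal sketch of that accumulation pattern, with a toy nn.Linear model and random tensors standing in for ZenForPreTraining and PregeneratedDataset (which are not reproduced here), might look like:

import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-ins; the script above trains ZenForPreTraining on a PregeneratedDataset instead.
model = nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
loader = DataLoader(TensorDataset(torch.randn(64, 10), torch.randn(64, 1)), batch_size=8)

accumulation_steps = 4  # plays the role of args.gradient_accumulation_steps

model.train()
optimizer.zero_grad()
for step, (x, y) in enumerate(loader):
    loss = nn.functional.mse_loss(model(x), y)
    # Divide so the summed gradients match a single pass over the effective (combined) batch
    (loss / accumulation_steps).backward()
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()

Dividing the loss keeps the accumulated gradient equal to what one update over the combined batch would produce, so the effective batch size becomes train_batch_size * gradient_accumulation_steps without extra memory.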
[ [ "torch.distributed.init_process_group", "numpy.random.seed", "torch.cuda.set_device", "torch.utils.data.distributed.DistributedSampler", "torch.manual_seed", "numpy.memmap", "torch.utils.data.DataLoader", "torch.utils.data.RandomSampler", "numpy.full", "torch.nn.DataParallel", "torch.cuda.manual_seed_all", "torch.cuda.is_available", "torch.device", "torch.cuda.device_count", "numpy.zeros", "torch.distributed.get_world_size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
broadinstitute/str-analysis
[ "f9b5499200d1fbe7cf99ad1ffc84d62056f39948" ]
[ "str_analysis/generate_gnomad_json.py" ]
[ "import argparse\nimport collections\nfrom datetime import datetime\nimport gzip\nimport hashlib\nimport json\nimport math\nimport os\nimport pandas as pd\nimport pkgutil\nimport pwd\nimport requests\nimport tqdm\n\nfrom str_analysis.utils.canonical_repeat_unit import compute_canonical_motif\n\n# Map STR locus ids to readable names for STR loci that are adjacent to the main known pathogenic loci\nfrom str_analysis.utils.export_json import export_json\nfrom str_analysis.utils.known_pathogenic_strs_tsv import parse_known_pathogenic_strs_tsv\n\nADJACENT_REPEAT_LABELS = {\n \"ATXN7_GCC\": \"Adjacent Right STR\",\n \"ATXN8OS_CTA\": \"Adjacent Left STR\",\n \"HTT_CCG\": \"Adjacent Right STR\",\n \"FXN_A\": \"Adjacent Left Homopolymer\",\n \"CNBP_CAGA\": \"Adjacent Right STR #1\",\n \"CNBP_CA\": \"Adjacent Right STR #2\",\n \"NOP56_CGCCTG\": \"Adjacent Right STR\",\n}\n\n# Map gene name to Ensembl gene id for genes that contain known pathogenic STRs\nGENE_NAME_TO_GENE_ID = {\n 'ATXN8': 'ENSG00000230223',\n 'AFF2': 'ENSG00000155966',\n 'AR': 'ENSG00000169083',\n 'ARX': 'ENSG00000004848',\n 'ATN1': 'ENSG00000111676',\n 'ATXN1': 'ENSG00000124788',\n 'ATXN10': 'ENSG00000130638',\n 'ATXN2': 'ENSG00000204842',\n 'ATXN3': 'ENSG00000066427',\n 'ATXN7': 'ENSG00000163635',\n 'BEAN1': 'ENSG00000166546',\n 'C9orf72': 'ENSG00000147894',\n 'CACNA1A': 'ENSG00000141837',\n 'CBL2': 'ENSG0000011039',\n 'CNBP': 'ENSG00000169714',\n 'COMP': 'ENSG00000105664',\n 'CSTB': 'ENSG00000160213',\n 'DAB1': 'ENSG00000173406',\n 'DIP2B': 'ENSG00000066084',\n 'DMD': 'ENSG00000198947',\n 'DMPK': 'ENSG00000104936',\n 'EIF4A3': 'ENSG00000141543',\n 'FMR1': 'ENSG00000102081',\n 'FOXL2': 'ENSG00000183770',\n 'FXN': 'ENSG00000165060',\n 'GIPC1': 'ENSG00000123159',\n 'GLS': 'ENSG00000115419',\n 'HOXA13': 'ENSG00000106031',\n 'HOXD13': 'ENSG00000128714',\n 'HTT': 'ENSG00000197386',\n 'JPH3': 'ENSG00000154118',\n 'LOC642361': 'ENSG00000272447',\n 'LRP12': 'ENSG00000147650',\n 'MARCHF6': 'ENSG00000145495',\n 'NIPA1': 'ENSG00000170113',\n 'NOP56': 'ENSG00000101361',\n 'NOTCH2NLC': 'ENSG00000286219',\n 'PABPN1': 'ENSG00000100836',\n 'PHOX2B': 'ENSG00000109132',\n 'PPP2R2B': 'ENSG00000156475',\n 'PRDM12': 'ENSG00000130711',\n 'PRNP': 'ENSG00000171867',\n 'RAPGEF2': 'ENSG00000109756',\n 'RFC1': 'ENSG00000035928',\n 'RUNX2': 'ENSG00000124813',\n 'SAMD12': 'ENSG00000177570',\n 'SOX3': 'ENSG00000134595',\n 'STARD7': 'ENSG00000084090',\n 'TBP': 'ENSG00000112592',\n 'TBX1': 'ENSG00000184058',\n 'TCF4': 'ENSG00000196628',\n 'TNRC6A': 'ENSG00000090905',\n 'VWA1': 'ENSG00000179403',\n 'XYLT1': 'ENSG00000103489',\n 'YEATS2': 'ENSG00000163872',\n 'ZIC2': 'ENSG00000043355',\n 'ZIC3': 'ENSG00000156925',\n}\n\n# Round ages to the nearest N years so that they can be shared publicly without increasing identifiability\nAGE_RANGE_SIZE = 5\n\n# Truncate the age distribution at this lower and upper bound.\nLOWER_AGE_CUTOFF = 20\nUPPER_AGE_CUTOFF = 80\n\n# Show age for not more than this many of the most expanded samples per locus for each sex/population bucket\nMAX_AGES_PER_BUCKET_TO_DISPLAY_IN_THE_READVIZ_SECTION = 100\n\n# Use this value instead of the age range for samples where age is not available or not shown.\nAGE_NOT_AVAILABLE = \"age_not_available\"\n\nPCR_INFO_NOT_AVAILABLE = \"pcr_info_not_available\"\n\n# Show age only for the these larger sub-populations to avoid increasing identifiability in smaller populations\nPOPULATIONS_WITH_AGE_DISPLAYED_IN_READVIZ_SECTION = {\"sas\", \"oth\", \"asj\", \"amr\", \"fin\", \"eas\", \"afr\", \"nfe\", 
\"mid\"}\n\n# Fraction of genotypes that can be missing for a locus before generating an error\nMISSING_GENOTYPES_ERROR_THRESHOLD = 0.01\n\n# Fraction of readviz images that can be missing for a locus before generating an error\nMISSING_READVIZ_ERROR_THRESHOLD = 0.04\n\n# Fraction of samples that can be missing age information before generating an error\nMISSING_AGE_THRESHOLD = 0.5\n\n# Fraction of samples that can be pcr-free/pcr-plus information before generating an error\nMISSING_PCR_PROTOCOL_THRESHOLD = 0.25\n\n# Expected number of known pathogenic repeats\nEXPECTED_N_KNOWN_PATHOGENIC_REPEATS = 59\n\n# Add this \"salt\" value to the sha512 hash to prevent dictionary attacks on the encrypted sample ids\nsalt = pwd.getpwuid(os.getuid()).pw_name\n\n\ndef parse_args():\n \"\"\"Parse command-line args, perform basic validation, and then return the args object.\"\"\"\n\n p = argparse.ArgumentParser()\n p.add_argument(\n \"--expansion-hunter-tsv\",\n default=\"~/code/str-analysis/local_files/gnomad_str_data/data/without_offtargets/combined_expansion_hunter.19243_json_files.variants.tsv\",\n help=\"Table generated by running python3 -m str_analysis.combine_expansionhunter_json_to_tsv on all samples \"\n \"called by ExpansionHunter.\"\n )\n p.add_argument(\n \"--non-ref-motif-tsv\",\n default=\"~/code/str-analysis/local_files/gnomad_str_data/data/without_offtargets/combined.173160_json_files.tsv\",\n help=\"Table generated by running python3 -m str_analysis.combine_json_to_tsv on all loci called by \"\n \"str_analysis.call_non_ref_pathogenic_motifs.\",\n )\n p.add_argument(\n \"--gnomad-metadata-tsv\",\n default=\"~/code/sample_metadata/metadata/gnomad_v3.1_metadata_v3.1.tsv.gz\",\n help=\"gnomAD metadata table path.\",\n )\n p.add_argument(\n \"--known-pathogenic-strs-tsv\",\n default=\"~/code/str-analysis/local_files/gnomad_str_data/known_pathogenic_strs.tsv\",\n help=\"Table of known pathogenic STRs.\",\n )\n grp = p.add_mutually_exclusive_group()\n grp.add_argument(\n \"--existing-readviz-filename-list\",\n help=\"A text file that lists all readviz .svg filenames that exist (one per line). These are the encrypted \"\n \"public filenames that don't contain sample ids - for example: ffa0880117e0791d51b0ef85b56f3a54216.svg\",\n )\n grp.add_argument(\n \"--no-readviz\",\n action=\"store_true\",\n help=\"Use this flag to indicate that readviz was not generated for this dataset.\",\n )\n\n p.add_argument(\n \"--output-dir\",\n default=\"gs://gnomad-browser/STRs\",\n help=\"Where to write output files. 
Supports local and Google storage (gs://) paths.\",\n )\n p.add_argument(\n \"--output-filename-suffix\",\n default=\"\",\n help=\"An optional label to append to all output filenames\",\n )\n args = p.parse_args()\n\n print(\"Args:\")\n for key, value in sorted(args.__dict__.items()):\n print(f\" {key} = {value}\")\n\n for path in args.expansion_hunter_tsv, args.non_ref_motif_tsv, args.gnomad_metadata_tsv, \\\n args.known_pathogenic_strs_tsv:\n if not os.path.isfile(os.path.expanduser(path)):\n p.error(f\"{path} file not found\")\n\n return args\n\n\ndef load_data_df(args):\n \"\"\"Load the tables specified by args.expansion_hunter_tsv, args.non_ref_motif_tsv, and args.gnomad_metadata_tsv.\n Rename and select relevant columns, combine the tables, then return a single combined table.\n\n Args:\n args (argparse.Namespace): The argparse parsed arguments object.\n\n Return:\n pandas.DataFrame: The result of combining the 3 tables.\n \"\"\"\n\n print(f\"Loading {args.expansion_hunter_tsv}\")\n\n def split_by_forward_slash(expansion_hunter_call_repeat_unit):\n repeat_units = expansion_hunter_call_repeat_unit.split(\"/\")\n return repeat_units[0].strip(), repeat_units[-1].strip()\n\n def process_sample_id(sample_id):\n sample_id = sample_id.replace(\"RP-1400::\", \"\").replace(\"v3.1::\", \"\")\n return sample_id.strip().replace(\" \", \"_\").replace(\"-\", \"_\").split(\".\")[0].split(\"_SM_\")[0]\n\n # Parse ExpansionHunter tsv\n df = pd.read_table(args.expansion_hunter_tsv)\n df.loc[:, \"SampleId\"] = df.SampleId.apply(process_sample_id)\n df.loc[:, \"Motif: Allele 1\"] = df[\"RepeatUnit\"]\n df.loc[:, \"Motif: Allele 2\"] = df[\"RepeatUnit\"]\n df.loc[:, \"ReadvizFilename\"] = None if args.no_readviz else df[\"SampleId\"] + \".\" + df[\"LocusId\"] + \".svg\"\n df = df[[\n \"SampleId\", \"LocusId\", \"VariantId\", \"ReferenceRegion\",\n \"Motif: Allele 1\", \"Motif: Allele 2\",\n \"Num Repeats: Allele 1\", \"Num Repeats: Allele 2\",\n \"Genotype\", \"GenotypeConfidenceInterval\",\n \"RepeatUnit\", \"ReadvizFilename\",\n ]]\n\n # Parse the args.non_ref_motif_tsv generated by call_non_ref_pathogenic_motifs\n print(f\"Loading {args.non_ref_motif_tsv}\")\n non_ref_motifs_df = pd.read_table(args.non_ref_motif_tsv)\n non_ref_motifs_df = non_ref_motifs_df[~non_ref_motifs_df[\"expansion_hunter_call_genotype\"].isna()]\n\n non_ref_motifs_df[\"Motif: Allele 1\"], non_ref_motifs_df[\"Motif: Allele 2\"] = zip(\n *non_ref_motifs_df[\"expansion_hunter_call_repeat_unit\"].apply(split_by_forward_slash))\n\n non_ref_motifs_df.loc[:, \"Num Repeats: Allele 1\"], non_ref_motifs_df.loc[:, \"Num Repeats: Allele 2\"] = zip(\n *non_ref_motifs_df[\"expansion_hunter_call_genotype\"].apply(split_by_forward_slash))\n\n non_ref_motifs_df.loc[:, \"SampleId\"] = non_ref_motifs_df.sample_id.apply(process_sample_id)\n non_ref_motifs_df.loc[:, \"LocusId\"] = non_ref_motifs_df[\"locus_id\"]\n non_ref_motifs_df.loc[:, \"VariantId\"] = non_ref_motifs_df[\"locus_id\"]\n non_ref_motifs_df.loc[:, \"ReferenceRegion\"] = non_ref_motifs_df[\"locus_coords\"]\n non_ref_motifs_df.loc[:, \"Genotype\"] = non_ref_motifs_df[\"expansion_hunter_call_genotype\"]\n non_ref_motifs_df.loc[:, \"GenotypeConfidenceInterval\"] = non_ref_motifs_df[\"expansion_hunter_call_CI\"]\n non_ref_motifs_df.loc[:, \"RepeatUnit\"] = None # will be set later\n non_ref_motifs_df.loc[:, \"ReadvizFilename\"] = None if args.no_readviz else non_ref_motifs_df[\"expansion_hunter_call_reviewer_svg\"]\n non_ref_motifs_df = non_ref_motifs_df[[\n \"SampleId\", \"LocusId\", 
\"VariantId\", \"ReferenceRegion\",\n \"Motif: Allele 1\", \"Motif: Allele 2\",\n \"Num Repeats: Allele 1\", \"Num Repeats: Allele 2\",\n \"Genotype\", \"GenotypeConfidenceInterval\",\n \"RepeatUnit\", \"ReadvizFilename\",\n ]]\n\n df = df[~df[\"LocusId\"].isin(set(non_ref_motifs_df[\"LocusId\"]))]\n df = pd.concat([df, non_ref_motifs_df])\n\n # Parse gnomAD metadata tsv\n print(f\"Loading {args.gnomad_metadata_tsv}\")\n gnomad_df = pd.read_table(args.gnomad_metadata_tsv)\n gnomad_df = gnomad_df[gnomad_df.release]\n gnomad_df.loc[:, \"age\"] = gnomad_df[\"project_meta.age\"].fillna(gnomad_df[\"project_meta.age_alt\"])\n gnomad_df[\"age\"].fillna(AGE_NOT_AVAILABLE, inplace=True)\n\n gnomad_df.loc[:, \"pcr_free\"] = gnomad_df[\"project_meta.product\"].apply(\n lambda s: pd.NA if not s or pd.isna(s) else (True if \"pcr-free\" in s.lower() else False), convert_dtype=\"boolean\")\n gnomad_df[\"pcr_free\"].fillna(gnomad_df[\"project_meta.v2_pcr_free\"].astype(\"boolean\"), inplace=True)\n gnomad_df[\"pcr_free\"].fillna(PCR_INFO_NOT_AVAILABLE, inplace=True)\n gnomad_df.loc[:, \"pcr_protocol\"] = gnomad_df[\"pcr_free\"].replace({True: \"pcr_free\", False: \"pcr_plus\"})\n\n gnomad_df = gnomad_df[[\n \"s\", \"population_inference.pop\", \"sex_imputation.sex_karyotype\",\n \"age\", \"pcr_protocol\",\n ]]\n gnomad_df.loc[:, \"s\"] = gnomad_df.s.apply(process_sample_id)\n\n unknown_sample_ids = set(df.SampleId) - set(gnomad_df.s)\n if len(unknown_sample_ids) > 0:\n print(f\"WARNING: Dropping {len(unknown_sample_ids)} sample ids in {args.expansion_hunter_tsv} that \"\n f\"were not found in the gnomAD metadata table, or were found but are not 'release': \", unknown_sample_ids)\n\n # Merge the data frames\n print(f\"Combining STR data tables with gnomAD metadata\")\n df = pd.merge(left=df, right=gnomad_df, how=\"inner\", left_on=\"SampleId\", right_on=\"s\").drop(columns=\"s\")\n\n print(f\"Found {len(set(df.SampleId))} gnomAD 'release' samples\")\n locus_id_with_max_samples = None\n max_num_samples = 0\n for locus_id in sorted(set(df.LocusId)):\n num_samples = len(set(df[df.LocusId == locus_id].SampleId))\n if num_samples > max_num_samples:\n locus_id_with_max_samples = locus_id\n max_num_samples = num_samples\n\n print(f\"Found {num_samples} {locus_id} 'release' samples\")\n\n if locus_id_with_max_samples is not None:\n missing_age_counter = sum(df[df.LocusId == locus_id_with_max_samples][\"age\"] == AGE_NOT_AVAILABLE)\n message = (f\"missing age for {missing_age_counter} out of {max_num_samples} \"\n f\"({100*missing_age_counter/max_num_samples:0.1f}%) individuals\")\n if missing_age_counter/max_num_samples > MISSING_AGE_THRESHOLD:\n raise ValueError(f\"ERROR: {message}\")\n print(f\"WARNING: {message}\")\n\n missing_pcr_protocol_counter = sum(df[df.LocusId == locus_id_with_max_samples][\"pcr_protocol\"] == \"pcr_plus\")\n message = (f\"missing pcr_protocol for {missing_pcr_protocol_counter} out of {max_num_samples} \" \n f\"({100*missing_pcr_protocol_counter/max_num_samples:0.1f}%) individuals\")\n if missing_pcr_protocol_counter/max_num_samples > MISSING_PCR_PROTOCOL_THRESHOLD:\n raise ValueError(f\"ERROR: {message}\")\n print(f\"WARNING: {message}\")\n\n return df\n\n\ndef init_gnomad_json(df):\n \"\"\"Compute an initial .json structure with a key for each STR locus. 
Initialize sub-dictionaries that will hold\n the allele count histogram and scatter plot counts for each locus.\n\n Args:\n df (pandas.DataFrame): Combined DataFrame generated by load_data_df(..)\n\n Return:\n dict: An initial version of the main .json structure being generated by this script.\n \"\"\"\n\n # Compute the STR loci\n df = df[[\"LocusId\", \"VariantId\", \"ReferenceRegion\", \"RepeatUnit\"]]\n df = df.drop_duplicates()\n\n # Init sub-dictionaries for each locus\n gnomad_json = {}\n for _, row in tqdm.tqdm(df.iterrows(), unit=\" rows\", total=len(df)):\n locus_id = row[\"LocusId\"]\n variant_id = row[\"VariantId\"]\n adjacent_repeat_label = ADJACENT_REPEAT_LABELS[variant_id] if variant_id in ADJACENT_REPEAT_LABELS else None\n\n if locus_id not in gnomad_json:\n gnomad_json[locus_id] = {\n \"LocusId\": locus_id,\n }\n\n repeat_specific_fields = {\n \"ReferenceRegion\": row[\"ReferenceRegion\"],\n \"ReferenceRepeatUnit\": row.get(\"RepeatUnit\"),\n \"AlleleCountHistogram\": {},\n \"AlleleCountScatterPlot\": {},\n \"AgeDistribution\": {},\n }\n\n if adjacent_repeat_label is not None:\n if \"AdjacentRepeats\" not in gnomad_json[locus_id]:\n gnomad_json[locus_id][\"AdjacentRepeats\"] = {}\n\n if variant_id not in gnomad_json[locus_id][\"AdjacentRepeats\"]:\n gnomad_json[locus_id][\"AdjacentRepeats\"][adjacent_repeat_label] = repeat_specific_fields\n else:\n gnomad_json[locus_id].update(repeat_specific_fields)\n\n return gnomad_json\n\n\ndef add_gene_ids(gnomad_json):\n \"\"\"Add the GeneId field to gnomad_json.\n\n Args:\n gnomad_json (dict): The main .json structure being generated by this script.\n \"\"\"\n for locus_id in gnomad_json:\n gene_name = gnomad_json[locus_id][\"GeneName\"]\n if gene_name in GENE_NAME_TO_GENE_ID:\n gnomad_json[locus_id][\"GeneId\"] = GENE_NAME_TO_GENE_ID[gene_name]\n continue\n\n # Get gene id via the Ensembl API.\n response = None\n while response is None or not response.ok or not response.json():\n print(f\"Getting gene id for {gene_name}\")\n request_url = f\"https://rest.ensembl.org/lookup/symbol/homo_sapiens/{gene_name}\"\n request_url += \"?content-type=application/json;expand=1\"\n response = requests.get(request_url)\n\n response_json = response.json()\n if not response_json.get('id'):\n print(\"Unable to get ensembl details for\", gene_name)\n continue\n\n gene_id = response_json['id']\n gnomad_json[locus_id][\"GeneId\"] = gene_id\n\n\ndef add_known_pathogenic_STR_annotations(args, gnomad_json):\n \"\"\"Load the args.known_pathogenic_strs_tsv table and add metadata from it to gnomad_json.\n\n Args:\n args (argparse.Namespace): The argparse parsed arguments object.\n gnomad_json (dict): The main .json structure being generated by this script.\n \"\"\"\n\n known_pathogenic_strs_info = parse_known_pathogenic_strs_tsv(args.known_pathogenic_strs_tsv)\n if len(known_pathogenic_strs_info) != EXPECTED_N_KNOWN_PATHOGENIC_REPEATS:\n raise ValueError(f\"{args.known_pathogenic_strs_tsv} contains {len(known_pathogenic_strs_info)} pathogenic loci.\"\n f\" Expected {EXPECTED_N_KNOWN_PATHOGENIC_REPEATS} loci.\")\n locus_ids_without_annotations = set(gnomad_json.keys()) - set(known_pathogenic_strs_info)\n if locus_ids_without_annotations:\n raise ValueError(f\"LocusIds not found in known pathogenic STRs spreadsheet: {locus_ids_without_annotations}\")\n\n # Compute STRipy urls\n for locus_id in gnomad_json:\n stripy_name = locus_id\n stripy_url = f\"https://stripy.org/database/{stripy_name}\"\n r = requests.get(stripy_url)\n if r.ok and \"invalid locus\" not in 
r.content.decode(\"UTF-8\").lower():\n known_pathogenic_strs_info[locus_id][\"STRipyName\"] = stripy_name\n else:\n print(f\"WARNING: STRipy page not found for {locus_id}\")\n\n # Add the metadata to gnomad_json\n for locus_id in gnomad_json:\n gnomad_json[locus_id].update(known_pathogenic_strs_info[locus_id])\n\n\ndef compute_most_common_motif_lookup_dict(df):\n \"\"\"Create a lookup dictionary that maps (LocusId, canonical motif) pairs to the most common non-canonical motif\n among observed motifs that share this same canonical motif. This allows converting motif rearrangements such as\n AAAAG, AAAGA, AAGAA, etc. at the RFC1 locus into \"AAAAG\" which is the rearrangement that is seen most frequently\n in the general population. Similarly, for the HTT locus, \"AGC\", \"CAG\", and \"GCA\" would get converted to \"CAG\" since\n that's the only rearrangement that's seen in practice.\n\n Args:\n df (pandas.DataFrame): Combined DataFrame generated by load_data_df(..)\n\n Return:\n dict: A dictionary of the form {(\"RFC1\", \"AAAAG\"): \"AAAAG\", ...}\n \"\"\"\n\n # First, create a dictionary that maps each (LocusId, Motif) pair to the number of times it occurs in df.\n # Example entries: ('RFC1', 'GAAAG'): 805, ('RFC1', 'AAAGG'): 774, etc.\n motif_counts = pd.concat([\n df[[\"LocusId\", \"Motif: Allele 1\"]].rename(columns={\"Motif: Allele 1\": \"Motif\"}),\n df[[\"LocusId\", \"Motif: Allele 2\"]].rename(columns={\"Motif: Allele 2\": \"Motif\"}),\n ]).value_counts().to_dict()\n\n # Create a new dictionary that maps (LocusId, canonical motif) pairs to the most common non-canonical motif\n # observed among motifs that share the same canonical motif. Using the example from the previous comment, it would\n # map ('RFC1', 'AAAGG') to 'GAAAG' rather than 'AAAGG' because 'GAAAG' is observed 805 times while 'AAAGG' is only\n # observed 774 times in df.\n most_common_motif_lookup = {}\n for (locus_id, motif), counter in motif_counts.items():\n key = (locus_id, compute_canonical_motif(motif))\n if key not in most_common_motif_lookup:\n most_common_motif_lookup[key] = (motif, counter)\n continue\n\n previous_motif, previous_counter = most_common_motif_lookup[key]\n if previous_counter < counter:\n most_common_motif_lookup[key] = (motif, counter)\n\n # Drop the counter from the value\n most_common_motif_lookup = {key: motif for key, (motif, _) in most_common_motif_lookup.items()}\n\n return most_common_motif_lookup\n\n\ndef add_motif_classification_field(gnomad_json, most_common_motif_lookup):\n \"\"\"For repeats where the pathogenic motif differs from the reference motif, add info on which motifs are known to be\n disease-associated and which are benign.\n\n Args:\n gnomad_json (dict): The main .json structure being generated by this script.\n most_common_motif_lookup (dict): The dictionary generated by compute_most_common_motif_lookup_dict(..)\n \"\"\"\n\n non_ref_pathogenic_motif_info = json.loads(pkgutil.get_data(\"str_analysis\", \"data/locus_info.json\"))\n\n for locus_id in gnomad_json:\n gene_name = gnomad_json[locus_id][\"GeneName\"]\n if gene_name not in non_ref_pathogenic_motif_info:\n continue\n\n gnomad_json[locus_id][\"RepeatUnitClassification\"] = {}\n for classification, motifs in non_ref_pathogenic_motif_info[gene_name][\"Motifs\"].items():\n for motif in motifs:\n canonical_motif = compute_canonical_motif(motif)\n motif_key = most_common_motif_lookup.get((locus_id, canonical_motif))\n if motif_key is None:\n # If this known-benign or known-pathogenic motif wasn't detected in any gnomAD 
samples, just\n # include it as-is, the way it's recorded in data/locus_info.json\n motif_key = motif\n gnomad_json[locus_id][\"RepeatUnitClassification\"][motif_key] = classification\n\n\ndef add_histograms_and_compute_readviz_paths(df, gnomad_json, most_common_motif_lookup, no_readviz_images=False):\n \"\"\"Populate the AlleleCountHistogram, AlleleCountScatterPlot and AgeDistribution. Also, compute encrypted readviz\n paths and add these & other metadata to readviz_json.\n\n Args:\n df (pandas.DataFrame): Combined DataFrame generated by load_data_df(..)\n gnomad_json (dict): The main .json structure being generated by this script.\n most_common_motif_lookup (dict): The dictionary generated by compute_most_common_motif_lookup_dict(..)\n no_readviz_images (bool): If True, this method will assume the REViewer images were not generated for this dataset.\n\n Return:\n (list, dict): 2-tuple containing (readviz_paths_to_rename, readviz_json) where\n readviz_paths_to_rename is a list of 2-tuples that matches the original readviz svg filename with the\n corresponding encrypted filename that can be made public.\n readviz_json is the .json data structure that will be loaded into the gnomAD browser to generate the\n readviz section of the STR pages. It contains the encrypted readviz filename and associated metadata\n for each sample.\n \"\"\"\n\n readviz_paths_to_rename = set()\n readviz_json = {}\n age_counter = collections.defaultdict(int)\n\n df = df.sort_values([\"Num Repeats: Allele 2\", \"Num Repeats: Allele 1\", \"Motif: Allele 2\", \"Motif: Allele 1\"], ascending=False)\n for _, row in tqdm.tqdm(df.iterrows(), unit=\" rows\", total=len(df)):\n locus_id = row[\"LocusId\"]\n variant_id = row[\"VariantId\"]\n\n is_adjacent_repeat = variant_id in ADJACENT_REPEAT_LABELS\n adjacent_repeat_label = ADJACENT_REPEAT_LABELS[variant_id] if is_adjacent_repeat else None\n canonical_motif1 = compute_canonical_motif(row[\"Motif: Allele 1\"])\n canonical_motif2 = compute_canonical_motif(row[\"Motif: Allele 2\"])\n motif1 = most_common_motif_lookup[locus_id, canonical_motif1]\n motif2 = most_common_motif_lookup[locus_id, canonical_motif2]\n\n # Get gnomAD fields\n sex_karyotype = row[\"sex_imputation.sex_karyotype\"]\n population = row[\"population_inference.pop\"]\n pcr_protocol = row[\"pcr_protocol\"]\n\n # Compute age_range\n if row[\"age\"] == AGE_NOT_AVAILABLE:\n age_range = AGE_NOT_AVAILABLE\n else:\n age = int(row[\"age\"])\n age_lower_bound = AGE_RANGE_SIZE * math.floor(age/AGE_RANGE_SIZE)\n age_upper_bound = AGE_RANGE_SIZE * math.ceil((age + 0.1)/AGE_RANGE_SIZE)\n assert age_lower_bound != age_upper_bound\n if age_upper_bound <= LOWER_AGE_CUTOFF:\n age_range = f\"<{LOWER_AGE_CUTOFF}\"\n elif age_lower_bound >= UPPER_AGE_CUTOFF:\n age_range = f\">{UPPER_AGE_CUTOFF}\"\n else:\n age_range = f\"{age_lower_bound}-{age_upper_bound}\"\n\n age_range_to_show_in_readviz_section = AGE_NOT_AVAILABLE\n if (population in POPULATIONS_WITH_AGE_DISPLAYED_IN_READVIZ_SECTION\n and age_counter[locus_id, sex_karyotype] < MAX_AGES_PER_BUCKET_TO_DISPLAY_IN_THE_READVIZ_SECTION):\n age_counter[locus_id, sex_karyotype] += 1\n age_range_to_show_in_readviz_section = age_range\n\n # Get num_repeats1, num_repeats2\n try:\n num_repeats1 = int(float(row[\"Num Repeats: Allele 1\"]))\n num_repeats2 = float(row[\"Num Repeats: Allele 2\"])\n except ValueError as e:\n print(\"Num Repeats parse error\", e, row[\"Genotype\"], row[\"GenotypeConfidenceInterval\"], \". 
Skipping..\")\n continue\n\n if sex_karyotype == \"XY\" and \"X\" in row[\"ReferenceRegion\"]:\n is_hemizygous = True\n if math.isnan(num_repeats2) or num_repeats2 == num_repeats1:\n num_repeats2 = num_repeats1\n else:\n print(f\"ERROR: Locus is male and on chrX, but has different values for allele1, allele2: {row.to_dict()}\")\n continue\n else:\n is_hemizygous = False\n\n num_repeats2 = int(num_repeats2)\n\n # Update histogram and scatter plot counts\n histogram_key1 = f\"{population}/{sex_karyotype}/{motif1}\"\n histogram_key2 = f\"{population}/{sex_karyotype}/{motif2}\"\n scatter_plot_key = f\"{population}/{sex_karyotype}/{motif1}/{motif2}\"\n age_distribution_key = f\"{age_range}\"\n\n if is_adjacent_repeat:\n data_dict = gnomad_json[locus_id][\"AdjacentRepeats\"][adjacent_repeat_label]\n else:\n data_dict = gnomad_json[locus_id]\n\n for histogram_key in histogram_key1, histogram_key2:\n if histogram_key not in data_dict[\"AlleleCountHistogram\"]:\n data_dict[\"AlleleCountHistogram\"][histogram_key] = collections.defaultdict(int)\n\n data_dict[\"AlleleCountHistogram\"][histogram_key1][f\"{num_repeats1}\"] += 1\n if not is_hemizygous:\n data_dict[\"AlleleCountHistogram\"][histogram_key2][f\"{num_repeats2}\"] += 1\n\n if scatter_plot_key not in data_dict[\"AlleleCountScatterPlot\"]:\n data_dict[\"AlleleCountScatterPlot\"][scatter_plot_key] = collections.defaultdict(int)\n data_dict[\"AlleleCountScatterPlot\"][scatter_plot_key][f\"{num_repeats1}/{num_repeats2}\"] += 1\n\n if age_range != AGE_NOT_AVAILABLE:\n if age_distribution_key not in data_dict[\"AgeDistribution\"]:\n data_dict[\"AgeDistribution\"][age_distribution_key] = collections.defaultdict(int)\n for num_repeats in num_repeats1, num_repeats2:\n data_dict[\"AgeDistribution\"][age_distribution_key][f\"{num_repeats}\"] += 1\n\n # Update readviz metadata\n if not is_adjacent_repeat:\n encrypted_svg_prefix = hashlib.sha512(f\"{locus_id}_{row['SampleId']}_{salt}\".encode(\"UTF-8\")).hexdigest()\n # The sha digest is 128 letters long - which is too long for a filename. Use only the first 35 letters.\n encrypted_svg_filename = f\"{encrypted_svg_prefix[:35]}.svg\"\n\n if not no_readviz_images:\n original_svg_filename = row[\"ReadvizFilename\"]\n readviz_paths_to_rename.add((original_svg_filename, f\"{locus_id}/{encrypted_svg_filename}\"))\n\n if locus_id not in readviz_json:\n readviz_json[locus_id] = []\n\n readviz_json[locus_id].append({\n \"Allele1Motif\": motif1,\n \"Allele2Motif\": motif2,\n \"Allele1HistogramKey\": histogram_key1,\n \"Allele2HistogramKey\": histogram_key2 if not is_hemizygous else None,\n \"ScatterPlotKey\": scatter_plot_key,\n \"ScatterPlotX\": num_repeats2,\n \"ScatterPlotY\": num_repeats1,\n \"Sex\": sex_karyotype,\n \"Age\": age_range_to_show_in_readviz_section,\n \"Population\": population,\n \"PcrProtocol\": pcr_protocol,\n \"Genotype\": row[\"Genotype\"],\n \"GenotypeConfidenceInterval\": row[\"GenotypeConfidenceInterval\"],\n \"ReadvizFilename\": None if no_readviz_images else encrypted_svg_filename,\n })\n\n return list(readviz_paths_to_rename), readviz_json\n\n\ndef sort_keys(gnomad_json):\n \"\"\"Sort keys in the output json. 
Python built-in dictionaries preserve key order since python3.6, so this works.\n For example, for the \"AFF2\" locus this sorts keys so that\n\n {\n \"ReferenceRegion\": \"chrX:148500631-148500691\",\n \"ReferenceRepeatUnit\": \"GCC\",\n \"LocusId\": \"AFF2\",\n \"AgeDistribution\": {\n \"35-40\": {\n \"4\": 3,\n \"0\": 10,\n \"2\": 2,\n ...\n },\n \"20-25\": {\n \"5\": 5,\n \"6\": 12,\n \"0\": 10,\n \"2\": 2,\n \"4\": 3,\n ...\n },\n }\n }\n\n is converted to\n\n {\n \"LocusId\": \"AFF2\",\n \"ReferenceRegion\": \"chrX:148500631-148500691\",\n \"ReferenceRepeatUnit\": \"GCC\",\n \"AgeDistribution\": {\n \"20-25\": {\n \"0\": 10,\n \"2\": 2,\n \"4\": 3,\n \"5\": 5,\n \"6\": 12,\n ...\n },\n \"35-40\": {\n \"0\": 10,\n \"2\": 2,\n \"4\": 3,\n ...\n },\n }\n }\n\n Args:\n gnomad_json (dict): The main .json structure being generated by this script.\n \"\"\"\n\n def sort_by_key(key_type=str):\n def key_lookup(key_value):\n return key_type(key_value[0])\n return key_lookup\n\n def top_level_sort_order(key_value):\n # Sort top-level keys so that the histograms are last\n return key_value[0] in (\"AlleleCountHistogram\", \"AlleleCountScatterPlot\", \"AgeDistribution\"), key_value[0]\n\n for locus_id, locus_data in gnomad_json.items():\n for histogram_name, histogram_key_type in (\n (\"AlleleCountHistogram\", int),\n (\"AlleleCountScatterPlot\", str),\n (\"AgeDistribution\", int),\n ):\n # `histogram_key` here refers to the top level keys in the histogram\n # For example the \"20-25\" age range is a `histogram_key` within \"AgeDistribution\"\n # Each of the age ranges within \"AgeDistribution\" has a nested dict. \n # e.g., \"0\" is a key within \"20-25\": \n # gnomad_json[\"AFF2\"][\"AgeDistribution\"][\"20-25\"][\"0\"] = 10\n # This first `for` loop below sorts these nested dicts (e.g., sorts the dicts within the age range \"20-25\")\n for histogram_key in locus_data[histogram_name]:\n locus_data[histogram_name][histogram_key] = {\n key: value for key, value in sorted(\n locus_data[histogram_name][histogram_key].items(), key=sort_by_key(key_type=histogram_key_type))\n }\n # This sorts the `histogram_key` values\n # e.g, this sorts \"20-25\", \"25-30\", \"30-35\", etc. within \"AgeDistribution\" \n locus_data[histogram_name] = {\n key: value for key, value in sorted(\n locus_data[histogram_name].items(), key=sort_by_key())\n }\n\n # This sorts the top level keys, which contain the histogram names above (e.g., AgeDistribution)\n # and other keys, like \"Diseases\", \"GeneID\", \"GeneName\", etc.\n gnomad_json[locus_id] = {\n key: value for key, value in sorted(gnomad_json[locus_id].items(), key=top_level_sort_order)\n }\n\n\ndef remove_readviz_filenames_that_dont_exist(args, readviz_json):\n \"\"\"Remove ReadvizFilename entries that aren't listed in args.existing_readviz_filename_list.\n\n Args:\n args (argparse.Namespace): The argparse parsed arguments object.\n readviz_json (dict): The .json data structure that will be loaded into the gnomAD browser to generate the\n readviz section of the STR pages. 
It contains the encrypted readviz filename and associated\n metadata for each sample.\n \"\"\"\n readviz_filenames_df = pd.read_table(args.existing_readviz_filename_list, names=[\"filenames\"])\n readviz_filenames_list = readviz_filenames_df[\"filenames\"]\n readviz_filenames_set = set(readviz_filenames_list)\n\n if len(readviz_filenames_list) > len(readviz_filenames_set):\n raise ValueError(f\"{args.existing_readviz_filename_list} contains duplicate entries\")\n\n for locus_id in readviz_json:\n removed = total = 0\n for record in readviz_json[locus_id]:\n total += 1\n if record[\"ReadvizFilename\"] not in readviz_filenames_set:\n record[\"ReadvizFilename\"] = None\n removed += 1\n\n message = (f\"{locus_id:20s}: {removed} out of {total} ({100*removed/total:0.2f}%) readviz images removed \"\n f\"because they are missing from {args.existing_readviz_filename_list}\")\n\n if removed/total > MISSING_READVIZ_ERROR_THRESHOLD:\n raise ValueError(message)\n\n print(message)\n\n\ndef validate_json(df, gnomad_json, readviz_json, no_readviz_images=False):\n \"\"\"Perform basic checks to validate the gnomad_json and readviz_json data structure.\n\n Args:\n df (pandas.DataFrame): Combined DataFrame generated by load_data_df(..).\n gnomad_json (dict): The main .json structure being generated by this script.\n readviz_json (dict): The .json data structure that will be loaded into the gnomAD browser to generate the\n readviz section of the STR pages. It contains the encrypted readviz filename and associated\n metadata for each sample.\n no_readviz_images (bool): If True, this method will skip validation of the \"ReadvizFilename\" field. This is used\n for datasets where REViewer images were not generated.\n \"\"\"\n\n total_samples = len(set(df[\"SampleId\"]))\n if len(gnomad_json) != EXPECTED_N_KNOWN_PATHOGENIC_REPEATS:\n raise ValueError(f\"gnomad_json contains {len(gnomad_json)} pathogenic loci. 
\"\n f\"Expected {EXPECTED_N_KNOWN_PATHOGENIC_REPEATS} loci.\")\n\n gnomad_json_str_loci = set(gnomad_json)\n readviz_json_str_loci = set(readviz_json)\n if gnomad_json_str_loci != readviz_json_str_loci:\n raise ValueError(f\"gnomad_json locus ids are different from readviz_json locus ids:\\n\"\n f\"{gnomad_json_str_loci} \\n{readviz_json_str_loci}\")\n\n fraction_male_samples = sum(df[\"sex_imputation.sex_karyotype\"] == \"XY\")/len(df)\n for locus_id, data in gnomad_json.items():\n # Check that expected keys are present and have non-null values.\n for key in \"ReferenceRepeatUnit\", \"LocusId\", \"GeneName\", \"GeneId\", \"ReferenceRegion\", \\\n \"AlleleCountHistogram\", \"AlleleCountScatterPlot\", \"AgeDistribution\", \"Diseases\":\n if data[key] is None:\n raise ValueError(f\"{locus_id} {key} is None\")\n\n # Check that expected keys are present in the data[\"Diseases\"] dictionary and have non-null values.\n for key in \"Symbol\", \"Name\", \"Inheritance\", \"PathogenicMin\", \"OMIM\":\n for i, disease_data in enumerate(data[\"Diseases\"]):\n if disease_data[key] is None:\n raise ValueError(f\"{locus_id} disease #{i} {key} is None\")\n\n # Check that total counts in the histogram and scatter plot roughly match expectation, taking into account\n # hemizygous genotypes (which only contribute 1 count) and missing genotypes due to low coverage in some samples\n if \"X\" in data[\"ReferenceRegion\"]:\n expected_counts_in_histogram = total_samples * (2 - fraction_male_samples)\n else:\n expected_counts_in_histogram = total_samples * 2\n\n for key, expected_counts in [\n (\"AlleleCountHistogram\", expected_counts_in_histogram),\n (\"AlleleCountScatterPlot\", total_samples)\n ]:\n total_counts_in_plot = sum([sum(d.values()) for d in data[key].values()])\n if not ((1 - MISSING_GENOTYPES_ERROR_THRESHOLD) * expected_counts < total_counts_in_plot <= expected_counts):\n raise ValueError(f\"ERROR: {locus_id} total counts in {key} = {total_counts_in_plot} while expected counts = {expected_counts}\")\n\n total_readviz_samples = len(readviz_json[locus_id])\n if total_readviz_samples < (1 - MISSING_GENOTYPES_ERROR_THRESHOLD) * total_samples:\n raise ValueError(f\"{locus_id}: only {total_readviz_samples} readviz records. Expected {total_samples}\")\n if total_readviz_samples > total_samples:\n raise ValueError(f\"{locus_id}: found {total_readviz_samples} readviz records which is more than the total \"\n f\"number of samples ({total_samples})\")\n\n if not no_readviz_images:\n total_readviz_samples_with_image = sum(1 for r in readviz_json[locus_id] if r[\"ReadvizFilename\"] is not None)\n if total_readviz_samples_with_image < (1 - MISSING_READVIZ_ERROR_THRESHOLD) * total_readviz_samples:\n raise ValueError(f\"{locus_id}: found {total_readviz_samples_with_image} readviz images. 
Expected at \"\n f\"least {total_readviz_samples}.\")\n\n\ndef export_readviz_rename_list(readviz_paths_to_rename, readviz_rename_list_output_path):\n \"\"\"Utility function for writing out the readviz_paths_to_rename data structure.\n\n Args:\n readviz_paths_to_rename (list): list of 2-tuples that matches the original readviz svg filename with the\n corresponding encrypted filename that can be made public.\n readviz_rename_list_output_path (str): Local output path where to write the readviz_paths_to_rename table.\n \"\"\"\n\n if not readviz_rename_list_output_path.endswith(\".gz\"):\n raise ValueError(f\"{readviz_rename_list_output_path} needs to end in .gz\")\n\n print(f\"Writing {readviz_rename_list_output_path}\")\n with gzip.open(readviz_rename_list_output_path, \"wt\") as f:\n for a, b in readviz_paths_to_rename:\n f.write(f\"{a}\\t{b}\\n\")\n\n\ndef main():\n \"\"\"Generate 3 files: the main gnomAD STR json data file which will be loaded into the gnomAD browser to populate\n the STR pages, a readviz_rename_list table which maps REViewer image filenames to the corresponding encrypted\n filenames which can be made public without revealing sample ids, and a readviz_paths_json file which contains\n metadata on samples and readviz images.\n \"\"\"\n\n args = parse_args()\n\n # Generate the 3 data structures\n df = load_data_df(args)\n gnomad_json = init_gnomad_json(df)\n add_known_pathogenic_STR_annotations(args, gnomad_json)\n add_gene_ids(gnomad_json)\n most_common_motif_lookup = compute_most_common_motif_lookup_dict(df)\n add_motif_classification_field(gnomad_json, most_common_motif_lookup)\n readviz_paths_to_rename, readviz_json = add_histograms_and_compute_readviz_paths(df, gnomad_json, most_common_motif_lookup, no_readviz_images=args.no_readviz)\n if args.existing_readviz_filename_list:\n remove_readviz_filenames_that_dont_exist(args, readviz_json)\n sort_keys(gnomad_json)\n\n # Perform validity checks\n validate_json(df, gnomad_json, readviz_json, no_readviz_images=args.no_readviz)\n\n # Write out the data structures\n date_stamp = datetime.now().strftime(\"%Y_%m_%d\")\n local_output_dir = os.path.expanduser(os.path.dirname(args.expansion_hunter_tsv))\n output_filename_label = f\"__{args.output_filename_suffix}\" if args.output_filename_suffix else \"\"\n\n df.to_csv(f\"{local_output_dir}/gnomAD_STR_calls_with_gnomAD_metadata_and_sample_ids{output_filename_label}__{date_stamp}.tsv.gz\",\n compression=\"gzip\", sep=\"\\t\", index=False, header=True)\n\n readviz_metadata_df = pd.DataFrame([\n {**readviz_record, **{\"LocusId\": locus_id}}\n for locus_id, readviz_records in readviz_json.items() for readviz_record in readviz_records\n ])\n readviz_metadata_df.to_csv(f\"{local_output_dir}/gnomAD_STR_readviz_metadata{output_filename_label}__{date_stamp}.tsv.gz\",\n compression=\"gzip\", sep=\"\\t\", index=False, header=True)\n\n export_json(gnomad_json, f\"{local_output_dir}/gnomAD_STR_distributions{output_filename_label}__{date_stamp}.json.gz\", args.output_dir)\n export_json(readviz_json, f\"{local_output_dir}/gnomAD_STR_readviz_metadata{output_filename_label}__{date_stamp}.json.gz\", args.output_dir)\n export_readviz_rename_list(readviz_paths_to_rename, f\"{local_output_dir}/readviz_rename_list{output_filename_label}__{date_stamp}.tsv.gz\")\n\n print(\"Done\")\n\n\nif __name__ == \"__main__\":\n main()\n " ]
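As a quick illustration of the age-binning rule applied inline in add_histograms_and_compute_readviz_paths above (ages are rounded into AGE_RANGE_SIZE-year bins and truncated at LOWER_AGE_CUTOFF / UPPER_AGE_CUTOFF before being shared), here is a standalone sketch; the helper name age_to_range is introduced for illustration only and does not exist in the script:

import math

AGE_RANGE_SIZE = 5
LOWER_AGE_CUTOFF = 20
UPPER_AGE_CUTOFF = 80

def age_to_range(age: int) -> str:
    # Same arithmetic as the inline logic in the script above
    lower = AGE_RANGE_SIZE * math.floor(age / AGE_RANGE_SIZE)
    upper = AGE_RANGE_SIZE * math.ceil((age + 0.1) / AGE_RANGE_SIZE)
    if upper <= LOWER_AGE_CUTOFF:
        return f"<{LOWER_AGE_CUTOFF}"
    if lower >= UPPER_AGE_CUTOFF:
        return f">{UPPER_AGE_CUTOFF}"
    return f"{lower}-{upper}"

print(age_to_range(23))  # 20-25
print(age_to_range(25))  # 25-30
print(age_to_range(18))  # <20
print(age_to_range(83))  # >80

The +0.1 inside the ceil() pushes exact multiples of AGE_RANGE_SIZE into the next bin, so a 25-year-old lands in "25-30" rather than in a degenerate "25-25" range.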
[ [ "pandas.isna", "pandas.read_table", "pandas.concat", "pandas.merge" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]