repo_name (string, lengths 6–130) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list) |
---|---|---|---|---|---|
KRR-Oxford/BERTMap | [
"26eb78288885b8749c5bd970d44fa0ec18c6f417"
] | [
"bertmap/map/bert_embeds_map.py"
] | [
"\"\"\"Mapping Generation on using Pretrained/Fine-tuned BERT with various pooling strategies and cosine-similarity.\n\"\"\"\n\nimport time\nfrom typing import List, Optional\n\nimport torch\nfrom bertmap.bert import BERTStatic\nfrom bertmap.map import OntoMapping\nfrom bertmap.onto import OntoBox\nfrom bertmap.utils import get_device\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\nclass BERTEmbedsMapping(OntoMapping):\n def __init__(\n self,\n src_ob: OntoBox,\n tgt_ob: OntoBox,\n candidate_limit: Optional[int] = 50,\n save_dir: str = \"\",\n batch_size: int = 32,\n max_length: int = 128,\n nbest: int = 1,\n bert_checkpoint: str = \"some checkpoint\",\n tokenizer_path: str = \"emilyalsentzer/Bio_ClinicalBERT\",\n string_match: bool = True,\n strategy: str = \"mean\",\n device_num: int = 0,\n ):\n\n super().__init__(src_ob, tgt_ob, candidate_limit, save_dir)\n # basic attributes\n self.batch_size = batch_size\n self.nbest = nbest\n self.string_match = string_match\n self.strategy = strategy\n assert self.strategy == \"mean\" or self.strategy == \"cls\"\n\n self.bert = BERTStatic(\n bert_checkpoint=bert_checkpoint,\n tokenizer_path=tokenizer_path,\n with_classifier=False,\n max_length=max_length,\n )\n self.device = get_device(device_num=device_num)\n self.bert.model.to(self.device)\n\n def alignment(self, flag=\"SRC\"):\n self.start_time = time.time()\n print_flag = (\n f\"{flag}: {self.src_ob.onto_text.iri_abbr}\"\n if flag == \"SRC\"\n else f\"{flag}: {self.tgt_ob.onto_text.iri_abbr}\"\n )\n from_ob, to_ob = self.from_to_config(flag=flag)\n i = 0\n for from_class in from_ob.onto.classes():\n from_class_iri = from_ob.onto_text.abbr_entity_iri(from_class.iri)\n from_labels = from_ob.onto_text.texts[from_class_iri][\"label\"]\n search_space = (\n to_ob.onto_text.text.keys()\n if not self.candidate_limit\n else to_ob.select_candidates(from_labels, self.candidate_limit)\n )\n from_class_idx = from_ob.onto_text.class2idx[from_class_iri]\n # assert from_class_idx == i\n i += 1 # to test the order preservation in OntoText dict\n if len(search_space) == 0:\n self.log_print(\n f\"[Time: {round(time.time() - self.start_time)}][{print_flag}][Class-idx: {from_class_idx}] No candidates available for for current entity ...\"\n )\n continue\n nbest_results = self.batch_alignment(\n from_class_iri, from_labels, search_space, flag=flag\n )\n for to_class_iri, mapping_score in nbest_results:\n if mapping_score <= 0.01:\n mapping_score = 0.0\n result = (from_class_iri, to_class_iri, mapping_score)\n self.log_print(\n f\"[Time: {round(time.time() - self.start_time)}][{print_flag}][Class-idx: {from_class_idx}][Mapping: {result}]\"\n )\n\n def batch_alignment(\n self, from_class_iri: str, from_labels: List[str], search_space: List[str], flag: str\n ):\n\n from_ob, to_ob = self.from_to_config(flag=flag)\n # here batch size refers to maximum number of to-labels in a batch\n to_label_size = max(self.batch_size // len(from_labels), self.nbest + 1)\n to_labels_iterator = to_ob.onto_text.labels_iterator(search_space, to_label_size)\n \n searched_class_num = 0\n batch_nbest_scores = torch.tensor([-1] * self.nbest).to(self.device)\n batch_nbest_idxs = torch.tensor([-1] * self.nbest).to(self.device)\n from_text_dict = from_ob.onto_text.texts[from_class_iri]\n from_embed = self.bert.ontotext_embeds(\n {from_class_iri: from_text_dict}, strategy=self.strategy\n )\n\n for to_batch in to_labels_iterator:\n batch_label_pairs = []\n batch_lens = []\n # prepare a batch of label pairs for a given from-onto class\n 
for to_class_iri, text_dict in to_batch.items():\n to_labels = text_dict[\"label\"]\n label_pairs = [\n [from_label, to_label] for to_label in to_labels for from_label in from_labels\n ]\n # return the map if the to-class has a label that is exactly the same as one of the labels of the from-class\n if self.string_match:\n for pair in label_pairs:\n if pair[0] == pair[1]:\n return [(to_class_iri, 1.0)]\n batch_label_pairs += label_pairs\n batch_lens.append(len(to_labels) * len(from_labels))\n\n # retrieve the batch embeds\n to_batch_embeds = self.bert.ontotext_embeds(to_batch, strategy=self.strategy)\n\n # compare the cosine similarity scores between two batches\n sim_scores = (\n torch.tensor(cosine_similarity(from_embed, to_batch_embeds))\n .to(self.device)\n .squeeze(0)\n )\n K = len(sim_scores) if len(sim_scores) < self.nbest else self.nbest\n nbest_scores, nbest_idxs = torch.topk(sim_scores, k=K)\n nbest_idxs += searched_class_num\n # we do the substitution for every batch to prevent from memory overflow\n batch_nbest_scores, temp_idxs = torch.topk(\n torch.cat([batch_nbest_scores, nbest_scores]), k=self.nbest\n )\n batch_nbest_idxs = torch.cat([batch_nbest_idxs, nbest_idxs])[temp_idxs]\n searched_class_num += len(to_batch)\n\n batch_nbest_class_iris = [search_space[idx] for idx in batch_nbest_idxs]\n return list(zip(batch_nbest_class_iris, batch_nbest_scores.cpu().detach().numpy()))\n"
] | [
[
"torch.topk",
"torch.cat",
"sklearn.metrics.pairwise.cosine_similarity",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ftrotter/aggStatModelsAndHumanJudgment_PUBL | [
"c35bb3b33136f3a7603f9885f4d25a69f916d6ab"
] | [
"data/predictionsStatsTable.py"
] | [
"#mcandrew\n\nimport sys\nimport numpy as np\nimport pandas as pd\n\nsys.path.append(\"../\")\nfrom mods.datahelp import grabData\n\ndef computeMedianAndIQR(d):\n\n stats = {\"quantile\":[], \"value\":[]}\n for quantile in [ 0.010, 0.025, 0.050, 0.100, 0.150, 0.200\n ,0.250, 0.300, 0.350, 0.400, 0.450, 0.500, 0.550, 0.600\n , 0.650 ,0.700 ,0.750 ,0.800 ,0.850 ,0.900 ,0.950\n ,0.975, 0.990 ]:\n d[\"distanceFromQ\"] = abs(d.cprobs - quantile)\n val = d.sort_values(\"distanceFromQ\").iloc[0][\"bin\"]\n\n stats[\"quantile\"].append(quantile)\n stats[\"value\"].append(val)\n\n # add in mode\n stats[\"quantile\"].append(\"mode\")\n stats[\"value\"].append( d.sort_values(\"dens\").iloc[-2][\"bin\"] )\n \n return pd.DataFrame(stats)\n \nif __name__ == \"__main__\":\n\n \"\"\" This code produces quantiles from predictive consensus distributions for each question asked that required a quantitiative output.\n \"\"\"\n\n n=0\n gd = grabData(\"../data\")\n predictions = gd.predictions()\n \n for (surveynum,qid),data in predictions.groupby([\"surveynum\",\"qid\"]):\n print(qid)\n\n stats = computeMedianAndIQR(data)\n stats['surveynum'] = surveynum\n stats['qid'] = qid\n\n if n==0:\n H=True\n M=\"w\"\n n=1\n else:\n H=False\n M=\"a\"\n stats.to_csv(\"./quantilesFromConsensusPredictions.csv\",mode=M,header=H,index=False)\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
haha94322/HMQAR | [
"b753348c2f076cbb16770f66f4503245cf507301"
] | [
"models/MQAR.py"
] | [
"import numpy as np\nimport itertools\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.modules.module import Module\nimport torch.nn.functional as F\n\nclass MQAR(Module):\n def __init__(self, module_dim, num_objects, max_subset_size):\n super(MQAR, self).__init__()\n\n self.module_dim = module_dim\n\n self.self_attn_en = nn.MultiheadAttention(module_dim, 8, dropout=0.1)\n self.self_attn_de = nn.MultiheadAttention(module_dim, 8, dropout=0.1)\n\n self.k_objects_fusion = nn.ModuleList()\n self.k_objects_fusion_de = nn.Linear(2 * module_dim, module_dim)\n\n\n # self.k_objects_fusion = nn.Linear(2 * module_dim, module_dim)\n for i in range(min(num_objects, max_subset_size + 1), 0, -1):\n self.k_objects_fusion.append(nn.Linear(2 * module_dim, module_dim))\n\n\n self.activation = nn.ELU()\n self.activations = F.relu\n self.max_subset_size = max_subset_size\n self.q_proj = nn.Linear(module_dim, module_dim, bias=False)\n\n self.dropout_out = nn.Dropout(0.1)\n self.dropout_out2 = nn.Dropout(0.1)\n\n def encoder(self, object_list, cond_feat):\n\n batch_size = object_list.size(0)\n MQAR_feats = []\n\n object_list_in = object_list.permute(1, 0, 2)\n\n q = k = object_list_in\n srcs = self.self_attn_en(q, k, value=object_list_in)[0]\n # srcs = object_list_in\n\n for scaleID, src in enumerate(srcs):\n g_feat = src\n h_feat = torch.cat((g_feat, cond_feat), dim=-1)\n h_feat = self.activation(self.k_objects_fusion[scaleID](h_feat)) #self.k_objects_fusion[scaleID]\n MQAR_feats.append(h_feat)\n\n return MQAR_feats\n\n def decoder(self, encoder_out, cond_feat):\n cond_feat = self.q_proj(cond_feat)\n batch_size = cond_feat.size(0)\n clip_level_MQAR_output = encoder_out\n clip_level_MQAR_outputs = clip_level_MQAR_output.view(batch_size, -1, self.module_dim).permute(1, 0, 2)\n # clip_level_MQAR_outputs = clip_level_MQAR_question\n q = cond_feat.unsqueeze(0)\n k = clip_level_MQAR_outputs\n clip_level_MQAR_outputs = self.self_attn_de(q, k, value=clip_level_MQAR_outputs)[0] # .squeeze(0)\n clip_level_MQAR_outputs = clip_level_MQAR_outputs[0, :, :]\n\n clip_level_MQAR_outputs = clip_level_MQAR_output.sum(1) + self.dropout_out2(clip_level_MQAR_outputs)\n\n h_feat = torch.cat((clip_level_MQAR_outputs, cond_feat), dim=-1)\n h_feat = self.activation(self.k_objects_fusion_de(h_feat))\n\n h_feat = clip_level_MQAR_output.sum(1) + h_feat\n return h_feat\n\n\n\n def forward(self, object_list, question, key):\n encoder_out = self.encoder(object_list, question)\n #\n encoder_out = torch.cat(\n [frame_relation.unsqueeze(1) for frame_relation in encoder_out],\n dim=1)\n # encoder_out = object_list\n decoder_out = self.decoder(encoder_out, key)\n # decoder_out = encoder_out.sum(1)\n\n return encoder_out, decoder_out\n\n\n\n"
] | [
[
"torch.nn.Dropout",
"torch.nn.MultiheadAttention",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.ELU",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Heinlein-vi/simulated-unsupervised-tensorflow | [
"eafa49f0a98641c99b66c8eafbac48314363ae0f"
] | [
"model.py"
] | [
"import tensorflow as tf\nfrom tensorflow.contrib.framework import arg_scope\n\nfrom layers import *\nfrom utils import show_all_variables\n\nclass Model(object):\n def __init__(self, config, data_loader):\n self.data_loader = data_loader\n\n self.task = config.task\n self.debug = config.debug\n self.config = config\n\n self.input_height = config.input_height\n self.input_width = config.input_width\n self.input_channel = config.input_channel\n\n self.reg_scale = config.reg_scale\n self.learning_rate = config.learning_rate\n self.max_grad_norm = config.max_grad_norm\n self.batch_size = config.batch_size\n\n self.layer_dict = {}\n\n self._build_placeholders()\n self._build_model()\n self._build_steps()\n self._build_optim()\n\n show_all_variables()\n\n def _build_placeholders(self):\n image_dims = [self.input_height, self.input_width, self.input_channel]\n\n min_after_dequeue = 5000\n capacity = min_after_dequeue + 3 * self.batch_size\n\n self.synthetic_batch_size = tf.placeholder(tf.int32, [], \"synthetic_batch_size\")\n self.synthetic_filenames, self.synthetic_images = \\\n image_from_paths(self.data_loader.synthetic_data_paths,\n self.data_loader.synthetic_data_dims, seed=self.config.random_seed)\n\n self.x_filename, self.x = tf.train.shuffle_batch(\n [self.synthetic_filenames, self.synthetic_images],\n batch_size=self.synthetic_batch_size,\n num_threads=4, capacity=capacity,\n min_after_dequeue=min_after_dequeue, name='synthetic_inputs')\n\n self.test_x_filename, self.test_x = tf.train.batch(\n [self.synthetic_filenames, self.synthetic_images],\n batch_size=self.synthetic_batch_size,\n num_threads=1, capacity=capacity,\n name='synthetic_test_inputs')\n\n if not self.config.is_train:\n self.x_filename, self.x = \\\n self.test_x_filename, self.test_x\n\n self.y = tf.placeholder(\n tf.uint8, [None, None, None, self.input_channel], name='real_inputs')\n self.R_x_history = tf.placeholder(\n tf.float32, [None, None, None, self.input_channel], 'R_x_history')\n\n resize_dim = [self.input_height, self.input_width]\n self.resized_x = tf.image.resize_images(self.x, resize_dim)\n self.resized_y = tf.image.resize_images(self.y, resize_dim)\n self.resized_test_x = tf.image.resize_images(self.test_x, resize_dim)\n\n self.normalized_x = normalize(self.resized_x)\n self.normalized_y = normalize(self.resized_y)\n\n self.refiner_step = tf.Variable(0, name='refiner_step', trainable=False)\n self.discrim_step = tf.Variable(0, name='discrim_step', trainable=False)\n\n def _build_optim(self):\n def minimize(loss, step, var_list):\n if self.config.optimizer == \"sgd\":\n optim = tf.train.GradientDescentOptimizer(self.learning_rate)\n elif self.config.optimizer == \"adam\":\n optim = tf.train.AdamOptimizer(self.learning_rate)\n else:\n raise Exception(\"[!] 
Unkown optimizer: {}\".format(self.config.optimizer))\n\n if self.max_grad_norm != None:\n grads_and_vars = optim.compute_gradients(loss)\n new_grads_and_vars = []\n for idx, (grad, var) in enumerate(grads_and_vars):\n if grad is not None and var in var_list:\n new_grads_and_vars.append((tf.clip_by_norm(grad, self.max_grad_norm), var))\n return optim.apply_gradients(new_grads_and_vars,\n global_step=step)\n else:\n return optim.minimize(loss, global_step=step, var_list=var_list)\n\n if self.task == \"generative\":\n self.refiner_optim = minimize(\n self.refiner_loss, self.refiner_step, self.refiner_vars)\n\n self.discrim_optim = minimize(\n self.discrim_loss, self.discrim_step, self.discrim_vars)\n\n self.discrim_optim_with_history = minimize(\n self.discrim_loss_with_history, self.discrim_step, self.discrim_vars)\n elif self.task == \"estimate\":\n raise Exception(\"[!] Not implemented yet\")\n\n def _build_model(self):\n with arg_scope([resnet_block, conv2d, max_pool2d, tanh],\n layer_dict=self.layer_dict):\n self.R_x = self._build_refiner(self.normalized_x)\n self.denormalized_R_x = denormalize(self.R_x)\n\n self.D_y, self.D_y_logits = \\\n self._build_discrim(self.normalized_y, name=\"D_y\")\n self.D_R_x, self.D_R_x_logits = \\\n self._build_discrim(self.R_x, name=\"D_R_x\", reuse=True)\n self.D_R_x_history, self.D_R_x_history_logits = \\\n self._build_discrim(self.R_x_history,\n name=\"D_R_x_history\", reuse=True)\n\n #self.estimate_outputs = self._build_estimation_network()\n self._build_loss()\n\n def _build_loss(self):\n # Refiner loss\n def fake_label(layer):\n return tf.zeros_like(layer, dtype=tf.int32)[:,:,:,0]\n\n def real_label(layer):\n return tf.ones_like(layer, dtype=tf.int32)[:,:,:,0]\n\n def log_loss(logits, label, name):\n return tf.reduce_sum(SE_loss(logits=logits, labels=label), [1, 2], name=name)\n\n with tf.name_scope(\"refiner\"):\n self.realism_loss = log_loss(\n self.D_R_x_logits, real_label(self.D_R_x_logits), \"realism_loss\")\n self.regularization_loss = \\\n self.reg_scale * tf.reduce_sum(\n tf.abs(self.R_x - self.normalized_x), [1, 2, 3],\n name=\"regularization_loss\")\n\n self.refiner_loss = tf.reduce_mean(\n self.realism_loss + self.regularization_loss,\n name=\"refiner_loss\")\n\n if self.debug:\n self.refiner_loss = tf.Print(\n self.refiner_loss, [self.R_x], \"R_x\")\n self.refiner_loss = tf.Print(\n self.refiner_loss, [self.D_R_x], \"D_R_x\")\n self.refiner_loss = tf.Print(\n self.refiner_loss, [self.normalized_x], \"normalized_x\")\n self.refiner_loss = tf.Print(\n self.refiner_loss, [self.denormalized_R_x], \"denormalized_R_x\")\n self.refiner_loss = tf.Print(\n self.refiner_loss, [self.regularization_loss], \"reg_loss\")\n\n self.refiner_summary = tf.summary.merge([\n #tf.summary.image(\"synthetic_images\",\n # self.x, max_outputs=self.config.max_image_summary),\n #tf.summary.image(\"refined_images\",\n # self.denormalized_R_x, max_outputs=self.config.max_image_summary),\n tf.summary.scalar(\"refiner/realism_loss\",\n tf.reduce_mean(self.realism_loss)),\n tf.summary.scalar(\"refiner/regularization_loss\",\n tf.reduce_mean(self.regularization_loss)),\n tf.summary.scalar(\"refiner/loss\",\n tf.reduce_mean(self.refiner_loss)),\n ])\n\n # Discriminator loss\n with tf.name_scope(\"discriminator\"):\n self.refiner_d_loss = log_loss(\n self.D_R_x_logits, fake_label(self.D_R_x_logits), \"refiner_d_loss\")\n self.synthetic_d_loss = log_loss(\n self.D_y_logits, real_label(self.D_y_logits), \"synthetic_d_loss\")\n\n self.discrim_loss = tf.reduce_mean(\n 
self.refiner_d_loss + \\\n self.synthetic_d_loss, name=\"discrim_loss\")\n\n # with history\n self.refiner_d_loss_with_history = log_loss(\n self.D_R_x_history_logits,\n fake_label(self.D_R_x_history_logits),\n \"refiner_d_loss_with_history\")\n self.discrim_loss_with_history = tf.reduce_mean(\n tf.concat([self.refiner_d_loss, self.refiner_d_loss_with_history], axis=0) + \\\n self.synthetic_d_loss, name=\"discrim_loss_with_history\")\n\n if self.debug:\n self.discrim_loss_with_history = tf.Print(\n self.discrim_loss_with_history, [self.D_R_x_logits], \"D_R_x_logits\")\n self.discrim_loss_with_history = tf.Print(\n self.discrim_loss_with_history, [self.D_y_logits], \"D_y_logits\")\n self.discrim_loss_with_history = tf.Print(\n self.discrim_loss_with_history, [self.refiner_d_loss], \"refiner_d_loss\")\n self.discrim_loss_with_history = tf.Print(\n self.discrim_loss_with_history, [self.refiner_d_loss_with_history], \"refiner_d_loss_with_history\")\n self.discrim_loss_with_history = tf.Print(\n self.discrim_loss_with_history, [self.synthetic_d_loss], \"synthetic_d_loss\")\n self.discrim_loss_with_history = tf.Print(\n self.discrim_loss_with_history, [self.D_R_x_history_logits], \"D_R_x_history_logits\")\n self.discrim_loss_with_history = tf.Print(\n self.discrim_loss_with_history, [self.D_y_logits], \"D_y_logits\")\n\n self.discrim_summary = tf.summary.merge([\n #tf.summary.image(\"real_images\",\n # self.resized_y, max_outputs=self.config.max_image_summary),\n tf.summary.scalar(\"synthetic_d_loss\",\n tf.reduce_mean(self.synthetic_d_loss)),\n tf.summary.scalar(\"refiner_d_loss\",\n tf.reduce_mean(self.refiner_d_loss)),\n tf.summary.scalar(\"discrim_loss\",\n tf.reduce_mean(self.discrim_loss)),\n ])\n self.discrim_summary_with_history = tf.summary.merge([\n #tf.summary.image(\"real_images\",\n # self.resized_y, max_outputs=self.config.max_image_summary),\n tf.summary.scalar(\"synthetic_d_loss\",\n tf.reduce_mean(self.synthetic_d_loss)),\n tf.summary.scalar(\"refiner_d_loss_with_history\",\n tf.reduce_mean(self.refiner_d_loss_with_history)),\n tf.summary.scalar(\"discrim_loss_with_history\",\n tf.reduce_mean(self.discrim_loss_with_history)),\n ])\n\n def _build_steps(self):\n def run(sess, feed_dict, fetch,\n summary_op, summary_writer, output_op=None):\n if summary_writer is not None:\n fetch['summary'] = summary_op\n if output_op is not None:\n fetch['output'] = output_op\n\n result = sess.run(fetch, feed_dict=feed_dict)\n if result.has_key('summary'):\n summary_writer.add_summary(result['summary'], result['step'])\n summary_writer.flush()\n return result\n\n def train_refiner(sess, feed_dict, summary_writer=None, with_output=False):\n fetch = {\n 'loss': self.refiner_loss,\n 'optim': self.refiner_optim,\n 'step': self.refiner_step,\n }\n return run(sess, feed_dict, fetch,\n self.refiner_summary, summary_writer,\n output_op=self.R_x if with_output else None)\n\n def test_refiner(sess, feed_dict, summary_writer=None, with_output=False):\n fetch = {\n 'filename': self.x_filename,\n 'loss': self.refiner_loss,\n 'step': self.refiner_step,\n }\n return run(sess, feed_dict, fetch,\n self.refiner_summary, summary_writer,\n output_op=self.R_x if with_output else None)\n\n def train_discrim(sess, feed_dict, summary_writer=None,\n with_history=False, with_output=False):\n fetch = {\n 'loss': self.discrim_loss_with_history,\n 'optim': self.discrim_optim_with_history,\n 'step': self.discrim_step,\n }\n return run(sess, feed_dict, fetch,\n self.discrim_summary_with_history if with_history \\\n else 
self.discrim_summary, summary_writer,\n output_op=self.D_R_x if with_output else None)\n\n def test_discrim(sess, feed_dict, summary_writer=None,\n with_history=False, with_output=False):\n fetch = {\n 'loss': self.discrim_loss,\n 'step': self.discrim_step,\n }\n return run(sess, feed_dict, fetch,\n self.discrim_summary_with_history if with_history \\\n else self.discrim_summary, summary_writer,\n output_op=self.D_R_x if with_output else None)\n\n self.train_refiner = train_refiner\n self.test_refiner = test_refiner\n self.train_discrim = train_discrim\n self.test_discrim = test_discrim\n\n def _build_refiner(self, layer):\n with tf.variable_scope(\"refiner\") as sc:\n layer = conv2d(layer, 64, 3, 1, scope=\"conv_1\")\n layer = repeat(layer, 4, resnet_block, scope=\"resnet\")\n layer = conv2d(layer, 1, 1, 1, \n activation_fn=None, scope=\"conv_2\")\n output = tanh(layer, name=\"tanh\")\n self.refiner_vars = tf.contrib.framework.get_variables(sc)\n return output \n\n def _build_discrim(self, layer, name, reuse=False):\n with tf.variable_scope(\"discriminator\", reuse=reuse) as sc:\n layer = conv2d(layer, 96, 3, 2, scope=\"conv_1\", name=name)\n layer = conv2d(layer, 64, 3, 2, scope=\"conv_2\", name=name)\n layer = max_pool2d(layer, 3, 1, scope=\"max_1\", name=name)\n layer = conv2d(layer, 32, 3, 1, scope=\"conv_3\", name=name)\n layer = conv2d(layer, 32, 1, 1, scope=\"conv_4\", name=name)\n logits = conv2d(layer, 2, 1, 1, scope=\"conv_5\", name=name)\n output = tf.nn.softmax(logits, name=\"softmax\")\n self.discrim_vars = tf.contrib.framework.get_variables(sc)\n return output, logits\n\n def _build_estimation_network(self):\n layer = self.normalized_x\n with tf.variable_scope(\"estimation\"):\n layer = conv2d(layer, 96, 3, 2, scope=\"conv_1\")\n layer = conv2d(layer, 64, 3, 2, scope=\"conv_2\")\n layer = max_pool2d(layer, 64, 3, scope=\"max_1\")\n layer = conv2d(layer, 32, 3, 1, scope=\"conv_3\")\n layer = conv2d(layer, 32, 1, 1, scope=\"conv_4\")\n layer = conv2d(layer, 2, 1, 1, activation_fn=slim.softmax)\n return layer\n"
] | [
[
"tensorflow.concat",
"tensorflow.train.AdamOptimizer",
"tensorflow.train.batch",
"tensorflow.Variable",
"tensorflow.clip_by_norm",
"tensorflow.name_scope",
"tensorflow.train.shuffle_batch",
"tensorflow.Print",
"tensorflow.image.resize_images",
"tensorflow.placeholder",
"tensorflow.zeros_like",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.softmax",
"tensorflow.contrib.framework.get_variables",
"tensorflow.reduce_mean",
"tensorflow.contrib.framework.arg_scope",
"tensorflow.ones_like",
"tensorflow.variable_scope",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
wuyou33/broad | [
"01a4a653614c03850908372d064055a0ef45e35b"
] | [
"example_code/broad_utils.py"
] | [
"# SPDX-FileCopyrightText: 2021 Daniel Laidig <[email protected]>\n#\n# SPDX-License-Identifier: MIT OR CC-BY-4.0\n\nimport numpy as np\nimport scipy.io as spio\n\n\ndef calculateErrorQuatEarth(imu_quat, opt_quat):\n \"\"\"\n Calculates quaternion that represents the orientation estimation error in the global coordinate system.\n\n :param imu_quat: IMU orientation, shape (N, 4)\n :param opt_quat: OMC orientation, shape (N, 4)\n :return: error quaternion, shape (N, 4)\n \"\"\"\n # normalize the input quaternions just in case\n imu_quat = imu_quat / np.linalg.norm(imu_quat, axis=1)[:, None]\n opt_quat = opt_quat / np.linalg.norm(opt_quat, axis=1)[:, None]\n # calculate the relative orientation expressed in the global coordinate system\n # imu_quat * (inv(opt_quat) * imu_quat) * inv(imu_quat) = imu_quat * inv(opt_quat)\n out = quatmult(imu_quat, invquat(opt_quat))\n # normalize the output quaternion\n out = out / np.linalg.norm(out, axis=1)[:, None]\n return out\n\n\ndef calculateTotalError(q_diff):\n \"\"\"\n Calculates the total error, i.e. the total absolute rotation angle of the quaternion.\n\n :param q_diff: error quaternion, shape (N, 4)\n :return: error in rad, shape (N,)\n \"\"\"\n return 2 * np.arccos(np.clip(np.abs(q_diff[:, 0]), 0, 1))\n\n\ndef calculateHeadingError(q_diff_earth):\n \"\"\"\n Calculates the heading error.\n\n :param q_diff_earth: error quaternion in global coordinates (c.f. calculateErrorQuatEarth), shape (N, 4)\n :return: error in rad, shape (N,)\n \"\"\"\n return 2 * np.arctan(np.abs(q_diff_earth[:, 3] / q_diff_earth[:, 0]))\n\n\ndef calculateInclinationError(q_diff_earth):\n \"\"\"\n Calculates the inclination error.\n\n :param q_diff_earth: error quaternion in global coordinates (c.f. calculateErrorQuatEarth), shape (N, 4)\n :return: error in rad, shape (N,)\n \"\"\"\n return 2 * np.arccos(np.clip(np.sqrt(q_diff_earth[:, 0] ** 2 + q_diff_earth[:, 3] ** 2), 0, 1))\n\n\ndef calculateRMSE(imu_quat, opt_quat, movement):\n \"\"\"\n Calculates total/heading/inclination errors in degrees (only considering movement phases).\n\n :param imu_quat: IMU orientation, shape (N, 4)\n :param opt_quat: OMC orientation, shape (N, 4)\n :param movement: boolean indexing array that denotes motion phases, shape (N,)\n :return: dict containing total, heading and inclination errors in degrees\n \"\"\"\n assert movement.dtype == bool\n\n q_diff_earth = calculateErrorQuatEarth(imu_quat, opt_quat)\n\n totalError = calculateTotalError(q_diff_earth)[movement]\n headingError = calculateHeadingError(q_diff_earth)[movement]\n inclError = calculateInclinationError(q_diff_earth)[movement]\n\n return dict(\n total_rmse_deg=np.rad2deg(rmse(totalError)),\n heading_rmse_deg=np.rad2deg(rmse(headingError)),\n inclination_rmse_deg=np.rad2deg(rmse(inclError))\n )\n\n\ndef quatmult(q1, q2):\n \"\"\"\n Quaternion multiplication.\n\n If two Nx4 arrays are given, they are multiplied row-wise. 
Alternative one of the inputs can be a single\n quaternion which is then multiplied to all rows of the other input array.\n \"\"\"\n\n q1 = np.asarray(q1, float)\n q2 = np.asarray(q2, float)\n\n # if both input quaternions are 1D arrays, we also want to return a 1D output\n is1D = max(len(q1.shape), len(q2.shape)) < 2\n\n # but to be able to use the same indexing in all cases, make sure everything is in 2D arrays\n if q1.shape == (4,):\n q1 = q1.reshape((1, 4))\n if q2.shape == (4,):\n q2 = q2.reshape((1, 4))\n\n # check the dimensions\n N = max(q1.shape[0], q2.shape[0])\n assert q1.shape == (N, 4) or q1.shape == (1, 4)\n assert q2.shape == (N, 4) or q2.shape == (1, 4)\n\n # actual quaternion multiplication\n q3 = np.zeros((N, 4), np.float)\n q3[:, 0] = q1[:, 0] * q2[:, 0] - q1[:, 1] * q2[:, 1] - q1[:, 2] * q2[:, 2] - q1[:, 3] * q2[:, 3]\n q3[:, 1] = q1[:, 0] * q2[:, 1] + q1[:, 1] * q2[:, 0] + q1[:, 2] * q2[:, 3] - q1[:, 3] * q2[:, 2]\n q3[:, 2] = q1[:, 0] * q2[:, 2] - q1[:, 1] * q2[:, 3] + q1[:, 2] * q2[:, 0] + q1[:, 3] * q2[:, 1]\n q3[:, 3] = q1[:, 0] * q2[:, 3] + q1[:, 1] * q2[:, 2] - q1[:, 2] * q2[:, 1] + q1[:, 3] * q2[:, 0]\n\n if is1D:\n q3 = q3.reshape((4,))\n\n return q3\n\n\ndef invquat(q):\n \"\"\"Calculates the inverse of unit quaternions.\"\"\"\n\n q = np.asarray(q, np.float)\n if len(q.shape) != 2:\n assert q.shape == (4,)\n qConj = q.copy()\n qConj[1:] *= -1\n return qConj\n else:\n assert q.shape[1] == 4\n qConj = q.copy()\n qConj[:, 1:] *= -1\n return qConj\n\n\ndef quatFromRotMat(R):\n \"\"\"Gets a quaternion from a rotation matrix.\"\"\"\n assert R.shape == (3, 3)\n\n w_sq = (1 + R[0, 0] + R[1, 1] + R[2, 2]) / 4\n x_sq = (1 + R[0, 0] - R[1, 1] - R[2, 2]) / 4\n y_sq = (1 - R[0, 0] + R[1, 1] - R[2, 2]) / 4\n z_sq = (1 - R[0, 0] - R[1, 1] + R[2, 2]) / 4\n\n q = np.zeros((4,), np.float)\n q[0] = np.sqrt(w_sq)\n q[1] = np.copysign(np.sqrt(x_sq), R[2, 1] - R[1, 2])\n q[2] = np.copysign(np.sqrt(y_sq), R[0, 2] - R[2, 0])\n q[3] = np.copysign(np.sqrt(z_sq), R[1, 0] - R[0, 1])\n return q\n\n\ndef quatFromAccMag(acc, mag):\n \"\"\"Calculates an initial orientation from a accelerometer and magnetomter sample.\"\"\"\n assert acc.shape == (3,)\n assert mag.shape == (3,)\n z = acc\n x = np.cross(np.cross(z, -mag), z)\n y = np.cross(z, x)\n R = np.column_stack([x/np.linalg.norm(x), y/np.linalg.norm(y), z/np.linalg.norm(z)])\n return quatFromRotMat(R)\n\n\ndef rmse(diff):\n \"\"\"Calculates the RMS of the input signal.\"\"\"\n return np.sqrt(np.nanmean(diff**2))\n\n\ndef loadResults(filename):\n \"\"\"Loads result files created by process_data.py\"\"\"\n results = spio.loadmat(filename, squeeze_me=True)\n # recover params dictionary\n paramNames = results['param_names']\n if not isinstance(paramNames, np.ndarray):\n paramNames = [paramNames] # undo squeeze if there is only one parameter\n results['params'] = {p: results['param_values'][p].item() for p in paramNames}\n return results\n\n\ndef getAveragedRmseValues(trialInfo, results):\n \"\"\"\n Determines averaged RMSE values for each trial and for each group of trials.\n\n :param trialInfo: trial and group information (loaded from trials.json)\n :param results: result data structure (see loadResults)\n :return: nested dict with averaged rmse results\n \"\"\"\n\n metrics = ('total_rmse_deg', 'heading_rmse_deg', 'inclination_rmse_deg')\n\n # determine parameter values that minimize the average total RMSE over all trials (i.e. 
the TAGP)\n tagpParams, tagpParamInd = getTagpParams(results)\n\n # determine RMSE values by group (with TAGP parameters and the minimum error in the search grid)\n trials = {}\n for trialName in trialInfo['trials']:\n trials[trialName] = dict(\n tagp_parameters={metric: results[metric][f'trial_{trialName}'].item()[tagpParamInd] for metric in metrics},\n minimum_value={metric: np.min(results[metric][f'trial_{trialName}'].item()) for metric in metrics},\n )\n\n # combine errors for all groups\n groups = {}\n for groupInfo in trialInfo['groups']:\n groupName = groupInfo['name']\n groupTrials = [trialName for trialName, info in trialInfo['trials'].items() if groupName in info['groups']]\n groups[groupName] = dict(tagp_parameters={}, minimum_value={})\n for params in groups[groupName].keys():\n for metric in metrics:\n values = [trials[n][params][metric] for n in groupTrials]\n groups[groupName][params][metric] = np.mean(values)\n\n return dict(\n tagp_parameters=tagpParams,\n tagp_parameter_ind=tagpParamInd,\n groups=groups,\n trials=trials,\n )\n\n\ndef getTagpParams(results):\n \"\"\"\n Determines the parameter setting that is associated with the TAGP (i.e. the lowest average error achievable when\n using a common parameter set for all trials).\n\n :param results: result data structure (see loadResults)\n :return: tagpParams (dict containing the parameter values associated with the TAGP), tagpInd (indexing tuple into\n the cost array)\n \"\"\"\n # the TAGP is defined by the mean over all 39 trials\n trialNames = [n.lstrip('trial_') for n in sorted(results['total_rmse_deg'].dtype.names)]\n assert len(trialNames) == 39\n\n cost = getMeanError(results, trialNames, 'total_rmse_deg')\n tagpParamInd = np.unravel_index(np.argmin(cost), cost.shape)\n tagpParamInd = tuple([ind.item() for ind in tagpParamInd]) # convert from np.int64 to regular int\n tagpParams = {paramName: values[ind] for (paramName, values), ind in zip(results['params'].items(), tagpParamInd)}\n return tagpParams, tagpParamInd\n\n\ndef getMeanError(results, trialNames, errorMetric):\n \"\"\"\n Determines the averaged error matrix for the given metric and the given list of trials.\n\n :param results: result data structure (see loadResults)\n :param trialNames: list of trial names\n :param errorMetric: error metric to consider\n :return: averaged error matrix\n \"\"\"\n trialNames = list(trialNames)\n assert len(trialNames) == len(set(trialNames))\n vals = [results[errorMetric][f'trial_{name}'].item() for name in trialNames]\n return np.mean(vals, axis=0)\n"
] | [
[
"numpy.sqrt",
"numpy.abs",
"numpy.asarray",
"scipy.io.loadmat",
"numpy.linalg.norm",
"numpy.mean",
"numpy.nanmean",
"numpy.argmin",
"numpy.cross",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
wangtz/models | [
"fcca91e05ae49b9e1bd2f325cfca3ba261e0b41e"
] | [
"official/mnist/mnist_tpu.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MNIST model training using TPUs.\n\nThis program demonstrates training of the convolutional neural network model\ndefined in mnist.py on Google Cloud TPUs (https://cloud.google.com/tpu/).\n\nIf you are not interested in TPUs, you should ignore this file.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nimport tensorflow as tf # pylint: disable=g-bad-import-order\n\n# For open source environment, add grandparent directory for import\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.path[0]))))\n\nfrom official.mnist import dataset # pylint: disable=wrong-import-position\nfrom official.mnist import mnist # pylint: disable=wrong-import-position\n\n# Cloud TPU Cluster Resolver flags\ntf.flags.DEFINE_string(\n \"tpu\", default=None,\n help=\"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\ntf.flags.DEFINE_string(\n \"tpu_zone\", default=None,\n help=\"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\ntf.flags.DEFINE_string(\n \"gcp_project\", default=None,\n help=\"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\n# Model specific parameters\ntf.flags.DEFINE_string(\"data_dir\", \"\",\n \"Path to directory containing the MNIST dataset\")\ntf.flags.DEFINE_string(\"model_dir\", None, \"Estimator model_dir\")\ntf.flags.DEFINE_integer(\"batch_size\", 1024,\n \"Mini-batch size for the training. Note that this \"\n \"is the global batch size and not the per-shard batch.\")\ntf.flags.DEFINE_integer(\"train_steps\", 1000, \"Total number of training steps.\")\ntf.flags.DEFINE_integer(\"eval_steps\", 0,\n \"Total number of evaluation steps. 
If `0`, evaluation \"\n \"after training is skipped.\")\ntf.flags.DEFINE_float(\"learning_rate\", 0.05, \"Learning rate.\")\n\ntf.flags.DEFINE_bool(\"use_tpu\", True, \"Use TPUs rather than plain CPUs\")\ntf.flags.DEFINE_bool(\"enable_predict\", True, \"Do some predictions at the end\")\ntf.flags.DEFINE_integer(\"iterations\", 50,\n \"Number of iterations per TPU training loop.\")\ntf.flags.DEFINE_integer(\"num_shards\", 8, \"Number of shards (TPU chips).\")\n\nFLAGS = tf.flags.FLAGS\n\n\ndef metric_fn(labels, logits):\n accuracy = tf.metrics.accuracy(\n labels=labels, predictions=tf.argmax(logits, axis=1))\n return {\"accuracy\": accuracy}\n\n\ndef model_fn(features, labels, mode, params):\n \"\"\"model_fn constructs the ML model used to predict handwritten digits.\"\"\"\n\n del params\n image = features\n if isinstance(image, dict):\n image = features[\"image\"]\n\n model = mnist.create_model(\"channels_last\")\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n logits = model(image, training=False)\n predictions = {\n 'class_ids': tf.argmax(logits, axis=1),\n 'probabilities': tf.nn.softmax(logits),\n }\n return tf.contrib.tpu.TPUEstimatorSpec(mode, predictions=predictions)\n\n logits = model(image, training=(mode == tf.estimator.ModeKeys.TRAIN))\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n learning_rate = tf.train.exponential_decay(\n FLAGS.learning_rate,\n tf.train.get_global_step(),\n decay_steps=100000,\n decay_rate=0.96)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n if FLAGS.use_tpu:\n optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=optimizer.minimize(loss, tf.train.get_global_step()))\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits]))\n\n\ndef train_input_fn(params):\n \"\"\"train_input_fn defines the input pipeline used for training.\"\"\"\n batch_size = params[\"batch_size\"]\n data_dir = params[\"data_dir\"]\n # Retrieves the batch size for the current shard. The # of shards is\n # computed according to the input pipeline deployment. 
See\n # `tf.contrib.tpu.RunConfig` for details.\n ds = dataset.train(data_dir).cache().repeat().shuffle(\n buffer_size=50000).batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef eval_input_fn(params):\n batch_size = params[\"batch_size\"]\n data_dir = params[\"data_dir\"]\n ds = dataset.test(data_dir).batch(batch_size, drop_remainder=True)\n return ds\n\n\ndef predict_input_fn(params):\n batch_size = params[\"batch_size\"]\n data_dir = params[\"data_dir\"]\n # Take out top 10 samples from test data to make the predictions.\n ds = dataset.test(data_dir).take(10).batch(batch_size)\n return ds\n\n\ndef main(argv):\n del argv # Unused.\n tf.logging.set_verbosity(tf.logging.INFO)\n\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu,\n zone=FLAGS.tpu_zone,\n project=FLAGS.gcp_project\n )\n\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n model_dir=FLAGS.model_dir,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tf.contrib.tpu.TPUConfig(FLAGS.iterations, FLAGS.num_shards),\n )\n\n estimator = tf.contrib.tpu.TPUEstimator(\n model_fn=model_fn,\n use_tpu=FLAGS.use_tpu,\n train_batch_size=FLAGS.batch_size,\n eval_batch_size=FLAGS.batch_size,\n predict_batch_size=FLAGS.batch_size,\n params={\"data_dir\": FLAGS.data_dir},\n config=run_config)\n # TPUEstimator.train *requires* a max_steps argument.\n estimator.train(input_fn=train_input_fn, max_steps=FLAGS.train_steps)\n # TPUEstimator.evaluate *requires* a steps argument.\n # Note that the number of examples used during evaluation is\n # --eval_steps * --batch_size.\n # So if you change --batch_size then change --eval_steps too.\n if FLAGS.eval_steps:\n estimator.evaluate(input_fn=eval_input_fn, steps=FLAGS.eval_steps)\n\n # Run prediction on top few samples of test data.\n if FLAGS.enable_predict:\n predictions = estimator.predict(input_fn=predict_input_fn)\n\n for pred_dict in predictions:\n template = ('Prediction is \"{}\" ({:.1f}%).')\n\n class_id = pred_dict['class_ids']\n probability = pred_dict['probabilities'][class_id]\n\n print(template.format(class_id, 100 * probability))\n\n\nif __name__ == \"__main__\":\n absl_app.run()\n"
] | [
[
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.nn.softmax",
"tensorflow.losses.sparse_softmax_cross_entropy",
"tensorflow.flags.DEFINE_string",
"tensorflow.train.get_global_step",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.ConfigProto",
"tensorflow.contrib.tpu.CrossShardOptimizer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.logging.set_verbosity",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.flags.DEFINE_float",
"tensorflow.argmax",
"tensorflow.flags.DEFINE_bool",
"tensorflow.flags.DEFINE_integer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
maruidea/Super-SloMo | [
"6079be7cc6c0004f215777da3f490cbb117b0569"
] | [
"video_to_slomo.py"
] | [
"#!/usr/bin/env python3\nimport argparse\nimport os\nimport os.path\nimport ctypes\nfrom shutil import rmtree, move\nfrom PIL import Image\nimport torch\nimport torchvision.transforms as transforms\nimport model\nimport dataloader\nimport platform\nfrom tqdm import tqdm\n\n# For parsing commandline arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--ffmpeg_dir\", type=str, default=\"\", help='path to ffmpeg.exe')\nparser.add_argument(\"--video\", type=str, required=True, help='path of video to be converted')\nparser.add_argument(\"--checkpoint\", type=str, required=True, help='path of checkpoint for pretrained model')\nparser.add_argument(\"--fps\", type=float, default=30, help='specify fps of output video. Default: 30.')\nparser.add_argument(\"--sf\", type=int, required=True, help='specify the slomo factor N. This will increase the frames by Nx. Example sf=2 ==> 2x frames')\nparser.add_argument(\"--batch_size\", type=int, default=1, help='Specify batch size for faster conversion. This will depend on your cpu/gpu memory. Default: 1')\nparser.add_argument(\"--output\", type=str, default=\"output.mkv\", help='Specify output file name. Default: output.mp4')\nparser.add_argument(\"--compress\", action='store_true')\nargs = parser.parse_args()\n\ndef check():\n \"\"\"\n Checks the validity of commandline arguments.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n error : string\n Error message if error occurs otherwise blank string.\n \"\"\"\n\n\n error = \"\"\n if (args.sf < 2):\n error = \"Error: --sf/slomo factor has to be atleast 2\"\n if (args.batch_size < 1):\n error = \"Error: --batch_size has to be atleast 1\"\n if (args.fps < 1):\n error = \"Error: --fps has to be atleast 1\"\n if \".mkv\" not in args.output:\n error = \"output needs to have mkv container\"\n return error\n\ndef extract_frames(video, outDir):\n \"\"\"\n Converts the `video` to images.\n\n Parameters\n ----------\n video : string\n full path to the video file.\n outDir : string\n path to directory to output the extracted images.\n\n Returns\n -------\n error : string\n Error message if error occurs otherwise blank string.\n \"\"\"\n\n\n error = \"\"\n print('{} -i {} -vsync 0 {}/%06d.png'.format(os.path.join(args.ffmpeg_dir, \"ffmpeg\"), video, outDir))\n retn = os.system('{} -i \"{}\" -vsync 0 {}/%06d.png'.format(os.path.join(args.ffmpeg_dir, \"ffmpeg\"), video, outDir))\n if retn:\n error = \"Error converting file:{}. Exiting.\".format(video)\n return error\n\ndef create_video(dir):\n error = \"\"\n if args.compress:\n print('{} -r {} -i {}/%d.png -pix_fmt yuv420p {}'.format(os.path.join(args.ffmpeg_dir, \"ffmpeg\"), args.fps, dir, args.output))\n retn = os.system('{} -r {} -i {}/%d.png -pix_fmt yuv420p \"{}\"'.format(os.path.join(args.ffmpeg_dir, \"ffmpeg\"), args.fps, dir, args.output))\n else:\n print('{} -r {} -i {}/%d.png -vcodec ffvhuff {}'.format(os.path.join(args.ffmpeg_dir, \"ffmpeg\"), args.fps, dir, args.output))\n retn = os.system('{} -r {} -i {}/%d.png -vcodec ffvhuff \"{}\"'.format(os.path.join(args.ffmpeg_dir, \"ffmpeg\"), args.fps, dir, args.output))\n\n if retn:\n error = \"Error creating output video. 
Exiting.\"\n return error\n\n\ndef main():\n # Check if arguments are okay\n error = check()\n if error:\n print(error)\n exit(1)\n\n # Create extraction folder and extract frames\n IS_WINDOWS = 'Windows' == platform.system()\n extractionDir = \"tmpSuperSloMo\"\n if not IS_WINDOWS:\n # Assuming UNIX-like system where \".\" indicates hidden directories\n extractionDir = \".\" + extractionDir\n if os.path.isdir(extractionDir):\n rmtree(extractionDir)\n os.mkdir(extractionDir)\n if IS_WINDOWS:\n FILE_ATTRIBUTE_HIDDEN = 0x02\n # ctypes.windll only exists on Windows\n ctypes.windll.kernel32.SetFileAttributesW(extractionDir, FILE_ATTRIBUTE_HIDDEN)\n\n extractionPath = os.path.join(extractionDir, \"input\")\n outputPath = os.path.join(extractionDir, \"output\")\n os.mkdir(extractionPath)\n os.mkdir(outputPath)\n error = extract_frames(args.video, extractionPath)\n if error:\n print(error)\n exit(1)\n\n # Initialize transforms\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n mean = [0.429, 0.431, 0.397]\n std = [1, 1, 1]\n normalize = transforms.Normalize(mean=mean,\n std=std)\n\n negmean = [x * -1 for x in mean]\n revNormalize = transforms.Normalize(mean=negmean, std=std)\n\n # Temporary fix for issue #7 https://github.com/avinashpaliwal/Super-SloMo/issues/7 -\n # - Removed per channel mean subtraction for CPU.\n if (device == \"cpu\"):\n transform = transforms.Compose([transforms.ToTensor()])\n TP = transforms.Compose([transforms.ToPILImage()])\n else:\n transform = transforms.Compose([transforms.ToTensor(), normalize])\n TP = transforms.Compose([revNormalize, transforms.ToPILImage()])\n\n # Load data\n videoFrames = dataloader.Video(root=extractionPath, transform=transform)\n videoFramesloader = torch.utils.data.DataLoader(videoFrames, batch_size=args.batch_size, shuffle=False)\n\n # Initialize model\n flowComp = model.UNet(6, 4)\n flowComp.to(device)\n for param in flowComp.parameters():\n param.requires_grad = False\n ArbTimeFlowIntrp = model.UNet(20, 5)\n ArbTimeFlowIntrp.to(device)\n for param in ArbTimeFlowIntrp.parameters():\n param.requires_grad = False\n\n flowBackWarp = model.backWarp(videoFrames.dim[0], videoFrames.dim[1], device)\n flowBackWarp = flowBackWarp.to(device)\n\n dict1 = torch.load(args.checkpoint, map_location='cpu')\n ArbTimeFlowIntrp.load_state_dict(dict1['state_dictAT'])\n flowComp.load_state_dict(dict1['state_dictFC'])\n\n # Interpolate frames\n frameCounter = 1\n\n with torch.no_grad():\n for _, (frame0, frame1) in enumerate(tqdm(videoFramesloader), 0):\n\n I0 = frame0.to(device)\n I1 = frame1.to(device)\n\n flowOut = flowComp(torch.cat((I0, I1), dim=1))\n F_0_1 = flowOut[:,:2,:,:]\n F_1_0 = flowOut[:,2:,:,:]\n\n # Save reference frames in output folder\n for batchIndex in range(args.batch_size):\n (TP(frame0[batchIndex].detach())).resize(videoFrames.origDim, Image.BILINEAR).save(os.path.join(outputPath, str(frameCounter + args.sf * batchIndex) + \".png\"))\n frameCounter += 1\n\n # Generate intermediate frames\n for intermediateIndex in range(1, args.sf):\n t = float(intermediateIndex) / args.sf\n temp = -t * (1 - t)\n fCoeff = [temp, t * t, (1 - t) * (1 - t), temp]\n\n F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0\n F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0\n\n g_I0_F_t_0 = flowBackWarp(I0, F_t_0)\n g_I1_F_t_1 = flowBackWarp(I1, F_t_1)\n\n intrpOut = ArbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))\n\n F_t_0_f = intrpOut[:, :2, :, :] + F_t_0\n F_t_1_f = intrpOut[:, 2:4, :, :] + 
F_t_1\n V_t_0 = torch.sigmoid(intrpOut[:, 4:5, :, :])\n V_t_1 = 1 - V_t_0\n\n g_I0_F_t_0_f = flowBackWarp(I0, F_t_0_f)\n g_I1_F_t_1_f = flowBackWarp(I1, F_t_1_f)\n\n wCoeff = [1 - t, t]\n\n Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)\n\n # Save intermediate frame\n for batchIndex in range(args.batch_size):\n (TP(Ft_p[batchIndex].cpu().detach())).resize(videoFrames.origDim, Image.BILINEAR).save(os.path.join(outputPath, str(frameCounter + args.sf * batchIndex) + \".png\"))\n frameCounter += 1\n\n # Set counter accounting for batching of frames\n frameCounter += args.sf * (args.batch_size - 1)\n\n # Generate video from interpolated frames\n create_video(outputPath)\n\n # Remove temporary files\n rmtree(extractionDir)\n\n exit(0)\n\nmain()\n"
] | [
[
"torch.sigmoid",
"torch.cat",
"torch.load",
"torch.utils.data.DataLoader",
"torch.no_grad",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ricamo/FastDensityPeak | [
"e629efb64590e4816564ac48f664a4fa2e900573"
] | [
"datasets/datasets.py"
] | [
"\"\"\"\nThese functions servs to load properly the datasets included in the project.\nThey return the loaded dataframe (with some basic preprocessing, if needed),\nthe name of the class column and the ideal number of bins for discretizating the\nnumerical attributes (if any, else None).\n\"\"\"\nimport os\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\nDATA_FOLDER = 'datasets/real'\ncat_types = ['object','str','category']\n\ndef preprocess(df, cl, min_bins=10):\n \"\"\"\n Function that preprocess a pandas dataframe so that the resulting dataset \n is made only by numerical data that correspond to discrete categories.\n Args:\n data: the data to preprocess\n cl: the column name of the class\n Returns:\n X: numpy preprocessed data\n y: numpy preprocessed target\n encoders: dicitonary with the label_encoders associated to the encoded attributes\n \"\"\" \n columns = df.columns\n columns_no_class = np.delete(columns, np.where(columns==cl)) \n encoders = {}\n for c in columns: \n if df[c].dtype.name in cat_types: # Check if is not a numerical column\n le = LabelEncoder() # Encode the values\n df[c] = le.fit_transform(df[c])\n if c == cl:\n encoders['cl'] = le\n else:\n encoders[c] = le \n X = df[columns_no_class].values\n y = df[cl].values\n return X, y, columns_no_class, encoders\n\ndef load_adult_stretch():\n df = pd.read_csv(os.path.join(DATA_FOLDER, 'adult-stretch.csv'))\n return df, 'class', None\n\ndef load_crx():\n df = pd.read_csv(os.path.join(DATA_FOLDER, 'crx.csv'), header=None)\n # Drop the rows that contain missing values\n df = df.replace('?',np.NaN)\n df = df.dropna(axis=0)\n # Convert object columns to float\n df[1] = df[1].astype(float)\n df[13] = df[13].astype(float)\n return df, 15, 6\n\ndef load_hillvalley():\n df = pd.read_csv(os.path.join(DATA_FOLDER, 'hill_valley.csv'))\n return df, 'class', 6\n\ndef load_banknote():\n df = pd.read_csv(os.path.join(DATA_FOLDER, 'banknote.csv'), header=None)\n return df, 4, 6\n\ndef load_pendigits():\n df = pd.read_csv(os.path.join(DATA_FOLDER, 'pendigits.csv'))\n return df, 'class', 5\n\ndef load_nursery():\n df = pd.read_csv(os.path.join(DATA_FOLDER, 'nursery.csv'))\n return df, 'class', None\n\ndef load_custom():\n \"\"\"\n Function to load a custom dataset by the user. The attribute name\n of the class is asked. \n \"\"\"\n again =True\n while(again):\n path = input(\"Path to the csv file:\\n\")\n if os.path.isfile(path):\n try:\n df = pd.read_csv(path)\n again = False\n except:\n print(\"Impossible to read the file\")\n else:\n print(\"File doesn't exist\")\n\n again =True\n while(again):\n cl = input(\"Define the column name of the class (or the index):\\n\")\n if cl in df.columns:\n again = False\n elif cl.isdigit() and int(cl) >= 0 and int(cl) < df.columns.size:\n cl = df.columns[int(cl)]\n again = False\n else:\n print(\"Class not found\")\n return df, cl, 10\n\n\ndef load(i):\n datasets = {1:load_crx,2:load_banknote,3:load_pendigits,4:load_nursery,5:load_custom}\n df, cl, bins = datasets[i]()\n X, y, columns_no_class, encoders = preprocess(df, cl, bins)\n return X, y\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"numpy.where",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nicolalandro/softpool | [
"ca77161ab70e5fe6c6505dc40f448bd8e1d78a48"
] | [
"GCN/layers_treegcn/gcn.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport math\n\nclass TreeGCN(nn.Module):\n def __init__(self, depth, features, degrees, support=10, node=1, upsample=False, activation=True):\n self.depth = depth\n self.in_feature = features[depth]\n self.out_feature = features[depth+1]\n self.node = node\n self.degree = degrees[depth]\n self.upsample = upsample\n self.activation = activation\n super(TreeGCN, self).__init__()\n\n self.W_root = nn.ModuleList([nn.Linear(features[inx], self.out_feature, bias=False) for inx in range(self.depth+1)])\n\n if self.upsample:\n self.W_branch = nn.Parameter(torch.FloatTensor(self.node, self.in_feature, self.degree*self.in_feature))\n \n self.W_loop = nn.Sequential(nn.Linear(self.in_feature, self.in_feature*support, bias=False),\n nn.Linear(self.in_feature*support, self.out_feature, bias=False))\n\n self.bias = nn.Parameter(torch.FloatTensor(1, self.degree, self.out_feature))\n\n self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)\n\n self.init_param()\n\n def init_param(self):\n if self.upsample:\n init.xavier_uniform_(self.W_branch.data, gain=init.calculate_gain('relu'))\n\n stdv = 1. / math.sqrt(self.out_feature)\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, tree):\n root = 0\n for inx in range(self.depth+1):\n root_num = tree[inx].size(1)\n repeat_num = int(self.node / root_num)\n root_node = self.W_root[inx](tree[inx])\n self.batch = root_node.shape[0]\n root = root + root_node.repeat(1,1,repeat_num).view(self.batch,-1,self.out_feature)\n\n branch = 0\n if self.upsample:\n branch = tree[-1].unsqueeze(2) @ self.W_branch\n branch = self.leaky_relu(branch)\n branch = branch.view(self.batch,self.node*self.degree,self.in_feature)\n \n branch = self.W_loop(branch)\n\n branch = root.repeat(1,1,self.degree).view(self.batch,-1,self.out_feature) + branch\n else:\n branch = self.W_loop(tree[-1])\n\n branch = root + branch\n\n if self.activation:\n branch = self.leaky_relu(branch + self.bias.repeat(1,self.node,1))\n tree.append(branch)\n\n return tree\n"
] | [
[
"torch.nn.Linear",
"torch.FloatTensor",
"torch.nn.LeakyReLU",
"torch.nn.init.calculate_gain"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mrschweizer/PyThat | [
"61c411b0a3eb70be39d4c601b51a1eafc67b51dd"
] | [
"examples/example.py"
] | [
"from PyThat import MeasurementTree\n# from h5to_nc import MeasurementTree\nimport xarray as xr\nimport matplotlib.pyplot as plt\n\n# Define path to .h5 file\npath = r'D:\\Pycharm\\PyThat\\examples\\floquet_just_spectrum_analyzer_large_incomplete.h5'\n\nindex = (3, 0)\n# Optional: If the index is known beforehand, it can be specified here. Otherwise the user will be asked to choose.\n# index = (2, 1)\n\n# Create measurement_tree object. Path argument should point towards thatec h5 file.\nmeasurement_tree = MeasurementTree(path, index=index, override=True)\n\nprint(measurement_tree.logs)\nprint(measurement_tree.definition)\nprint(measurement_tree.labbook)\nprint(measurement_tree.tree_string)\n\ndata: xr.DataArray = measurement_tree.array\ndata.isel({'Set Magnetic Field': 5}).plot()\n\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
therooler/qml | [
"d48c853af9abb6b6db58de830b8832122042adb7"
] | [
"demonstrations/tutorial_qaoa_intro.py"
] | [
"r\"\"\"\nIntro to QAOA\n=========================\n\n.. meta::\n :property=\"og:description\": Learn how to implement QAOA with PennyLane\n :property=\"og:image\": https://pennylane.ai/qml/_images/qaoa_layer.png\n\n.. related::\n\n tutorial_qaoa_maxcut QAOA for MaxCut\n\n*Author: Jack Ceroni. Posted: 18 Nov 2020. Last updated: 11 Jan 2021.*\n\nThe Quantum Approximate Optimization Algorithm (QAOA) is a widely-studied\nmethod for solving combinatorial optimization problems on NISQ devices.\nThe applications of QAOA are broad and far-reaching, and the performance\nof the algorithm is of great interest to the quantum computing research community.\n\n.. figure:: ../demonstrations/qaoa_module/qaoa_circuit.png\n :align: center\n :width: 90%\n\nThe goal of this tutorial is to introduce the basic concepts of QAOA and\nto guide you through PennyLane's built-in QAOA\nfunctionality. You will learn how to use time evolution to establish a\nconnection between Hamiltonians and quantum circuits, and how to layer\nthese circuits to create more powerful algorithms. These simple ingredients,\ntogether with the ability to optimize quantum circuits, are the building blocks of QAOA. By focusing\non the fundamentals, PennyLane provides general and flexible capabilities that can be tailored and\nrefined to implement QAOA for a wide variety of problems. In the last part of the tutorial, you will\nlearn how to bring these pieces together and deploy a complete QAOA workflow to solve the\nminimum vertex cover problem. Let's get started! 🎉\n\nCircuits and Hamiltonians\n-------------------------\n\nWhen considering quantum circuits, it is often convenient to define them by a\nseries of quantum gates. But there are many instances where\nit is useful to think of a quantum circuit in terms of a\n`Hamiltonian <https://en.wikipedia.org/wiki/Hamiltonian_(quantum_mechanics)>`__.\nIndeed, gates are physically implemented by performing time evolution under a carefully engineered\nHamiltonian. These transformations are described by the time evolution operator,\nwhich is a unitary defined as:\"\"\"\n\n######################################################################\n# .. math:: U(H, \\ t) \\ = \\ e^{-i H t / \\hbar}.\n#\n# The time evolution operator is determined completely in terms of a Hamiltonian\n# :math:`H` and a scalar :math:`t` representing time. In fact, any unitary\n# :math:`U` can be written in the form :math:`e^{i \\gamma H}`, where :math:`\\gamma` is a scalar\n# and :math:`H` is a Hermitian operator,\n# interpreted as a Hamiltonian. Thus, time evolution establishes a connection that allows us to\n# describe quantum circuits in terms of Hamiltonians. 🤯\n#\n# In general, implementing a quantum circuit that exactly exponentiates a Hamiltonian\n# with many non-commuting terms, i.e., a Hamiltonian of the form:\n#\n# .. math:: H \\ = \\ H_1 \\ + \\ H_2 \\ + \\ H_3 \\ + \\ \\cdots \\ + \\ H_N,\n#\n# is very challenging. Instead, we can use the\n# `Trotter-Suzuki <https://en.wikipedia.org/wiki/Lie_product_formula>`__ decomposition formula\n#\n# .. math:: e^{A \\ + \\ B} \\ \\approx \\ \\Big(e^{A/n} e^{B/n}\\Big)^{n},\n#\n# to implement an *approximate* time-evolution unitary:\n#\n# .. math:: U(H, t, n) \\ = \\ \\displaystyle\\prod_{j \\ = \\ 1}^{n}\n# \\displaystyle\\prod_{k} e^{-i H_k t / n} \\ \\ \\ \\ \\ \\ \\ \\ \\ \\ H \\\n# = \\ \\displaystyle\\sum_{k} H_k,\n#\n# where :math:`U` approaches :math:`e^{-i H t}` as :math:`n`\n# becomes larger.\n#\n# .. 
figure:: ../demonstrations/qaoa_module/ham_circuit.png\n# :align: center\n# :width: 70%\n#\n# In PennyLane, this is implemented using the :func:`~.pennylane.templates.ApproxTimeEvolution` template.\n# For example, let's say we have the following Hamiltonian:\n\nimport pennylane as qml\n\nH = qml.Hamiltonian(\n [1, 1, 0.5],\n [qml.PauliX(0), qml.PauliZ(1), qml.PauliX(0) @ qml.PauliX(1)]\n)\nprint(H)\n\n\n######################################################################\n#\n# We can implement the approximate time-evolution operator corresponding to this\n# Hamiltonian:\n\ndev = qml.device('default.qubit', wires=2)\n\nt = 1\nn = 2\n\[email protected](dev)\ndef circuit():\n qml.templates.ApproxTimeEvolution(H, t, n)\n return [qml.expval(qml.PauliZ(i)) for i in range(2)]\n\ncircuit()\nprint(circuit.draw())\n\n######################################################################\n# Layering circuits\n# -----------------\n#\n# Think of all the times you have copied a text or image, then pasted it repeatedly to create\n# many duplicates. This is also a useful feature when designing quantum algorithms!\n# The idea of repetition is ubiquitous in quantum computing:\n# from amplitude amplification in `Grover’s algorithm\n# <https://en.wikipedia.org/wiki/Grover%27s_algorithm>`__\n# to layers in `quantum neural networks\n# <https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.1.033063>`__\n# and `Hamiltonian simulation\n# <https://en.wikipedia.org/wiki/Hamiltonian_simulation>`__, repeated application\n# of a circuit is a central tool in quantum algorithms.\n#\n#\n# .. figure:: ../demonstrations/qaoa_module/repeat.png\n# :align: center\n# :width: 100%\n#\n#\n# Circuit repetition is implemented in PennyLane using the :func:`~.pennylane.layer` function. This method\n# allows us to take a function containing either quantum operations, a template, or even a single\n# quantum gate, and repeatedly apply it to a set of wires.\n#\n# .. figure:: ../demonstrations/qaoa_module/qml_layer.png\n# :align: center\n# :width: 90%\n#\n# To create a larger circuit consisting of many repetitions, we pass the circuit to be\n# repeated as an argument and specify the number of repetitions. For example, let's\n# say that we want to layer the following circuit three times:\n\ndef circ(theta):\n qml.RX(theta, wires=0)\n qml.Hadamard(wires=1)\n qml.CNOT(wires=[0, 1])\n\[email protected](dev)\ndef circuit(param):\n circ(param)\n return [qml.expval(qml.PauliZ(i)) for i in range(2)]\n\ncircuit(0.5)\nprint(circuit.draw())\n\n######################################################################\n#\n# We simply pass this function into the :func:`~.pennylane.layer` function:\n#\n\[email protected](dev)\ndef circuit(params, **kwargs):\n qml.layer(circ, 3, params)\n return [qml.expval(qml.PauliZ(i)) for i in range(2)]\n\ncircuit([0.3, 0.4, 0.5])\nprint(circuit.draw())\n\n######################################################################\n#\n# We have learned how time evolution can be used to create circuits from Hamiltonians,\n# and how these can be layered to create longer circuits. We are now ready to\n# explore QAOA.\n#\n\n\n######################################################################\n# QAOA\n# ----\n#\n# The quantum approximate optimization algorithm (QAOA) is a general technique that can be used\n# to find approximate solutions to combinatorial optimization problems, in particular problems\n# that can be cast as searching for an optimal bitstring. QAOA consists of the following\n# steps:\n#\n# 1. 
Define a *cost Hamiltonian* :math:`H_C` such that its ground state\n# encodes the solution to the optimization problem.\n#\n# 2. Define a *mixer Hamiltonian* :math:`H_M`.\n#\n# 3. Construct the circuits :math:`e^{-i \\gamma H_C}` and :math:`e^{-i\\alpha H_M}`. We call\n# these the *cost* and *mixer layers*, respectively.\n#\n# 4. Choose a parameter :math:`n\\geq 1` and build the circuit\n#\n# .. math:: U(\\boldsymbol\\gamma, \\ \\boldsymbol\\alpha) \\ = \\ e^{-i \\alpha_n H_M}\n# e^{-i \\gamma_n H_C} \\ ... \\ e^{-i \\alpha_1 H_M} e^{-i \\gamma_1 H_C},\n#\n# consisting of repeated application of the cost and mixer layers.\n#\n# 5. Prepare an initial state, apply :math:`U(\\boldsymbol\\gamma,\\boldsymbol\\alpha)`,\n# and use classical techniques to optimize the parameters.\n#\n# 6. After the circuit has been optimized, measurements of the output state reveal\n# approximate solutions to the optimization problem.\n#\n# In summary, the starting point of QAOA is the specification of cost and mixer Hamiltonians.\n# We then use time evolution and layering to create a variational circuit and optimize its\n# parameters. The algorithm concludes by sampling from the circuit to get an approximate solution to\n# the optimization problem. Let's see it in action! 🚀\n#\n\n######################################################################\n# Minimum Vertex Cover with QAOA\n# ------------------------------\n#\n# Our goal is to find the `minimum vertex\n# cover <https://en.wikipedia.org/wiki/Vertex_cover>`__ of a graph:\n# a collection of vertices such that\n# each edge in the graph contains at least one of the vertices in the cover. Hence,\n# these vertices \"cover\" all the edges 👍.\n# We wish to find the vertex cover that has the\n# smallest possible number of vertices.\n#\n# Vertex covers can be represented by a bit string\n# where each bit denotes whether the corresponding vertex is present in the cover. For example,\n# the bit string 01010 represents a cover consisting of the second and fourth vertex in a graph with five vertices.\n#\n# .. figure:: ../demonstrations/qaoa_module/minvc.png\n# :align: center\n# :width: 90%\n#\n# To implement QAOA with PennyLane, we first import the necessary dependencies:\n#\n\nfrom pennylane import qaoa\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport networkx as nx\n\n\n######################################################################\n#\n# We also define the four-vertex graph for which we\n# want to find the minimum vertex cover:\n\nedges = [(0, 1), (1, 2), (2, 0), (2, 3)]\ngraph = nx.Graph(edges)\n\nnx.draw(graph, with_labels=True)\nplt.show()\n\n\n######################################################################\n#\n# There are two minimum vertex covers of this graph: the vertices 0 and 2,\n# and the vertices 1 and 2. These can be respectively represented by the bit strings 1010 and\n# 0110. The goal of the algorithm is to sample these bit strings with high probability.\n#\n# The PennyLane QAOA module has a collection of built-in optimization\n# problems, including minimum vertex cover. For each problem, you can retrieve the cost Hamiltonian\n# as well as a recommended mixer Hamiltonian. 
This\n# makes it straightforward to obtain the Hamiltonians for specific problems while still\n# permitting the flexibility to make other choices, for example by adding constraints or\n# experimenting with different mixers.\n#\n# In our case, the cost\n# Hamiltonian has two ground states, :math:`|1010\\rangle` and :math:`|0110\\rangle`, coinciding\n# with the solutions of the problem. The mixer Hamiltonian is the simple, non-commuting sum of Pauli-X\n# operations on each node of the graph:\n\ncost_h, mixer_h = qaoa.min_vertex_cover(graph, constrained=False)\n\nprint(\"Cost Hamiltonian\", cost_h)\nprint(\"Mixer Hamiltonian\", mixer_h)\n\n######################################################################\n#\n# A single layer of QAOA consists of time evolution under these\n# Hamiltonians:\n#\n# .. figure:: ../demonstrations/qaoa_module/layer.png\n# :align: center\n# :width: 90%\n#\n# While it is possible to use :func:`~.pennylane.templates.ApproxTimeEvolution`, the QAOA module allows you to\n# build the cost and mixer layers directly using the functions :func:`~.pennylane.qaoa.cost_layer` and\n# :func:`~.pennylane.qaoa.mixer_layer`, which take as input the respective Hamiltonian and variational parameters:\n\n\ndef qaoa_layer(gamma, alpha):\n qaoa.cost_layer(gamma, cost_h)\n qaoa.mixer_layer(alpha, mixer_h)\n\n######################################################################\n#\n# We are now ready to build the full variational circuit. The number of wires is equal to\n# the number of vertices of the graph. We initialize the state to an even superposition over\n# all basis states.\n# For this example, we employ a circuit consisting of two QAOA layers:\n\n\nwires = range(4)\ndepth = 2\n\ndef circuit(params, **kwargs):\n for w in wires:\n qml.Hadamard(wires=w)\n qml.layer(qaoa_layer, depth, params[0], params[1])\n\n\n######################################################################\n#\n# Note that :func:`~.pennylane.layer` allows us to pass variational parameters\n# ``params[0]`` and ``params[1]`` into each layer of the circuit. That's it! The last\n# step is PennyLane's specialty: optimizing the circuit parameters.\n#\n# The cost function is the expectation value of :math:`H_C`, which we want to minimize. We\n# use the function :func:`~.pennylane.expval` which returns the\n# expectation value of the Hamiltonian with respect to the circuit's output state.\n# We also define the device on which the simulation is performed. We use the\n# PennyLane-Qulacs plugin to run the circuit on the Qulacs simulator:\n#\n\ndev = qml.device(\"qulacs.simulator\", wires=wires)\n\[email protected](dev)\ndef cost_function(params):\n circuit(params)\n return qml.expval(cost_h)\n\n\n######################################################################\n#\n# Finally, we optimize the cost function using the built-in\n# :func:`~.pennylane.GradientDescentOptimizer`. We perform optimization for seventy steps and initialize the\n# parameters:\n\n\noptimizer = qml.GradientDescentOptimizer()\nsteps = 70\nparams = [[0.5, 0.5], [0.5, 0.5]]\n\n\n######################################################################\n#\n# Notice that we set each of the initial parameters to :math:`0.5`. 
For demonstration purposes,\n# we chose initial parameters that we know work fairly well, and don't get stuck in any local minima.\n#\n# The choice of initial parameters for a variational circuit is usually a difficult problem,\n# so we won't linger on it too much in this tutorial, but it is important to note that\n# finding an initial set of parameters that work well for a few toy problems often yields good results\n# for more complex instances of the algorithm as well.\n#\n# Now, we can optimize the circuit:\n#\n\nfor i in range(steps):\n params = optimizer.step(cost_function, params)\n\nprint(\"Optimal Parameters\")\nprint(params)\n\n\n######################################################################\n#\n# With the optimal parameters, we can now reconstruct the probability\n# landscape. We redefine the\n# full QAOA circuit with the optimal parameters, but this time we\n# return the probabilities of measuring each bitstring:\n#\n\[email protected](dev)\ndef probability_circuit(gamma, alpha):\n circuit([gamma, alpha])\n return qml.probs(wires=wires)\n\n\nprobs = probability_circuit(params[0], params[1])\n\n\n######################################################################\n#\n# Finally, we can display a bar graph showing the probability of\n# measuring each bitstring:\n\nplt.style.use(\"seaborn\")\nplt.bar(range(2 ** len(wires)), probs)\nplt.show()\n\n\n######################################################################\n#\n# The states\n# :math:`|6\\rangle \\ = \\ |0110\\rangle` and\n# :math:`|10\\rangle \\ = \\ |1010\\rangle` have the highest probabilities of\n# being measured, just as expected!\n#\n# .. figure:: ../demonstrations/qaoa_module/graph.png\n# :align: center\n# :width: 90%\n#\n\n######################################################################\n# Customizing QAOA\n# ----------------\n#\n# QAOA is not one-size-fits-all when it comes to solving optimization problems. In many cases,\n# cost and mixer Hamiltonians will be very specific to one scenario, and not necessarily\n# fit within the structure of the pre-defined problems in the :func:`~.pennylane.qaoa` submodule. Luckily,\n# one of the core principles behind the entire PennyLane library is customizability, and this principle hold true for\n# QAOA submodule as well!\n#\n# The QAOA workflow above gave us two optimal solutions: :math:`|6\\rangle = |0110\\rangle`\n# and :math:`|10\\rangle = |1010\\rangle`. What if we add a constraint\n# that made one of these solutions \"better\" than the other? Let's imagine that we are interested in\n# solutions that minimize the original cost function,\n# *but also colour the first and third vertices* :math:`1`. A constraint of this form will\n# favour :math:`|10\\rangle`, making it the only true ground state.\n#\n# It is easy to introduce constraints of this form in PennyLane.\n# We can use the :func:`~.pennylane.qaoa.edge_driver` cost\n# Hamiltonian to \"reward\" cases in which the first and last vertices of the graph\n# are :math:`0`:\n\nreward_h = qaoa.edge_driver(nx.Graph([(0, 2)]), ['11'])\n\n######################################################################\n#\n# We then weigh and add the constraining term\n# to the original minimum vertex cover Hamiltonian:\n\nnew_cost_h = cost_h + 2 * reward_h\n\n######################################################################\n#\n# Notice that PennyLane allows for simple addition and multiplication of\n# Hamiltonian objects using inline arithmetic operations ➕ ➖ ✖️➗! 
Finally, we can\n# use this new cost Hamiltonian to define a new QAOA workflow:\n\n\ndef qaoa_layer(gamma, alpha):\n qaoa.cost_layer(gamma, new_cost_h)\n qaoa.mixer_layer(alpha, mixer_h)\n\ndef circuit(params, **kwargs):\n for w in wires:\n qml.Hadamard(wires=w)\n qml.layer(qaoa_layer, depth, params[0], params[1])\n\[email protected](dev)\ndef cost_function(params):\n circuit(params)\n return qml.expval(new_cost_h)\n\nparams = [[0.5, 0.5], [0.5, 0.5]]\n\nfor i in range(steps):\n params = optimizer.step(cost_function, params)\n\nprint(\"Optimal Parameters\")\nprint(params)\n\n\n######################################################################\n#\n# We then reconstruct the probability landscape with the optimal parameters:\n#\n\[email protected](dev)\ndef probability_circuit(gamma, alpha):\n circuit([gamma, alpha])\n return qml.probs(wires=wires)\n\nprobs = probability_circuit(params[0], params[1])\n\nplt.style.use(\"seaborn\")\nplt.bar(range(2 ** len(wires)), probs)\nplt.show()\n\n######################################################################\n#\n# Just as we expected, the :math:`|10\\rangle` state is now favoured\n# over :math:`|6\\rangle`!\n#\n\n######################################################################\n# Conclusion\n# ----------\n#\n# You have learned how to use the PennyLane QAOA functionality, while\n# also surveying some of the fundamental features that make the QAOA module simple and\n# flexible. Now, it's your turn to experiment with QAOA! If you need some inspiration for how to get\n# started:\n#\n# - Experiment with different optimizers and different devices. Which ones work the best?\n# - Play around with some of the other built-in cost and mixer Hamiltonians.\n# - Try making your own custom constraining terms. Is QAOA properly amplifying some bitstrings over others?\n#\n# .. figure:: ../demonstrations/qaoa_module/qaoa_circuit.png\n# :align: center\n# :width: 90%\n#\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
caisarl76/savn | [
"843b4bbed8aea4c1ff13e6b37515eb42da5c0985"
] | [
"runners/nonadaptivea3c_train.py"
] | [
"from __future__ import division\n\nimport time\n\nfrom datasets.data import get_data\nfrom datasets.glove import Glove\n\nimport setproctitle\n\nfrom models.model_io import ModelOptions\n\nfrom agents.random_agent import RandomNavigationAgent\n\nimport random\n\nfrom .train_util import (\n compute_loss,\n new_episode,\n run_episode,\n transfer_gradient_from_player_to_shared,\n end_episode,\n reset_player,\n)\n\n\ndef nonadaptivea3c_train(\n rank,\n args,\n create_shared_model,\n shared_model,\n initialize_agent,\n optimizer,\n res_queue,\n end_flag,\n):\n\n glove = Glove(args.glove_file)\n scenes, possible_targets, targets = get_data(args.scene_types, args.train_scenes)\n\n random.seed(args.seed + rank)\n idx = [j for j in range(len(args.scene_types))]\n random.shuffle(idx)\n\n setproctitle.setproctitle(\"Training Agent: {}\".format(rank))\n\n gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]\n\n import torch\n\n torch.cuda.set_device(gpu_id)\n\n torch.manual_seed(args.seed + rank)\n if gpu_id >= 0:\n torch.cuda.manual_seed(args.seed + rank)\n\n player = initialize_agent(create_shared_model, args, rank, gpu_id=gpu_id)\n compute_grad = not isinstance(player, RandomNavigationAgent)\n\n model_options = ModelOptions()\n\n j = 0\n\n while not end_flag.value:\n\n # Get a new episode.\n total_reward = 0\n player.eps_len = 0\n new_episode(\n args, player, scenes[idx[j]], possible_targets, targets[idx[j]], glove=glove\n )\n player_start_time = time.time()\n\n # Train on the new episode.\n while not player.done:\n # Make sure model is up to date.\n player.sync_with_shared(shared_model)\n # Run episode for num_steps or until player is done.\n total_reward = run_episode(player, args, total_reward, model_options, True)\n # Compute the loss.\n loss = compute_loss(args, player, gpu_id, model_options)\n if compute_grad:\n # Compute gradient.\n player.model.zero_grad()\n loss[\"total_loss\"].backward()\n torch.nn.utils.clip_grad_norm_(player.model.parameters(), 100.0)\n # Transfer gradient to shared model and step optimizer.\n transfer_gradient_from_player_to_shared(player, shared_model, gpu_id)\n optimizer.step()\n # Clear actions and repackage hidden.\n if not player.done:\n reset_player(player)\n\n for k in loss:\n loss[k] = loss[k].item()\n\n end_episode(\n player,\n res_queue,\n title=args.scene_types[idx[j]],\n total_time=time.time() - player_start_time,\n total_reward=total_reward,\n )\n reset_player(player)\n\n j = (j + 1) % len(args.scene_types)\n\n player.exit()\n"
] | [
[
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.set_device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
scott-mao/CroP | [
"f1e0a25224e341683cf47e7ce451ce0fe996e950"
] | [
"main_seml.py"
] | [
"import sys\nimport warnings\n\nfrom models import GeneralModel\nfrom models.statistics.Metrics import Metrics\nfrom utils.config_utils import *\nfrom utils.model_utils import *\nfrom utils.system_utils import *\n# from rigl_torch.RigL import RigLScheduler\n\nwarnings.filterwarnings(\"ignore\")\n\nimport logging\nfrom sacred import Experiment\nimport numpy as np\nimport seml\n\n\nex = Experiment()\nseml.setup_logger(ex)\n\n\[email protected]_run_hook\ndef collect_stats(_run):\n seml.collect_exp_stats(_run)\n\n\[email protected]\ndef config():\n overwrite = None\n db_collection = None\n if db_collection is not None:\n ex.observers.append(seml.create_mongodb_observer(db_collection, overwrite=overwrite))\n\ndef main(\n arguments,\n metrics: Metrics\n):\n if arguments.disable_autoconfig:\n autoconfig(arguments)\n\n global out\n out = metrics.log_line\n out(f\"starting at {get_date_stamp()}\")\n\n # hardware\n device = configure_device(arguments)\n\n if arguments.disable_cuda_benchmark:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n # for reproducibility\n configure_seeds(arguments, device)\n\n # filter for incompatible properties\n assert_compatibilities(arguments)\n # get model\n model: GeneralModel = find_right_model(\n NETWORKS_DIR, arguments.model,\n device=device,\n hidden_dim=arguments.hidden_dim,\n input_dim=arguments.input_dim,\n output_dim=arguments.output_dim,\n is_maskable=arguments.disable_masking,\n is_tracking_weights=arguments.track_weights,\n is_rewindable=arguments.enable_rewinding,\n is_growable=arguments.growing_rate > 0,\n outer_layer_pruning=arguments.outer_layer_pruning,\n maintain_outer_mask_anyway=(\n not arguments.outer_layer_pruning) and (\n \"Structured\" in arguments.prune_criterion),\n l0=arguments.l0,\n l0_reg=arguments.l0_reg,\n N=arguments.N,\n beta_ema=arguments.beta_ema,\n l2_reg=arguments.l2_reg\n ).to(device)\n\n # get criterion\n criterion = find_right_model(\n CRITERION_DIR, arguments.prune_criterion,\n model=model,\n limit=arguments.pruning_limit,\n start=0.5,\n steps=arguments.snip_steps,\n device=arguments.pruning_device\n )\n\n # load pre-trained weights if specified\n load_checkpoint(arguments, metrics, model)\n\n # load data\n train_loader, test_loader = find_right_model(\n DATASETS, arguments.data_set,\n arguments=arguments\n )\n\n # get loss function\n loss = find_right_model(\n LOSS_DIR, arguments.loss,\n device=device,\n l1_reg=arguments.l1_reg,\n lp_reg=arguments.lp_reg,\n l0_reg=arguments.l0_reg,\n hoyer_reg=arguments.hoyer_reg\n )\n\n # get optimizer\n optimizer = find_right_model(\n OPTIMS, arguments.optimizer,\n params=model.parameters(),\n lr=arguments.learning_rate,\n weight_decay=arguments.l2_reg if not arguments.l0 else 0,\n # momentum=arguments.momentum if arguments.momentum else 0\n )\n from torch.optim.lr_scheduler import StepLR, OneCycleLR\n if arguments.model == 'VGG16' or arguments.prune_criterion != 'EmptyCrit':\n scheduler = StepLR(optimizer, step_size=30000, gamma=0.2)\n elif arguments.prune_criterion == 'EmptyCrit':\n scheduler = OneCycleLR(optimizer, max_lr=arguments.learning_rate,\n steps_per_epoch=len(train_loader), epochs=arguments.epochs)\n # now, create the RigLScheduler object\n # pruner = RigLScheduler(model,\n # optimizer,\n # dense_allocation=0.1,\n # sparsity_distribution='uniform',\n # T_end=5859,\n # delta=100,\n # alpha=0.3,\n # grad_accumulation_n=1,\n # static_topo=False,\n # ignore_linear_layers=False,\n # state_dict=None)\n run_name = 
f'_model={arguments.model}_dataset={arguments.data_set}_prune-criterion={arguments.prune_criterion}' + \\\n f'_pruning-limit={arguments.pruning_limit}_train-scheme={arguments.train_scheme}_seed={arguments.seed}'\n if not arguments.eval:\n\n # build trainer\n trainer = find_right_model(\n TRAINERS_DIR, arguments.train_scheme,\n model=model,\n loss=loss,\n optimizer=optimizer,\n device=device,\n arguments=arguments,\n train_loader=train_loader,\n test_loader=test_loader,\n metrics=metrics,\n criterion=criterion,\n scheduler=scheduler,\n run_name=run_name\n # pruner=pruner\n )\n\n from codecarbon import EmissionsTracker\n tracker = EmissionsTracker()\n tracker.start()\n trainer.train()\n emissions: float = tracker.stop()\n\n else:\n\n tester = find_right_model(\n TESTERS_DIR, arguments.test_scheme,\n train_loader=train_loader,\n test_loader=test_loader,\n model=model,\n loss=loss,\n optimizer=optimizer,\n device=device,\n arguments=arguments,\n )\n\n return tester.evaluate()\n\n out(f\"finishing at {get_date_stamp()}\")\n results = {}\n results['emissions'] = emissions\n results['run_name'] = run_name\n results['crit'] = np.array([i.detach().cpu() for i in trainer.crit])\n results['test_acc'] = trainer.test_acc\n results['train_acc'] = trainer.train_acc\n results['train_loss'] = trainer.train_loss\n results['test_loss'] = trainer.test_loss\n results['sparse_weight'] = trainer.sparse_weight\n results['sparse_node'] = trainer.sparse_node\n results['sparse_hm'] = trainer.sparse_hm\n results['sparse_log_disk_size'] = trainer.sparse_log_disk_size\n results['flops_per_sample'] = trainer.flops_per_sample\n results['flops_log_cum'] = trainer.flops_log_cum\n results['gpu_ram'] = trainer.gpu_ram\n results['max_gpu_ram'] = trainer.max_gpu_ram\n results['batch_time'] = trainer.batch_time\n results['gpu_time'] = trainer.time_gpu\n return results\n\ndef assert_compatibilities(arguments):\n check_incompatible_props([arguments.loss != \"L0CrossEntropy\", arguments.l0], \"l0\", arguments.loss)\n check_incompatible_props([arguments.train_scheme != \"L0Trainer\", arguments.l0], \"l0\", arguments.train_scheme)\n check_incompatible_props([arguments.l0, arguments.group_hoyer_square, arguments.hoyer_square],\n \"Choose one mode, not multiple\")\n # check_incompatible_props(\n # [\"Structured\" in arguments.prune_criterion, \"Group\" in arguments.prune_criterion, \"ResNet\" in arguments.model],\n # \"structured\", \"residual connections\")\n # todo: add more\n\n\ndef load_checkpoint(arguments, metrics, model):\n if (not (arguments.checkpoint_name is None)) and (not (arguments.checkpoint_model is None)):\n path = os.path.join(RESULTS_DIR, arguments.checkpoint_name, MODELS_DIR, arguments.checkpoint_model)\n state = DATA_MANAGER.load_python_obj(path)\n try:\n model.load_state_dict(state)\n except KeyError as e:\n print(list(state.keys()))\n raise e\n out(f\"Loaded checkpoint {arguments.checkpoint_name} from {arguments.checkpoint_model}\")\n\n\ndef log_start_run(arguments, metrics):\n arguments.PyTorch_version = torch.__version__\n arguments.PyThon_version = sys.version\n arguments.pwd = os.getcwd()\n metrics.log_line(\"PyTorch version:\", torch.__version__, \"Python version:\", sys.version)\n metrics.log_line(\"Working directory: \", os.getcwd())\n metrics.log_line(\"CUDA avalability:\", torch.cuda.is_available(), \"CUDA version:\", torch.version.cuda)\n metrics.log_line(arguments)\n\n\ndef get_arguments():\n global arguments\n arguments = parse()\n if arguments.disable_autoconfig:\n autoconfig(arguments)\n return 
arguments\n\nfrom dataclasses import dataclass\n\[email protected]\ndef run(arguments):\n @dataclass\n class args:\n eval_freq = arguments['eval_freq'] # evaluate every n batches\n save_freq = arguments['save_freq'] # save model every n epochs, besides before and after training\n batch_size = arguments['batch_size'] # 128 # size of batches, for Imagenette 128\n seed = arguments['seed'] # random seed\n max_training_minutes = arguments['max_training_minutes'] # one hour and a 45 minutes max, process killed after n minutes (after finish of epoch)\n plot_weights_freq = arguments['plot_weights_freq'] # plot pictures to tensorboard every n epochs\n prune_freq = arguments['prune_freq'] # if pruning during training: how long to wait between pruning events after first pruning\n prune_delay = arguments['prune_delay'] # \"if pruning during training: how long to wait before pruning first time\n lower_limit = arguments['lower_limit']\n epochs = arguments['epochs']\n rewind_to = arguments['rewind_to'] # rewind to this epoch if rewinding is done\n hidden_dim = arguments['hidden_dim']\n input_dim = arguments['input_dim']\n output_dim = arguments['output_dim']\n N = arguments['N'] # size of dataset (used for l0)\n snip_steps = arguments['snip_steps'] # 's' in algorithm box, number of pruning steps for 'rule of thumb', TODO\n pruning_rate = arguments['pruning_rate'] # pruning rate passed to criterion at pruning event. however, most override this\n growing_rate = arguments['growing_rate'] # grow back so much every epoch (for future criterions)\n pruning_limit = arguments['pruning_limit'] # Prune until here, if structured in nodes, if unstructured in weights. most criterions use this instead of the pruning_rate\n prune_to = arguments['prune_to']\n learning_rate = arguments['learning_rate']\n grad_clip = arguments['grad_clip']\n grad_noise = arguments['grad_noise'] # added gaussian noise to gradients\n l2_reg = arguments['l2_reg'] # weight decay\n l1_reg = arguments['l1_reg'] # l1-norm regularisation\n lp_reg = arguments['lp_reg'] # lp regularisation with p < 1\n l0_reg = arguments['l0_reg'] # l0 reg lambda hyperparam\n hoyer_reg = arguments['hoyer_reg'] # hoyer reg lambda hyperparam\n beta_ema = arguments['beta_ema'] # l0 reg beta ema hyperparam\n\n loss = arguments['loss']\n optimizer = arguments['optimizer']\n model = arguments['model'] # WideResNet28x10 # ResNet not supported with structured\n data_set = arguments['data_set']\n prune_criterion = arguments['prune_criterion'] # HoyerSquare is one shot, pruning limit doesn't matter in this implementation\n train_scheme = arguments['train_scheme'] # default: DefaultTrainer\n\n device = arguments['device']\n structured_prior = arguments['structured_prior']\n pruning_device = arguments['pruning_device']\n\n checkpoint_name = arguments['checkpoint_name']\n checkpoint_model = arguments['checkpoint_model']\n\n disable_cuda_benchmark = arguments['disable_cuda_benchmark'] # speedup (disable) vs reproducibility (leave it)\n eval = arguments['eval']\n disable_autoconfig = arguments['disable_autoconfig'] # for the brave\n preload_all_data = arguments['preload_all_data'] # load all data into ram memory for speedups\n tuning = arguments['tuning'] # splits trainset into train and validationset, omits test set\n\n\n track_weights = arguments['track_weights'] # \"keep statistics on the weights through training\n disable_masking = arguments['disable_masking'] # disable the ability to prune unstructured\n enable_rewinding = arguments['enable_rewinding'] # enable the ability to 
rewind to previous weights\n outer_layer_pruning = arguments['outer_layer_pruning'] # allow to prune outer layers (unstructured) or not (structured)\n random_shuffle_labels = arguments['random_shuffle_labels'] # run with random-label experiment from zhang et al\n l0 = arguments['l0'] # run with l0 criterion, might overwrite some other arguments\n hoyer_square = arguments['hoyer_square'] # \"run in unstructured DeephoyerSquare criterion, might overwrite some other arguments\n group_hoyer_square = arguments['group_hoyer_square'] # run in unstructured Group-DeephoyerSquare criterion, might overwrite some other arguments\n\n disable_histograms = arguments['disable_histograms']\n disable_saliency = arguments['disable_saliency']\n disable_confusion = arguments['disable_confusion']\n disable_weightplot = arguments['disable_weightplot']\n disable_netplot = arguments['disable_netplot']\n skip_first_plot = arguments['skip_first_plot']\n\n metrics = Metrics()\n out = metrics.log_line\n print = out\n ensure_current_directory()\n # get_arguments()\n log_start_run(args, metrics)\n out(\"\\n\\n\")\n metrics._batch_size = args.batch_size\n metrics._eval_freq = args.eval_freq\n\n results = main(args, metrics)\n\n # the returned result will be written into the database\n return results\n"
] | [
[
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
javierggomez/capacity-estimator-via-dine | [
"003cba3b21719f2e6ab4850beb9d589a23c3c2b3"
] | [
"utils.py"
] | [
"import tensorflow as tf\r\nimport sys\r\nimport os\r\nimport logging\r\nimport time\r\nimport shutil\r\nfrom configs import ConfigAWGN, ConfigFF_MA_AGN, ConfigFB_MA_AGN\r\n\r\nlogger = logging.getLogger(\"logger\")\r\n\r\n\r\ndef define_configs(args):\r\n if args.config_name == \"awgn\":\r\n config = ConfigAWGN()\r\n elif args.config_name == \"arma_ff\":\r\n config = ConfigFF_MA_AGN()\r\n elif args.config_name == \"arma_fb\":\r\n config = ConfigFB_MA_AGN()\r\n else:\r\n raise ValueError(\"Invalid choice of configuration\")\r\n\r\n config = read_flags(config, args)\r\n\r\n seed_tmp = time.time()\r\n config.seed = int((seed_tmp - int(seed_tmp))*1e6) if args.seed is None else args.seed\r\n print(config.seed)\r\n\r\n simulation_name = get_simulation_name(config)\r\n config.simulation_name = simulation_name\r\n config.directory = directory = \"{}/results/{}/{}/{}/{}\".format(os.path.dirname(sys.argv[0]),\r\n config.config_name,\r\n config.name,\r\n simulation_name,\r\n config.seed)\r\n\r\n create_exp_dir(directory, scripts_to_save=['algorithm.py',\r\n 'configs.py',\r\n 'main.py',\r\n 'rnn_modified.py',\r\n 'utils.py'])\r\n\r\n print(\"Using tensrflow version {}\".format(tf.__version__))\r\n gpus = tf.config.experimental.list_physical_devices('GPU')\r\n if gpus:\r\n try:\r\n # Currently, memory growth needs to be the same across GPUs\r\n for gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\r\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\r\n except RuntimeError as e:\r\n # Memory growth must be set before GPUs have been initialized\r\n print(e)\r\n\r\n return config\r\n\r\n\r\ndef read_flags(config, args):\r\n # assign flags into config\r\n for arg in sorted(vars(args)):\r\n key = arg\r\n val = getattr(args, arg)\r\n if val is not None:\r\n setattr(config, key, val)\r\n\r\n return config\r\n\r\n\r\ndef get_simulation_name(args):\r\n waiver = ['seed', 'verbose', 'config_name', 'name']\r\n name = []\r\n for arg in sorted(vars(args)):\r\n key = arg\r\n val = getattr(args, arg)\r\n if key == name:\r\n continue\r\n if val is not None and key not in waiver:\r\n name.append(key + \"-\" + str(val).replace(\",\",\"-\").replace(\" \", \"\").replace(\"[\", \"\").replace(\"]\", \"\"))\r\n return \"{}\".format(\"_\".join(name))\r\n\r\n\r\ndef create_exp_dir(path, scripts_to_save=None):\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n\r\n if scripts_to_save is not None:\r\n if not os.path.exists(os.path.join(path, 'scripts')):\r\n os.makedirs(os.path.join(path, 'scripts'))\r\n for script in scripts_to_save:\r\n dst_file = os.path.join(path, 'scripts', os.path.basename(script))\r\n shutil.copyfile(os.path.join(os.path.dirname(sys.argv[0]),script), dst_file)\r\n\r\n\r\ndef define_logger(args, directory):\r\n logFormatter = logging.Formatter(\"%(message)s\")\r\n logger = logging.getLogger(\"logger\")\r\n\r\n logger.setLevel(logging.INFO)\r\n\r\n\r\n fileHandler = logging.FileHandler(\"{0}/logger.log\".format(directory))\r\n\r\n fileHandler.setFormatter(logFormatter)\r\n logger.addHandler(fileHandler)\r\n\r\n if args.verbose:\r\n consoleHandler = logging.StreamHandler()\r\n consoleHandler.setFormatter(logFormatter)\r\n logger.addHandler(consoleHandler)\r\n\r\n return logger\r\n\r\n\r\ndef preprocess(args):\r\n ###################################### general configs ######################################\r\n\r\n config = define_configs(args)\r\n logger = define_logger(args, 
config.directory)\r\n\r\n logger.info(\"\\n\"*10)\r\n logger.info(\"cmd line: python \" + \" \".join(sys.argv) + \"\\n\"*2)\r\n logger.info(\"Simulation configurations:\\n\" + \"-\"*30)\r\n config.show()\r\n logger.info(\"\\n\" * 5)\r\n return config, logger\r\n"
] | [
[
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.config.experimental.list_logical_devices",
"tensorflow.config.experimental.set_memory_growth"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ntoussaint/fetalnav | [
"a6701a33f1ed8ac412f5ee09c0704d866ce7dad2"
] | [
"experiments/train.py"
] | [
"import argparse\nimport os\nimport sys\nfrom pkgutil import iter_modules\nimport numpy as np\n\nimport torch\nfrom torchvision import transforms as torchtransforms\n\nfrom fetalnav.transforms import itk_transforms as itktransforms\nfrom fetalnav.transforms import tensor_transforms as tensortransforms\nfrom fetalnav.datasets.itk_metadata_classification import ITKMetaDataClassification\n\nfrom fetalnav.models.spn_models import *\n\nimport engine as torchengine\n\n\ndef module_exists(module_name):\n return module_name in (name for loader, name, ispkg in iter_modules())\n\n\nparser = argparse.ArgumentParser(description='Model Training')\nparser.add_argument('data', metavar='DIR',\n help='path to dataset (e.g. ../data/')\nparser.add_argument('--image-size', '-i', default=224, type=int,\n metavar='N', help='image size (default: 128)')\nparser.add_argument('-j', '--workers', default=8, type=int, metavar='N',\n help='number of data loading workers (default: 8)')\nparser.add_argument('--epochs', default=10, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('-b', '--batch-size', default=4, type=int,\n metavar='N', help='mini-batch size (default: 4)')\nparser.add_argument('--lr', '--learning-rate', default=0.01, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=0.0005, type=float,\n metavar='W', help='weight decay (default: 5e-4)')\nparser.add_argument('--resume', default=None, type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\nparser.add_argument('--save_model_path', default='logs/ITKMetaDataClassification', type=str, metavar='PATH',\n help='path to logs (default: none)')\nparser.add_argument('--model', default='resnet18', type=str, metavar='MD',\n help='Model Type to use')\nparser.add_argument('--spn', '--soft-proposal', dest='spn', action='store_true',\n help='Use Soft Proposal Layer')\nparser.add_argument('--aspect-ratio', '--aspect', default=1., type=float, metavar='A',\n help='Natural cropped aspect ratio of the image (default: 1.), tune to 1.5 for Polar images')\nparser.add_argument('--tensorboard_path', default=None, type=str, metavar='PATH',\n help='path to tensorboard logs (default: none)')\n\n\ndef main():\n global args, best_prec1, use_gpu\n args = parser.parse_args()\n\n use_gpu = torch.cuda.is_available()\n\n # create transformation and data augmentation schemes\n resample = itktransforms.Resample(new_spacing=[.5, .5, 1.])\n tonumpy = itktransforms.ToNumpy(outputtype='float')\n totensor = torchtransforms.ToTensor()\n crop = tensortransforms.CropToRatio(outputaspect=args.aspect_ratio)\n resize = tensortransforms.Resize(size=[args.image_size, args.image_size], interp='bilinear')\n rescale = tensortransforms.Rescale(interval=(0,1))\n flip = tensortransforms.Flip(axis=2)\n\n transform = torchtransforms.Compose(\n [resample,\n tonumpy,\n totensor,\n crop,\n resize,\n rescale,\n flip])\n validation_transform = torchtransforms.Compose(\n [resample,\n tonumpy,\n totensor,\n crop,\n resize,\n rescale])\n\n # load datasets\n train_dataset = ITKMetaDataClassification(root=args.data, mode='train', transform=transform)\n val_dataset = 
ITKMetaDataClassification(root=args.data, mode='validate', transform=validation_transform)\n\n # estimate the samples' weights\n train_cardinality = train_dataset.get_class_cardinality()\n val_cardinality = val_dataset.get_class_cardinality()\n train_sample_weights = torch.from_numpy(train_dataset.get_sample_weights())\n\n print('')\n print('train-dataset: ')\n for idx, c in enumerate(train_dataset.get_classes()):\n print('{}: \\t{}'.format(train_cardinality[idx], c))\n print('')\n print('validate-dataset: ')\n for idx, c in enumerate(val_dataset.get_classes()):\n print('{}: \\t{}'.format(val_cardinality[idx], c))\n print('')\n\n # create samplers weighting samples according to the occurence of their respective class\n train_sampler = torch.utils.data.sampler.WeightedRandomSampler(train_sample_weights,\n int(np.median(train_cardinality)),\n replacement=True)\n\n # create data loaders\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, sampler=train_sampler)\n\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers)\n\n # class labels\n classes = train_dataset.get_classes()\n num_classes = len(classes)\n\n if args.model == 'resnet18':\n model = resnet18_sp(num_classes, num_maps=512, in_channels=1)\n elif args.model == 'resnet34':\n model = resnet34_sp(num_classes, num_maps=512, in_channels=1)\n elif args.model == 'vgg13':\n model = vgg13_sp(num_classes, batch_norm=False, num_maps=512, in_channels=1)\n elif args.model == 'vgg13_bn':\n model = vgg13_sp(num_classes, batch_norm=True, num_maps=512, in_channels=1)\n elif args.model == 'vgg16':\n model = vgg16_sp(num_classes, batch_norm=False, num_maps=512, in_channels=1)\n elif args.model == 'vgg16_bn':\n model = vgg16_sp(num_classes, batch_norm=True, num_maps=512, in_channels=1)\n elif args.model == 'alexnet':\n model = alexnet_sp(num_classes, num_maps=512, in_channels=1)\n else:\n print('No network known: {}, possible choices are ffnet|vgg11|vgg16|alexnet|resnet18|densenet'.format(args.model))\n sys.exit(0)\n\n print(model)\n\n optimizer = torch.optim.SGD(model.parameters(),\n lr=args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n criterion = nn.MultiLabelSoftMarginLoss()\n\n logger = None\n if args.tensorboard_path is not None:\n if not os.path.exists(args.tensorboard_path):\n os.makedirs(args.tensorboard_path)\n logger = None\n if module_exists('tensorboardX'):\n from tensorboardX import SummaryWriter\n logger = SummaryWriter(log_dir=args.tensorboard_path)\n\n state = {'batch_size': args.batch_size,\n 'image_size': args.image_size,\n 'max_epochs': args.epochs,\n 'evaluate': args.evaluate,\n 'resume': args.resume,\n 'train_transform': transform,\n 'val_transform': transform,\n 'save_model_path': args.save_model_path,\n 'epoch': args.start_epoch,\n 'arch': args.model,\n 'workers': args.workers,\n 'Logger': logger,\n 'classes': classes\n }\n\n if not os.path.exists(state['save_model_path']):\n os.makedirs(state['save_model_path'])\n\n # launch learning procedure\n engine = torchengine.MultiLabelMAPEngine(state)\n engine.learning(model, criterion, train_loader, val_loader, optimizer)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.median",
"torch.utils.data.DataLoader",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liya2001/s3d.pytorch | [
"47240695a5f6947a24bcd28a10949af4747d27d5"
] | [
"tools/watershed.py"
] | [
"import os,sys\r\nimport time\r\nimport numpy as np\r\nfrom IPython import embed\r\nfrom operator import itemgetter\r\n\r\n\r\nclass Watershed(object):\r\n def __init__(self,type_='clips',num_classes=2):\r\n self.type=type_\r\n self.num_classes=num_classes\r\n\r\n def get_proposals(self,video_score_dic,alter=None):\r\n if alter==None:return 0\r\n if self.type=='clips':\r\n return self.get_proposals_clips(video_score_dic,alter)\r\n elif self.type=='smooth':\r\n return self.get_proposals_smooth(video_score_dic,alter)\r\n\r\n def get_proposals_smooth(self,video_score_dic,alter):\r\n proposals=list()\r\n smooth_term,thre=alter\r\n for smooth_term in [smooth_term]:\r\n '''\r\n clips=[0.95,0.5,0.25,0.15,0.05,0.01]\r\n '''\r\n for i,(video,score) in enumerate(video_score_dic.items()):\r\n tmp_score=score.copy()\r\n tmp_score=self.smooth(tmp_score,smooth_term)\r\n tmp_score[tmp_score<thre]=0\r\n tmp_score[tmp_score>thre]=1\r\n start_end_list=self.get_s_e(tmp_score,video)\r\n proposals.extend(start_end_list)\r\n\r\n print('smooth term {} has done!'.format(smooth_term))\r\n return proposals\r\n def get_proposals_clips(self,video_score_dic,alter):\r\n proposals=list()\r\n for clip in alter:\r\n '''\r\n clips=[0.95,0.5,0.25,0.15,0.05,0.01]\r\n '''\r\n for i,video,score in enumerate(video_score_dic.items()):\r\n tmp_score=score.copy()\r\n tmp_score[tmp_score<clip]=0\r\n tmp_score[tmp_score>clip]=1\r\n start_end_list=self.get_s_e(tmp_score,video)\r\n proposals.extend(start_end_list)\r\n\r\n print('clip {} has done!'.format(clip))\r\n return proposals\r\n\r\n def smooth(self,old_score,terms):\r\n smoothing_vec=np.ones(terms)\r\n sum_smooth_vec=terms\r\n new_scores=np.zeros_like(old_score)\r\n old_score=np.concatenate([old_score[0].reshape(1,-1).repeat(len(smoothing_vec)/2,0),\\\r\n old_score,\\\r\n old_score[-1].reshape(1,-1).repeat(len(smoothing_vec)/2-1,0)]) # padding with repeat\r\n for i in range(len(new_scores)):\r\n new_scores[i]=np.dot(old_score[i:i+len(smoothing_vec)].T,smoothing_vec)/sum_smooth_vec\r\n return new_scores\r\n\r\n def get_s_e(self,score_ins,video):\r\n s_e_list=list()\r\n for i in range(1,self.num_classes):\r\n s,e=0,0;lock=0\r\n score_item=score_ins[:,i] # each class\r\n score_item=np.array([0]+list(score_item)+[0])\r\n for j in range(len(score_item)):\r\n if lock==0 and score_item[j]!=0:\r\n s=j\r\n lock=1\r\n if lock==1 and score_item[j]==0:\r\n e=j\r\n s_e_list.append([video,s,e,i])\r\n lock=0\r\n return s_e_list\r\n #return self.post(s_e_list,score_ins,video) # to ensemble by curves\r\n\r\n\r\n def post(self,s_e_list,score_ins,video):\r\n posted_s_e_list=s_e_list\r\n for ii in range(1,self.num_classes):\r\n tmp_s_e_lists=[_ for _ in s_e_list if _[3]==ii]\r\n s_s=[_[1] for _ in tmp_s_e_lists]\r\n e_s=[_[2] for _ in tmp_s_e_lists]\r\n\r\n for i,s_ in enumerate(s_s):\r\n for j,e_ in enumerate(e_s):\r\n if i<j and s_<e_:\r\n if sum(score_ins[s_:e_,ii])/float((e_-s_))>0.9:\r\n posted_s_e_list.append([video,s_,e_,ii])\r\n return posted_s_e_list\r\n\r\n"
] | [
[
"numpy.zeros_like",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BenJamesbabala/ShuffleNet-2 | [
"1e0a062c684e37c72d9b776a605f446b108498d6"
] | [
"model.py"
] | [
"import tensorflow as tf\nfrom layers import shufflenet_unit, conv2d, max_pool_2d, avg_pool_2d, dense, flatten\n\n\nclass ShuffleNet:\n \"\"\"ShuffleNet is implemented here!\"\"\"\n\n def __init__(self, args):\n self.args = args\n self.X = None\n self.y = None\n self.logits = None\n self.is_training = None\n self.loss = None\n self.regularization_loss = None\n self.cross_entropy_loss = None\n self.train_op = None\n self.accuracy = None\n self.y_out_argmax = None\n self.summaries_merged = None\n\n # A number stands for the num_groups\n # Output channels for conv1 layer\n self.output_channels = {'1': [144, 288, 576], '2': [200, 400, 800], '3': [240, 480, 960], '4': [272, 544, 1088],\n '8': [384, 768, 1536], 'conv1': 24}\n\n self.__build()\n\n def __init_input(self):\n batch_size = self.args.batch_size if self.args.train_or_test == 'train' else 1\n with tf.variable_scope('input'):\n # Input images\n self.X = tf.placeholder(tf.float32,\n [batch_size, self.args.img_height, self.args.img_width,\n self.args.num_channels])\n # Classification supervision, it's an argmax. Feel free to change it to one-hot,\n # but don't forget to change the loss from sparse as well\n self.y = tf.placeholder(tf.int32, [batch_size])\n # is_training is for batch normalization and dropout, if they exist\n self.is_training = tf.placeholder(tf.bool)\n\n def __resize(self, x):\n return tf.image.resize_bicubic(x, [224, 224])\n\n def __stage(self, x, stage=2, repeat=3):\n if 2 <= stage <= 4:\n stage_layer = shufflenet_unit('stage' + str(stage) + '_0', x=x, w=None, num_groups=self.args.num_groups,\n group_conv_bottleneck=not (stage == 2),\n num_filters=self.output_channels[str(self.args.num_groups)][stage - 2],\n stride=(2, 2),\n fusion='concat', l2_strength=self.args.l2_strength, bias=self.args.bias,\n batchnorm_enabled=self.args.batchnorm_enabled,\n is_training=self.is_training)\n for i in range(1, repeat + 1):\n stage_layer = shufflenet_unit('stage' + str(stage) + '_' + str(i), x=stage_layer, w=None,\n num_groups=self.args.num_groups,\n group_conv_bottleneck=True,\n num_filters=self.output_channels[str(self.args.num_groups)][stage - 2],\n stride=(1, 1),\n fusion='add', l2_strength=self.args.l2_strength,\n bias=self.args.bias,\n batchnorm_enabled=self.args.batchnorm_enabled,\n is_training=self.is_training)\n return stage_layer\n else:\n raise ValueError(\"Stage should be from 2 -> 4\")\n\n def __init_output(self):\n with tf.variable_scope('output'):\n self.regularization_loss = tf.reduce_sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n self.cross_entropy_loss = tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y, name='loss'))\n self.loss = self.regularization_loss + self.cross_entropy_loss\n\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.args.learning_rate).minimize(self.loss)\n self.y_out_argmax = tf.argmax(tf.nn.softmax(self.logits), axis=-1, output_type=tf.int32)\n\n self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y, self.y_out_argmax), tf.float32))\n\n with tf.name_scope('train-summary-per-iteration'):\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('acc', self.accuracy)\n self.summaries_merged = tf.summary.merge_all()\n\n def __build(self):\n self.__init_global_epoch()\n self.__init_global_step()\n self.__init_input()\n\n x_resized = self.__resize(self.X)\n conv1 = conv2d('conv1', x=x_resized, w=None, 
num_filters=self.output_channels['conv1'], kernel_size=(3, 3),\n stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,\n batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training)\n conv1_padded = tf.pad(conv1, [[0, 0], [1, 1], [1, 1], [0, 0]], \"CONSTANT\")\n max_pool = max_pool_2d(conv1_padded, size=(3, 3), stride=(2, 2), name='max_pool')\n stage2 = self.__stage(max_pool, stage=2, repeat=3)\n stage3 = self.__stage(stage2, stage=3, repeat=7)\n stage4 = self.__stage(stage3, stage=4, repeat=3)\n global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool')\n flattened = flatten(global_pool)\n\n self.logits = dense('fc', flattened, w=None, output_dim=self.args.num_classes,\n l2_strength=self.args.l2_strength,\n bias=self.args.bias,\n is_training=self.is_training)\n self.__init_output()\n\n def __init_global_epoch(self):\n \"\"\"\n Create a global epoch tensor to totally save the process of the training\n :return:\n \"\"\"\n with tf.variable_scope('global_epoch'):\n self.global_epoch_tensor = tf.Variable(-1, trainable=False, name='global_epoch')\n self.global_epoch_input = tf.placeholder('int32', None, name='global_epoch_input')\n self.global_epoch_assign_op = self.global_epoch_tensor.assign(self.global_epoch_input)\n\n def __init_global_step(self):\n \"\"\"\n Create a global step variable to be a reference to the number of iterations\n :return:\n \"\"\"\n with tf.variable_scope('global_step'):\n self.global_step_tensor = tf.Variable(0, trainable=False, name='global_step')\n self.global_step_input = tf.placeholder('int32', None, name='global_step_input')\n self.global_step_assign_op = self.global_step_tensor.assign(self.global_step_input)\n"
] | [
[
"tensorflow.nn.softmax",
"tensorflow.Variable",
"tensorflow.control_dependencies",
"tensorflow.get_collection",
"tensorflow.equal",
"tensorflow.placeholder",
"tensorflow.image.resize_bicubic",
"tensorflow.summary.merge_all",
"tensorflow.name_scope",
"tensorflow.pad",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.variable_scope",
"tensorflow.train.AdamOptimizer",
"tensorflow.summary.scalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
leozhoujf/tensorflow | [
"a5c2e20205089a1ef0edd497de1c4fb7f3c5ec4a"
] | [
"tensorflow/python/distribute/collective_all_reduce_strategy.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Class CollectiveAllReduceStrategy implementing DistributionStrategy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.core.protobuf import tensorflow_server_pb2\nfrom tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib\nfrom tensorflow.python.distribute import cross_device_utils\nfrom tensorflow.python.distribute import device_util\nfrom tensorflow.python.distribute import distribute_lib\nfrom tensorflow.python.distribute import input_lib\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.distribute import multi_worker_util\nfrom tensorflow.python.distribute import numpy_dataset\nfrom tensorflow.python.distribute import reduce_util\nfrom tensorflow.python.distribute import values\nfrom tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver\nfrom tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import tape\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import collective_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n# TODO(yuefengz): support in-graph replication.\n@tf_export(\"distribute.experimental.MultiWorkerMirroredStrategy\", v1=[])\nclass CollectiveAllReduceStrategy(distribute_lib.Strategy):\n \"\"\"Distribution strategy that uses collective ops for all-reduce.\n\n It is similar to MirroredStrategy but it uses collective ops for reduction.\n\n By default it uses all local GPUs or CPU for single-worker training.\n\n When 'TF_CONFIG' environment variable is given, it parses cluster_spec,\n task_type and task_id from 'TF_CONFIG' and turns into a multi-worker strategy\n which mirrores models on GPUs of all machines in a cluster. In the current\n implementation, it uses all GPUs in a cluster and it assumes all workers have\n the same number of GPUs.\n\n It supports both eager mode and graph mode. However, for eager mode, it has to\n set up the eager context in its constructor and therefore all ops in eager\n mode have to run after the strategy object is created.\n\n Args:\n communication: optional Enum of type\n `distribute.experimental.CollectiveCommunication`. 
This provides a way\n for the user to override the choice of collective op communication.\n Possible values include `AUTO`, `RING`, and `NCCL`.\n \"\"\"\n\n def __init__(\n self,\n communication=cross_device_ops_lib.CollectiveCommunication.AUTO):\n \"\"\"Initializes the object.\"\"\"\n super(CollectiveAllReduceStrategy, self).__init__(\n CollectiveAllReduceExtended(\n self,\n communication=communication))\n\n\n@tf_export(v1=[\"distribute.experimental.MultiWorkerMirroredStrategy\"])\nclass CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):\n\n __doc__ = CollectiveAllReduceStrategy.__doc__\n\n def __init__(\n self,\n communication=cross_device_ops_lib.CollectiveCommunication.AUTO):\n \"\"\"Initializes the object.\"\"\"\n super(CollectiveAllReduceStrategyV1, self).__init__(\n CollectiveAllReduceExtended(\n self,\n communication=communication))\n\n\nclass CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):\n \"\"\"Implementation of CollectiveAllReduceStrategy.\"\"\"\n\n def __init__(self,\n container_strategy,\n communication,\n cluster_resolver=TFConfigClusterResolver()):\n distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)\n assert isinstance(\n communication,\n cross_device_ops_lib.CollectiveCommunication)\n self._communication = communication\n self._initialize_strategy(cluster_resolver)\n assert isinstance(self._get_cross_device_ops(),\n cross_device_ops_lib.CollectiveAllReduce)\n\n def _initialize_strategy(self, cluster_resolver):\n if cluster_resolver.cluster_spec().as_dict():\n self._initialize_multi_worker(cluster_resolver)\n else:\n self._initialize_local(cluster_resolver)\n\n def _initialize_local(self, cluster_resolver):\n \"\"\"Initializes the object for local training.\"\"\"\n self._is_chief = True\n self._num_workers = 1\n\n if ops.executing_eagerly_outside_functions():\n try:\n context.context().configure_collective_ops(\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n use_nccl_communication=(self._communication == cross_device_ops_lib\n .CollectiveCommunication.NCCL))\n except RuntimeError:\n logging.warning(\"Collective ops is not configured at program startup. \"\n \"Some performance features may not be enabled.\")\n self._collective_ops_configured = True\n\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if num_gpus:\n local_devices = tuple(\"/device:GPU:%d\" % i for i in range(num_gpus))\n else:\n local_devices = (\"/device:CPU:0\",)\n self._worker_device = device_util.canonicalize(\"/device:CPU:0\")\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n self._collective_keys = cross_device_utils.CollectiveKeys()\n super(CollectiveAllReduceExtended, self)._initialize_local(local_devices)\n # TODO(yuefengz): remove num_gpus_per_worker from CollectiveAllReduce.\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n num_workers=self._num_workers,\n num_gpus_per_worker=num_gpus,\n collective_keys=self._collective_keys)\n\n self._cluster_spec = None\n self._task_type = None\n self._task_id = None\n\n # This is a mark to tell whether we are running with standalone client or\n # independent worker. 
Right now with standalone client, strategy object is\n # created as local strategy and then turn into multi-worker strategy via\n # configure call.\n self._local_or_standalone_client_mode = True\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\"Single-worker CollectiveAllReduceStrategy with local_devices \"\n \"= %r, communication = %s\", local_devices, self._communication)\n\n def _initialize_multi_worker(self, cluster_resolver):\n \"\"\"Initializes the object for multi-worker training.\"\"\"\n cluster_spec = multi_worker_util.normalize_cluster_spec(\n cluster_resolver.cluster_spec())\n task_type = cluster_resolver.task_type\n task_id = cluster_resolver.task_id\n if task_type is None or task_id is None:\n raise ValueError(\"When `cluster_spec` is given, you must also specify \"\n \"`task_type` and `task_id`.\")\n self._cluster_spec = cluster_spec\n self._task_type = task_type\n self._task_id = task_id\n\n self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)\n if not self._num_workers:\n raise ValueError(\"No `worker`, `chief` or `evaluator` tasks can be found \"\n \"in `cluster_spec`.\")\n\n self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,\n task_id)\n\n self._worker_device = \"/job:%s/task:%d\" % (task_type, task_id)\n self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)\n\n if (ops.executing_eagerly_outside_functions() and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n context.context().configure_collective_ops(\n collective_leader=multi_worker_util.collective_leader(\n cluster_spec, task_type, task_id),\n scoped_allocator_enabled_ops=(\"CollectiveReduce\",),\n use_nccl_communication=(self._communication == cross_device_ops_lib\n .CollectiveCommunication.NCCL),\n device_filters=(\"/job:%s/task:%d\" % (task_type, task_id),))\n self._collective_ops_configured = True\n\n # Starting a std server in eager mode and in independent worker mode.\n if (context.executing_eagerly() and\n not getattr(self, \"_std_server_started\", False) and\n not getattr(self, \"_local_or_standalone_client_mode\", False)):\n # Checking _local_or_standalone_client_mode as well because we should not\n # create the std server in standalone client mode.\n config_proto = config_pb2.ConfigProto()\n config_proto = self._update_config_proto(config_proto)\n server_def = tensorflow_server_pb2.ServerDef(\n cluster=cluster_spec.as_cluster_def(),\n default_session_config=config_proto,\n job_name=task_type,\n task_index=task_id,\n protocol=cluster_resolver.rpc_layer or \"grpc\")\n context.context().enable_collective_ops(server_def)\n self._std_server_started = True\n # The `ensure_initialized` is needed before calling\n # `context.context().devices()`.\n context.context().ensure_initialized()\n logging.info(\n \"Enabled multi-worker collective ops with available devices: %r\",\n context.context().devices())\n\n # TODO(yuefengz): The `num_gpus` is only for this particular task. It\n # assumes all workers have the same number of GPUs. 
We should remove this\n # assumption by querying all tasks for their numbers of GPUs.\n # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in\n # some cases.\n if isinstance(cluster_resolver, TFConfigClusterResolver):\n num_gpus = context.num_gpus()\n else:\n num_gpus = cluster_resolver.num_accelerators().get(\"GPU\", 0)\n\n if num_gpus:\n local_devices = tuple(\"%s/device:GPU:%d\" % (self._worker_device, i)\n for i in range(num_gpus))\n else:\n local_devices = (self._worker_device,)\n\n self._collective_keys = cross_device_utils.CollectiveKeys()\n super(CollectiveAllReduceExtended, self)._initialize_local(local_devices)\n self._input_workers = input_lib.InputWorkers(\n self._device_map, [(self._worker_device, self.worker_devices)])\n self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(\n num_workers=self._num_workers,\n num_gpus_per_worker=num_gpus,\n collective_keys=self._collective_keys)\n\n # Add a default device so that ops without specified devices will not end up\n # on other workers.\n self._default_device = \"/job:%s/task:%d\" % (task_type, task_id)\n\n # Save the num_gpus_per_worker and rpc_layer for configure method.\n self._num_gpus_per_worker = num_gpus\n self._rpc_layer = cluster_resolver.rpc_layer\n self._warn_nccl_no_gpu()\n\n logging.info(\n \"Multi-worker CollectiveAllReduceStrategy with cluster_spec = %r, \"\n \"task_type = %r, task_id = %r, num_workers = %r, local_devices = %r, \"\n \"communication = %s\", cluster_spec.as_dict(), task_type,\n task_id, self._num_workers, local_devices,\n self._communication)\n\n def _create_variable(self, next_creator, *args, **kwargs):\n colocate_with = kwargs.pop(\"colocate_with\", None)\n if colocate_with is None:\n device_map = self._device_map\n logical_device = 0 # TODO(josh11b): Get logical device from scope here.\n elif isinstance(colocate_with, numpy_dataset.SingleDevice):\n with ops.device(colocate_with.device):\n return next_creator(*args, **kwargs)\n else:\n device_map = colocate_with.device_map\n logical_device = colocate_with.logical_device\n\n def _real_mirrored_creator(devices, *args, **kwargs):\n \"\"\"Creates one MirroredVariable on the current worker.\"\"\"\n unique_var_name = ops.get_default_graph().unique_name(\n kwargs[\"name\"], mark_as_used=False).rstrip(\"/\")\n # pylint: disable=protected-access\n collective_instance_key = self._collective_keys.get_instance_key(\n key_id=unique_var_name)\n # Only the first device participles in the broadcast of initial values.\n group_key = self._collective_keys.get_group_key([devices[0]])\n group_size = self._num_workers\n if \"initial_value\" not in kwargs:\n raise ValueError(\"Initial value must be specified.\")\n initial_value = kwargs[\"initial_value\"]\n if callable(initial_value):\n initial_value_fn = initial_value\n else:\n initial_value_fn = lambda: initial_value\n\n value_list = []\n for i, d in enumerate(devices):\n with ops.init_scope(), ops.device(d):\n if i == 0:\n # The initial value fn makes sure variables all initialized to\n # same values. 
The first device of the chief worker will send their\n # variable values to other workers.\n def _overridden_initial_value_fn(device=d, index=i): # pylint: disable=g-missing-docstring\n with ops.device(device):\n initial_value = initial_value_fn()\n assert not callable(initial_value)\n initial_value = ops.convert_to_tensor(initial_value)\n\n assert index == 0, index\n if self._num_workers > 1:\n if self._is_chief:\n bcast_send = collective_ops.broadcast_send(\n initial_value, initial_value.shape, initial_value.dtype,\n group_size, group_key, collective_instance_key)\n with ops.control_dependencies([bcast_send]):\n return array_ops.identity(initial_value)\n else:\n return collective_ops.broadcast_recv(\n initial_value.shape, initial_value.dtype, group_size,\n group_key, collective_instance_key)\n return initial_value\n else:\n # Give replicas meaningful distinct names:\n var0name = value_list[0].name.split(\":\")[0]\n # We append a / to variable names created on replicas with id > 0 to\n # ensure that we ignore the name scope and instead use the given\n # name as the absolute name of the variable.\n kwargs[\"name\"] = \"%s/replica_%d/\" % (var0name, i)\n\n # Variables on non-first replica get initial values from the\n # variables created on the first device of each worker.\n def _overridden_initial_value_fn(device=d, index=i):\n assert index > 0\n with ops.device(device):\n if context.executing_eagerly():\n return array_ops.identity(value_list[0].value())\n else:\n return array_ops.identity(value_list[0].initial_value)\n\n kwargs[\"initial_value\"] = _overridden_initial_value_fn\n with context.device_policy(context.DEVICE_PLACEMENT_SILENT):\n # Don't record operations (e.g. other variable reads) during\n # variable creation.\n with tape.stop_recording():\n v = next_creator(*args, **kwargs)\n\n if i == 0:\n actual_var_name = v.name.split(\":\")[0]\n assert unique_var_name == actual_var_name, \"%r vs %r\" % (\n unique_var_name, actual_var_name)\n assert not isinstance(v, values.DistributedVariable)\n value_list.append(v)\n return value_list\n\n # pylint: disable=protected-access\n return mirrored_strategy._create_mirrored_variable(\n self._container_strategy(), device_map, logical_device,\n _real_mirrored_creator, *args, **kwargs)\n\n def _make_input_context(self):\n if self._cluster_spec is None:\n input_pipeline_id = 0\n else:\n input_pipeline_id = multi_worker_util.id_in_cluster(\n self._cluster_spec, self._task_type, self._task_id)\n input_context = distribute_lib.InputContext(\n num_input_pipelines=self._num_workers,\n input_pipeline_id=input_pipeline_id,\n num_replicas_in_sync=self._num_replicas_in_sync)\n return input_context\n\n def _experimental_distribute_dataset(self, dataset):\n input_context = self._make_input_context()\n return input_lib.get_distributed_dataset(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _make_dataset_iterator(self, dataset):\n \"\"\"Distributes the dataset to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return input_lib.DatasetIterator(\n dataset,\n self._input_workers,\n self._container_strategy(),\n split_batch_by=self._num_replicas_in_sync,\n input_context=input_context)\n\n def _make_input_fn_iterator(\n self,\n input_fn,\n replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):\n \"\"\"Distributes the input function to each local GPU.\"\"\"\n input_context = self._make_input_context()\n return 
input_lib.InputFunctionIterator(input_fn, self._input_workers,\n [input_context],\n self._container_strategy())\n\n def _configure(self,\n session_config=None,\n cluster_spec=None,\n task_type=None,\n task_id=None):\n \"\"\"Configures the object.\n\n Args:\n session_config: a `tf.compat.v1.ConfigProto`\n cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the\n cluster configurations.\n task_type: the current task type, such as \"worker\".\n task_id: the current task id.\n\n Raises:\n ValueError: if `task_type` is not in the `cluster_spec`.\n \"\"\"\n if cluster_spec:\n # Use the num_gpus_per_worker recorded in constructor since _configure\n # doesn't take num_gpus.\n cluster_resolver = SimpleClusterResolver(\n cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),\n task_type=task_type,\n task_id=task_id,\n num_accelerators={\"GPU\": self._num_gpus_per_worker},\n rpc_layer=self._rpc_layer)\n self._initialize_multi_worker(cluster_resolver)\n assert isinstance(self._get_cross_device_ops(),\n cross_device_ops_lib.CollectiveAllReduce)\n\n if session_config:\n session_config.CopyFrom(self._update_config_proto(session_config))\n\n def _update_config_proto(self, config_proto):\n updated_config = copy.deepcopy(config_proto)\n # Enable the scoped allocator optimization for CollectiveOps. This\n # optimization converts many small all-reduces into fewer larger\n # all-reduces.\n rewrite_options = updated_config.graph_options.rewrite_options\n rewrite_options.scoped_allocator_optimization = (\n rewriter_config_pb2.RewriterConfig.ON)\n # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =\n # [\"CollectiveReduce\"]. Since we can't assign to a repeated proto field, we\n # clear and then append.\n del rewrite_options.scoped_allocator_opts.enable_op[:]\n rewrite_options.scoped_allocator_opts.enable_op.append(\"CollectiveReduce\")\n\n if self._communication == cross_device_ops_lib.CollectiveCommunication.NCCL:\n updated_config.experimental.collective_nccl = True\n\n if not self._cluster_spec:\n return updated_config\n\n assert self._task_type\n assert self._task_id is not None\n\n # Collective group leader is needed for collective ops to coordinate\n # workers.\n updated_config.experimental.collective_group_leader = (\n multi_worker_util.collective_leader(self._cluster_spec, self._task_type,\n self._task_id))\n\n # The device filters prevent communication between workers.\n del updated_config.device_filters[:]\n updated_config.device_filters.append(\n \"/job:%s/task:%d\" % (self._task_type, self._task_id))\n\n return updated_config\n\n def _reduce_to(self, reduce_op, value, destinations):\n if (isinstance(value, values.Mirrored) and\n reduce_op == reduce_util.ReduceOp.MEAN):\n return value\n assert not isinstance(value, values.Mirrored)\n\n if (isinstance(value, values.DistributedValues) and\n len(self.worker_devices) == 1):\n value = value.values[0]\n\n # When there are multiple workers, we need to reduce across workers using\n # collective ops.\n if (not isinstance(value, values.DistributedValues) and\n self._num_workers == 1):\n # This function handles reducing values that are not PerReplica or\n # Mirrored values. 
For example, the same value could be present on all\n # replicas in which case `value` would be a single value or value could\n # be 0.\n return cross_device_ops_lib.reduce_non_distributed_value(\n reduce_op, self._device_map, value, destinations)\n return self._get_cross_device_ops().reduce(\n reduce_op, value, destinations=destinations)\n\n def _warn_nccl_no_gpu(self):\n if ((self._communication ==\n cross_device_ops_lib.CollectiveCommunication.NCCL) and\n self._num_gpus_per_worker == 0):\n logging.warning(\"Enabled NCCL communication but no GPUs detected/\"\n \"specified.\")\n\n @property\n def experimental_between_graph(self):\n return True\n\n @property\n def experimental_should_init(self):\n return True\n\n @property\n def should_checkpoint(self):\n return self._is_chief\n\n @property\n def should_save_summary(self):\n return self._is_chief\n\n @property\n def _num_replicas_in_sync(self):\n return len(self.worker_devices) * self._num_workers\n\n # TODO(priyag): Delete this once all strategies use global batch size.\n @property\n def _global_batch_size(self):\n \"\"\"`make_dataset_iterator` and `make_numpy_iterator` use global batch size.\n\n `make_input_fn_iterator` assumes per-replica batching.\n\n Returns:\n Boolean.\n \"\"\"\n return True\n"
] | [
[
"tensorflow.python.distribute.distribute_lib.StrategyExtendedV1.__init__",
"tensorflow.python.distribute.cross_device_ops.reduce_non_distributed_value",
"tensorflow.python.distribute.numpy_dataset.SingleDevice",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.distribute.multi_worker_util.id_in_cluster",
"tensorflow.python.eager.tape.stop_recording",
"tensorflow.python.framework.ops.device",
"tensorflow.python.eager.context.context",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.distribute.distribute_lib.InputContext",
"tensorflow.python.distribute.multi_worker_util.worker_count",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.eager.context.device_policy",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.collective_ops.broadcast_recv",
"tensorflow.python.distribute.cross_device_ops.CollectiveAllReduce",
"tensorflow.python.distribute.multi_worker_util.is_chief",
"tensorflow.python.distribute.cross_device_utils.CollectiveKeys",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.distribute.multi_worker_util.collective_leader",
"tensorflow.python.distribute.multi_worker_util.normalize_cluster_spec",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.distribute.cluster_resolver.TFConfigClusterResolver",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.collective_ops.broadcast_send",
"tensorflow.python.distribute.input_lib.InputWorkers",
"tensorflow.core.protobuf.config_pb2.ConfigProto"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
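As a brief aside on the entry above: a minimal usage sketch of the exported `tf.distribute.experimental.MultiWorkerMirroredStrategy` API could look like the following. It assumes a TF 1.14/2.x-era build where this experimental symbol and `CollectiveCommunication` are still available; the Keras model is only a placeholder.

```python
# Illustrative sketch only; assumes a TensorFlow release that still exposes the
# experimental MultiWorkerMirroredStrategy / CollectiveCommunication symbols.
import tensorflow as tf

# AUTO lets the strategy choose between RING and NCCL collectives itself.
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy(
    communication=tf.distribute.experimental.CollectiveCommunication.AUTO)

with strategy.scope():
    # Variables created here are mirrored and kept in sync via collective ops.
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
    model.compile(optimizer="sgd", loss="mse")
```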
NanYoMy/brainstorm | [
"b6c20392ebfaf8472fbdc5ffb981948b9826404b"
] | [
"src/utils.py"
] | [
"import os\nimport re\nimport sys\n\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageDraw, ImageFont\nimport tensorflow as tf\nimport textwrap\n\n\nfrom src import networks\n\n\nimport medipy.metrics as medipy_metrics\nimport pynd.segutils as pynd_segutils\n\n#############################\n# File utils\n#############################\ndef get_latest_epoch_in_dir(d, match_prefixes=()):\n model_files = [f for f in os.listdir(d) if f.endswith('.h5')]\n\n epoch_nums = [re.search('(?<=epoch)[0-9]*', os.path.basename(f)).group(0) for f in model_files]\n epoch_nums = list(set([int(n) for n in epoch_nums if n is not None and n is not '']))\n max_epoch_num = 0\n\n if len(epoch_nums) == 0:\n return None\n\n if len(match_prefixes) > 0:\n for n in reversed(epoch_nums):\n curr_filenames = [os.path.basename(f) for f in model_files if 'epoch{}'.format(n) in f]\n if np.all([np.any([p in f for f in curr_filenames]) for p in match_prefixes]) and n > max_epoch_num:\n max_epoch_num = n\n else:\n max_epoch_num = max(epoch_nums)\n\n return max_epoch_num\n\n\ndef make_output_dirs(experiment_base_name: str,\n prompt_delete_existing: bool = True,\n prompt_update_name: bool = True,\n exp_root: str = './experiments/',\n existing_exp_dir=None,\n # for unit testing\n debug_delete_input=None,\n debug_rename_input=None\n ):\n '''\n Creates the experiment directory (for storing log files, parameters) as well as subdirectories for\n files, logs and models.\n\n If a directory already exists for this experiment,\n\n :param experiment_base_name: desired name for the experiment\n :param prompt_delete_existing: if we find an existing directory with the same name,\n do we tell the user? if not, just continue in the existing directory by default\n :param prompt_update_name: if the new experiment name differs from the existing_exp_dir,\n do we try to rename the existing directory to the new naming scheme?\n :param exp_root: root directory for all experiments\n :param existing_exp_dir: previous directory (if any) of this experiment\n :return:\n '''\n\n do_rename = False\n\n if existing_exp_dir is None:\n # brand new experiment\n experiment_name = experiment_base_name\n target_exp_dir = os.path.join(exp_root, experiment_base_name)\n else: # we are loading from an existing directory\n if re.search('_[0-9]*$', existing_exp_dir) is not None:\n # we are probably trying to load from a directory like experiments/<exp_name>_1,\n # so we should track the experiment_name with the correct id\n experiment_name = os.path.basename(existing_exp_dir)\n target_exp_dir = os.path.join(exp_root, experiment_name)\n else:\n # we are trying to load from a directory, but the newly provided experiment name doesn't match.\n # this can happen when the naming scheme has changed\n target_exp_dir = os.path.join(exp_root, experiment_base_name)\n\n # if it has changed, we should prompt to rename the old experiment to the new one\n if prompt_update_name:\n target_exp_dir, do_rename = _prompt_rename(\n existing_exp_dir, target_exp_dir, debug_rename_input)\n\n if do_rename: # we might have changed the model name to something that exists, so prompt if so\n print('Renaming {} to {}!'.format(existing_exp_dir, target_exp_dir))\n prompt_delete_existing = True\n else:\n target_exp_dir = existing_exp_dir # just assume we want to continue in the old one\n\n experiment_name = os.path.basename(target_exp_dir)\n\n print('Existing exp dir: {}'.format(existing_exp_dir))\n print('Target exp dir: {}'.format(target_exp_dir))\n\n figures_dir = 
os.path.join(target_exp_dir, 'figures')\n logs_dir = os.path.join(target_exp_dir, 'logs')\n models_dir = os.path.join(target_exp_dir, 'models')\n\n copy_count = 0\n\n # check all existing dirs with the same prefix (and all suffixes e.g. _1, _2)\n while os.path.isdir(figures_dir) or os.path.isdir(logs_dir) or os.path.isdir(models_dir):\n # list existing files\n if os.path.isdir(figures_dir):\n figure_files = [os.path.join(figures_dir, f) for f in os.listdir(figures_dir) if\n f.endswith('.jpg') or f.endswith('.png')]\n else:\n figure_files = []\n\n # check for .log files\n if os.path.isdir(logs_dir):\n log_files = [os.path.join(logs_dir, l) for l in os.listdir(logs_dir) \\\n if os.path.isfile(os.path.join(logs_dir, l))] \\\n + [os.path.join(target_exp_dir, f) for f in os.listdir(target_exp_dir) if f.endswith('.log')]\n else:\n log_files = []\n\n # check for model files\n if os.path.isdir(models_dir):\n model_files = [os.path.join(models_dir, m) for m in os.listdir(models_dir) \\\n if os.path.isfile(os.path.join(models_dir, m))]\n else:\n model_files = []\n\n if prompt_delete_existing and (len(figure_files) > 0 or len(log_files) > 0 or len(model_files) > 0):\n # TODO: print some of the latest figures, logs and models so we can see what epoch\n # these experiments trained until\n print(\n 'Remove \\n\\t{} figures from {}\\n\\t{} logs from {}\\n\\t{} models from {}?[y]es / [n]o (create new folder) / [C]ontinue existing / remove [m]odels too: [y/n/C/m]'.format(\n len(figure_files), figures_dir, len(log_files), logs_dir, len(model_files), models_dir))\n\n if debug_delete_input:\n print('Debug input: {}'.format(debug_delete_input))\n choice = debug_delete_input\n else:\n choice = input().lower()\n\n remove_choices = ['yes', 'y', 'ye']\n make_new_choices = ['no', 'n']\n continue_choices = ['c', '']\n remove_models_too = ['m']\n\n if choice in remove_choices:\n for f in figure_files + log_files:\n print('Removing {}'.format(f))\n os.remove(f)\n elif choice in remove_models_too:\n for f in figure_files + log_files + model_files:\n print('Removing {}'.format(f))\n os.remove(f)\n elif choice in continue_choices:\n print('Continuing in existing folder...')\n break\n\n elif choice in make_new_choices:\n copy_count += 1\n experiment_name = experiment_base_name + '_{}'.format(copy_count)\n target_exp_dir = os.path.join(exp_root, experiment_name)\n\n figures_dir = os.path.join(exp_root, experiment_name, 'figures')\n logs_dir = os.path.join(exp_root, experiment_name, 'logs')\n models_dir = os.path.join(exp_root, experiment_name, 'models')\n else:\n break\n\n if do_rename:\n # simply rename the existing old_exp_dir to exp_dir, rather than creating a new one\n os.rename(existing_exp_dir, target_exp_dir)\n else:\n # create each directory\n if not os.path.isdir(target_exp_dir):\n os.makedirs(target_exp_dir)\n\n # make subdirectories if they do not exist already\n if not os.path.isdir(figures_dir):\n os.mkdir(figures_dir)\n if not os.path.isdir(logs_dir):\n os.mkdir(logs_dir)\n if not os.path.isdir(models_dir):\n os.mkdir(models_dir)\n\n return experiment_name, target_exp_dir, figures_dir, logs_dir, models_dir\n\n\ndef _prompt_rename(old_dir, new_dir, debug_input=None):\n print('Rename dir \\n{} to \\n{} [y/N]?'.format(old_dir, new_dir))\n\n if debug_input:\n print('Debug input: {}'.format(debug_input))\n choice = debug_input\n else:\n choice = input().lower()\n\n rename_choices = ['yes', 'y']\n\n if choice in rename_choices:\n return new_dir, True\n else:\n return old_dir, 
False\n\n\n\n#############################\n# Batch utils\n#############################\n\ndef gen_batch(ims_data, labels_data,\n batch_size, randomize=False):\n '''\n :param ims_data: list of images, or an image.\n If a single image, it will be automatically converted to a list\n\n :param labels_data: list of other data (e.g. labels) that do not require\n image normalization or augmentation, but might need to be converted to onehot\n\n :param batch_size:\n :param randomize: bool to randomize indices per batch\n\n :return:\n '''\n\n # make sure everything is a list\n if not isinstance(ims_data, list):\n ims_data = [ims_data]\n\n # if we have labels that we want to generate from,\n # put everything into a list for consistency\n # (useful if we have labels and aux data)\n if labels_data is not None:\n if not isinstance(labels_data, list):\n labels_data = [labels_data]\n\n\n idxs = [-1]\n\n n_ims = ims_data[0].shape[0]\n\n while True:\n if randomize:\n idxs = np.random.choice(n_ims, batch_size, replace=True)\n else:\n idxs = np.linspace(idxs[-1] + 1, idxs[-1] + 1 + batch_size - 1, batch_size, dtype=int)\n restart_idxs = False\n while np.any(idxs >= n_ims):\n idxs[np.where(idxs >= n_ims)] = idxs[np.where(idxs >= n_ims)] - n_ims\n restart_idxs = True\n\n ims_batches = []\n for i, im_data in enumerate(ims_data):\n X_batch = im_data[idxs]\n\n if not X_batch.dtype == np.float32 and not X_batch.dtype == np.float64:\n X_batch = (X_batch / 255.).astype(np.float32)\n\n ims_batches.append(X_batch)\n\n if labels_data is not None:\n labels_batches = []\n for li, Y in enumerate(labels_data):\n if Y is None:\n Y_batch = None\n else:\n if isinstance(Y, np.ndarray):\n Y_batch = Y[idxs]\n else: # in case it's a list\n Y_batch = [Y[idx] for idx in idxs]\n labels_batches.append(Y_batch)\n else:\n labels_batches = None\n\n if not randomize and restart_idxs:\n idxs[-1] = -1\n\n yield tuple(ims_batches) + tuple(labels_batches)\n\n#############################\n# Segmentation losses and metrics\n############################\ndef onehot_to_labels(oh, n_classes=0, label_mapping=None):\n # assume oh is batch_size (x R x C) x n_labels\n if n_classes > 0 and label_mapping is None:\n label_mapping = np.arange(0, n_classes)\n elif n_classes == 0 and label_mapping is None:\n label_mapping = list(np.arange(0, oh.shape[-1]).astype(int))\n\n argmax_idxs = np.argmax(oh, axis=-1).astype(int)\n labels = np.reshape(np.asarray(label_mapping)[argmax_idxs.flatten()], oh.shape[:-1]).astype(type(label_mapping[0]))\n\n return labels\n\ndef labels_to_onehot(labels, n_classes=0, label_mapping=None):\n if labels is None:\n return labels\n # we can use either n_classes (which assumes labels from 0 to n_classes-1) or a label_mapping\n if label_mapping is None and n_classes == 0:\n label_mapping = list(np.unique(labels))\n n_classes = len(np.unique(labels)) # np.max(labels)\n elif n_classes > 0 and label_mapping is None:\n # infer label mapping from # of classes\n label_mapping = np.linspace(0, n_classes, n_classes, endpoint=False).astype(int).tolist()\n elif n_classes == 0 and label_mapping is not None:\n n_classes = len(label_mapping)\n\n if labels.shape[-1] == len(label_mapping) and np.max(labels) <= 1. 
and np.min(labels) >= 0.:\n # already onehot\n return labels\n\n if labels.shape[-1] == 1:\n labels = np.take(labels, 0, axis=-1)\n\n labels = np.asarray(labels)\n\n if len(label_mapping) == 2 and 0 in label_mapping and 1 in label_mapping and type(\n labels) == np.ndarray and np.array_equal(np.max(labels, axis=-1), np.ones((labels.shape[0],))):\n return labels\n\n labels_flat = labels.flatten()\n onehot_flat = np.zeros(labels_flat.shape + (n_classes,), dtype=int)\n for li in range(n_classes):\n onehot_flat[np.where(labels_flat == label_mapping[li]), li] = 1\n\n onehot = np.reshape(onehot_flat, labels.shape + (n_classes,)).astype(np.float32)\n return onehot\n\nclass SpatialSegmentSmoothness(object):\n def __init__(self, n_chans, n_dims,\n warped_contours_layer_output=None,\n lambda_i=1.\n ):\n self.n_dims = n_dims\n self.warped_contours_layer_output = warped_contours_layer_output\n self.lambda_i = lambda_i\n\n def compute_loss(self, y_true, y_pred):\n loss = 0\n segments_mask = 1. - self.warped_contours_layer_output\n\n for d in range(self.n_dims):\n # we use x to indicate the current spatial dimension, not just the first\n dCdx = tf.gather(y_pred, tf.range(1, tf.shape(y_pred)[d + 1]), axis=d + 1) \\\n - tf.gather(y_pred, tf.range(0, tf.shape(y_pred)[d + 1] - 1), axis=d + 1)\n\n # average across spatial dims and color channels\n loss += tf.reduce_mean(tf.abs(dCdx * tf.gather(segments_mask, tf.range(1, tf.shape(y_pred)[d+1]), axis=d+1)))\n return loss\n\n\ndef log_losses(progressBar, tensorBoardWriter, logger, loss_names, loss_vals, iter_count):\n '''\n Writes loss names and vals to keras progress bar, tensorboard, and a python logger\n\n :param progressBar: keras progbar object\n :param tensorBoardWriter: tensorboard writer object\n :param logger: python logger\n :param loss_names: list of strings\n :param loss_vals: list of numbers\n :param iter_count: integer representing current iteration\n :return:\n '''\n if not isinstance(loss_vals, list): # occurs when model only has one loss\n loss_vals = [loss_vals]\n\n if progressBar is not None:\n progressBar.add(1, values=[(loss_names[i], loss_vals[i]) for i in range(len(loss_vals))])\n\n if logger is not None:\n logger.debug(', '.join(['{}: {}'.format(loss_names[i], loss_vals[i]) for i in range(len(loss_vals))]))\n\n if tensorBoardWriter is not None:\n for i in range(len(loss_names)):\n tensorBoardWriter.add_summary(\n tf.Summary(value=[tf.Summary.Value(tag=loss_names[i], simple_value=loss_vals[i]), ]), iter_count)\n if i >= len(loss_vals):\n break\n\n\ndef eval_seg_sas_from_gen(sas_model, atlas_vol, atlas_labels,\n eval_gen, label_mapping, n_eval_examples, batch_size, logger=None):\n '''\n Evaluates a single-atlas segmentation method on a bunch of evaluation volumes.\n :param sas_model: spatial transform model used for SAS. 
Can be voxelmorph.\n :param atlas_vol: atlas volume\n :param atlas_labels: atlas segmentations\n :param eval_gen: generator that yields vols_valid, segs_valid batches\n :param label_mapping: list of label ids that will appear in segs, ordered by how they map to channels\n :param n_eval_examples: total number of examples to evaluate\n :param batch_size: batch size to use in evaluation\n :param logger: python logger if we want to log messages\n :return:\n '''\n img_shape = atlas_vol.shape[1:]\n\n seg_warp_model = networks.warp_model(\n img_shape=img_shape,\n interp_mode='nearest',\n indexing='xy',\n )\n\n from keras.models import Model\n from keras.layers import Input, Activation\n from keras.optimizers import Adam\n n_labels = len(label_mapping)\n\n warped_in = Input(img_shape[0:-1] + (n_labels,))\n warped = Activation('softmax')(warped_in)\n\n ce_model = Model(inputs=[warped_in], outputs=[warped], name='ce_model')\n ce_model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001))\n\n # test metrics: categorical cross-entropy and dice\n dice_per_label = np.zeros((n_eval_examples, len(label_mapping)))\n cces = np.zeros((n_eval_examples,))\n accs = np.zeros((n_eval_examples,))\n all_ids = []\n for bi in range(n_eval_examples):\n if logger is not None:\n logger.debug('Testing on subject {} of {}'.format(bi, n_eval_examples))\n else:\n print('Testing on subject {} of {}'.format(bi, n_eval_examples))\n X, Y, _, ids = next(eval_gen)\n Y_oh = labels_to_onehot(Y, label_mapping=label_mapping)\n\n warped, warp = sas_model.predict([atlas_vol, X])\n\n # warp our source models according to the predicted flow field. get rid of channels\n if Y.shape[-1] == 1:\n Y = Y[..., 0]\n preds_batch = seg_warp_model.predict([atlas_labels[..., np.newaxis], warp])[..., 0]\n preds_oh = labels_to_onehot(preds_batch, label_mapping=label_mapping)\n\n cce = np.mean(ce_model.evaluate(preds_oh, Y_oh, verbose=False))\n subject_dice_per_label = medipy_metrics.dice(\n Y, preds_batch, labels=label_mapping)\n\n nonbkgmap = (Y > 0)\n acc = np.sum(((Y == preds_batch) * nonbkgmap).astype(int)) / np.sum(nonbkgmap).astype(float)\n print(acc)\n dice_per_label[bi] = subject_dice_per_label\n cces[bi] = cce\n accs[bi] = acc\n all_ids += ids\n\n if logger is not None:\n logger.debug('Dice per label: {}, {}'.format(label_mapping, dice_per_label))\n logger.debug('Mean dice (no bkg): {}'.format(np.mean(dice_per_label[:, 1:])))\n logger.debug('Mean CE: {}'.format(np.mean(cces)))\n logger.debug('Mean accuracy: {}'.format(np.mean(accs)))\n else:\n print('Dice per label: {}, {}'.format(label_mapping, dice_per_label))\n print('Mean dice (no bkg): {}'.format(np.mean(dice_per_label[:, 1:])))\n print('Mean CE: {}'.format(np.mean(cces)))\n print('Mean accuracy: {}'.format(np.mean(accs)))\n return cces, dice_per_label, accs, all_ids\n\n\ndef eval_seg_from_gen(segmenter_model,\n eval_gen, label_mapping, n_eval_examples, batch_size, logger=None):\n '''\n Evaluates accuracy of a segmentation CNN\n :param segmenter_model: keras model for segmenter\n :param eval_gen: genrator that yields vols_valid, segs_valid\n :param label_mapping: list of label ids, ordered by how they map to channels\n :param n_eval_examples: total number of volumes to evaluate\n :param batch_size: batch size (number of slices per batch)\n :param logger: python logger (optional)\n :return:\n '''\n\n # test metrics: categorical cross-entropy and dice\n dice_per_label = np.zeros((n_eval_examples, len(label_mapping)))\n cces = np.zeros((n_eval_examples,))\n accs = 
np.zeros((n_eval_examples,))\n all_ids = []\n for bi in range(n_eval_examples):\n if logger is not None:\n logger.debug('Testing on subject {} of {}'.format(bi, n_eval_examples))\n else:\n print('Testing on subject {} of {}'.format(bi, n_eval_examples))\n X, Y, _, ids = next(eval_gen)\n Y_oh = labels_to_onehot(Y, label_mapping=label_mapping)\n preds_batch, cce = segment_vol_by_slice(\n segmenter_model, X, label_mapping=label_mapping, batch_size=batch_size,\n Y_oh=Y_oh, compute_cce=True,\n )\n subject_dice_per_label = medipy_metrics.dice(\n Y, preds_batch, labels=label_mapping)\n\n # only consider pixels where the gt label is not bkg (if we count bkg, accuracy will be very high)\n nonbkgmap = (Y > 0)\n\n acc = np.sum(((Y == preds_batch) * nonbkgmap).astype(int)) / np.sum(nonbkgmap).astype(float)\n\n print(acc)\n dice_per_label[bi] = subject_dice_per_label\n cces[bi] = cce\n accs[bi] = acc\n all_ids += ids\n\n if logger is not None:\n logger.debug('Dice per label: {}, {}'.format(label_mapping, np.mean(dice_per_label, axis=0).tolist()))\n logger.debug('Mean dice (no bkg): {}'.format(np.mean(dice_per_label[:, 1:])))\n logger.debug('Mean CE: {}'.format(np.mean(cces)))\n logger.debug('Mean accuracy: {}'.format(np.mean(accs)))\n else:\n print('Dice per label: {}, {}'.format(label_mapping, np.mean(dice_per_label, axis=0).tolist()))\n print('Mean dice (no bkg): {}'.format(np.mean(dice_per_label[:, 1:])))\n print('Mean CE: {}'.format(np.mean(cces)))\n print('Mean accuracy: {}'.format(np.mean(accs)))\n return cces, dice_per_label, accs, all_ids\n\n\ndef segment_vol_by_slice(segmenter_model, X, label_mapping, batch_size=8, Y_oh=None, compute_cce=False):\n '''\n Segments a 3D volume by running a per-slice segmenter on batches of slices\n :param segmenter_model:\n :param X: 3D volume, we assume this has a batch size of 1\n :param label_mapping:\n :param batch_size:\n :return:\n '''\n n_slices = X.shape[-2]\n n_labels = len(label_mapping)\n preds = np.zeros(X.shape[:-1] + (1,))\n n_batches = int(np.ceil(float(n_slices) / batch_size))\n\n cce_total = 0.\n for sbi in range(n_batches):\n # slice in z, then make slices into batch\n X_batched_slices = np.transpose(\n X[0, :, :, sbi * batch_size: min(n_slices, (sbi + 1) * batch_size)],\n (2, 0, 1, 3))\n\n preds_slices_oh = segmenter_model.predict(X_batched_slices)\n if compute_cce:\n slice_cce = segmenter_model.evaluate(\n X_batched_slices,\n np.transpose(Y_oh[0, :, :, sbi * batch_size : min(n_slices, (sbi + 1) * batch_size)], (2, 0, 1, 3)),\n verbose=False)\n # if we have multiple losses, take the first one\n if isinstance(slice_cce, list):\n slice_cce = slice_cce[0]\n\n # we want an average over slices, so make sure we count the correct number in the batch\n cce_total += slice_cce * X_batched_slices.shape[0]\n # convert onehot to labels and assign to preds volume\n preds[0, :, :, sbi * batch_size: min(n_slices, (sbi + 1) * batch_size)] \\\n = np.transpose(onehot_to_labels(\n preds_slices_oh, label_mapping=label_mapping), (1, 2, 0))[..., np.newaxis]\n if compute_cce:\n return preds, cce_total / float(n_slices)\n else:\n return preds\n\n######################\n# Visualization utils\n######################\ndef label_ims(ims_batch, labels=None,\n normalize=False,\n display_h=128):\n '''\n Displays a batch of matrices as an image.\n\n :param ims_batch: n_batches x h x w x c array of images.\n :param labels: optional labels. 
Can be an n_batches length list of tuples, floats or strings\n :param normalize: boolean to normalize any [min, max] to [0, 255]\n :param display_h: integer number of pixels for the height of each image to display\n :return: an image (h' x w' x 3) with elements of the batch organized into rows\n '''\n\n if len(ims_batch.shape) == 3 and ims_batch.shape[-1] == 3:\n # already an image\n return ims_batch\n\n batch_size, h, w = ims_batch.shape[:3]\n if len(ims_batch.shape) == 3:\n n_chans = 1\n else:\n n_chans = ims_batch.shape[-1]\n\n if type(labels) == list and len(labels) == 1:\n # only label the first image\n labels = labels + [''] * (batch_size - 1)\n elif labels is not None and not type(labels) == list and not type(labels) == np.ndarray:\n # replicate labels for each row in the batch\n labels = [labels] * batch_size\n\n scale_factor = display_h / float(h)\n\n # make sure we have a channels dimension\n if len(ims_batch.shape) < 4:\n ims_batch = np.expand_dims(ims_batch, 3)\n\n if normalize:\n flattened_dims = np.prod(ims_batch.shape[1:])\n\n X_spatially_flat = np.reshape(ims_batch, (batch_size, -1, n_chans))\n X_orig_min = np.min(X_spatially_flat, axis=1)\n X_orig_max = np.max(X_spatially_flat, axis=1)\n\n # now actually flatten and normalize across channels\n X_flat = np.reshape(ims_batch, (batch_size, -1))\n\n X_flat = X_flat - np.tile(np.min(X_flat, axis=1, keepdims=True), (1, flattened_dims))\n # avoid dividing by 0\n X_flat = X_flat / np.clip(\n np.tile(np.max(X_flat, axis=1, keepdims=True), (1, flattened_dims)), 1e-5, None)\n\n ims_batch = np.reshape(X_flat, ims_batch.shape)\n ims_batch = np.clip(ims_batch.astype(np.float32), 0., 1.)\n\n for i in range(batch_size):\n if labels is not None and len(labels) > 0:\n if labels[i] is not None:\n labels[i] = '{},'.format(labels[i])\n else:\n labels[i] = ''\n # show the min, max of each channel\n for c in range(n_chans):\n labels[i] += '({:.2f}, {:.2f})'.format(round(X_orig_min[i, c], 2), round(X_orig_max[i, c], 2))\n else:\n ims_batch = np.clip(ims_batch, 0., 1.)\n\n if np.max(ims_batch) <= 1.0:\n ims_batch = ims_batch * 255.0\n\n out_im = []\n for i in range(batch_size):\n # convert grayscale to rgb if needed\n if len(ims_batch[i].shape) == 2:\n curr_im = np.tile(np.expand_dims(ims_batch[i], axis=-1), (1, 1, 3))\n elif ims_batch.shape[-1] == 1:\n curr_im = np.tile(ims_batch[i], (1, 1, 3))\n else:\n curr_im = ims_batch[i]\n\n # scale to specified display size\n if scale_factor > 2: # if we are upsampling by a lot, nearest neighbor can look really noisy\n interp = cv2.INTER_NEAREST\n else:\n interp = cv2.INTER_LINEAR\n\n if not scale_factor == 1:\n curr_im = cv2.resize(curr_im, None, fx=scale_factor, fy=scale_factor, interpolation=interp)\n\n out_im.append(curr_im)\n\n out_im = np.concatenate(out_im, axis=0).astype(np.uint8)\n\n # draw text labels on images if specified\n font_size = 15\n max_text_width = int(17 * display_h / 128.) 
# empirically determined\n\n if labels is not None and len(labels) > 0:\n im_pil = Image.fromarray(out_im)\n draw = ImageDraw.Draw(im_pil)\n\n for i in range(batch_size):\n if len(labels) > i: # if we have a label for this image\n if type(labels[i]) == tuple or type(labels[i]) == list:\n # format tuple or list nicely\n formatted_text = ', '.join([\n labels[i][j].decode('UTF-8') if type(labels[i][j]) == np.unicode_ \\\n else labels[i][j] if type(labels[i][j]) == str \\\n else str(round(labels[i][j], 2)) if isinstance(labels[i][j], float) \\\n else str(labels[i][j]) for j in range(len(labels[i]))])\n elif type(labels[i]) == float or type(labels[i]) == np.float32:\n formatted_text = str(round(labels[i], 2)) # round floats to 2 digits\n elif isinstance(labels[i], np.ndarray):\n # assume that this is a 1D array\n curr_labels = np.squeeze(labels[i]).astype(np.float32)\n formatted_text = np.array2string(curr_labels, precision=2, separator=',')\n #', '.join(['{}'.format(\n #\tnp.around(labels[i][j], 2)) for j in range(labels[i].size)])\n else:\n formatted_text = '{}'.format(labels[i])\n\n font = ImageFont.truetype('Ubuntu-M.ttf', font_size)\n # wrap the text so it fits\n formatted_text = textwrap.wrap(formatted_text, width=max_text_width)\n\n\n for li, line in enumerate(formatted_text):\n draw.text((5, i * display_h + 5 + 14 * li), line, font=font, fill=(50, 50, 255))\n\n out_im = np.asarray(im_pil)\n\n return out_im\n\n\ndef draw_segs_on_slice(vol_slice, seg_slice,\n include_labels=None,\n colors=None,\n draw_contours=False,\n use_gradient_colormap=False):\n '''\n Overlays segmentations on a 2D slice.\n\n :param vol_slice: h x w image, the brain slice to overlay on top of\n :param seg_slice: h x w array, segmentations to overlay\n (in labels format, not one hot)\n :param include_labels: list, visualize only specific label values\n :param colors: n_labels x 3, specific colors to use for segmentations\n :param draw_contours: bool, visualize segmentations as contours\n rather than solid areas\n :param use_gradient_colormap: bool, create the colormap as a gradient of a\n single color rather than a rainbow\n\n :return: h x w x 3 image of brain slice with segmentations overlaid on top\n '''\n # if no labels are specified, simply visualize all unique label values\n if include_labels is None:\n include_labels = list(np.unique(seg_slice).astype(int))\n\n # if colors are not specified, make a color map\n if colors is None:\n if use_gradient_colormap:\n colors = make_cmap_gradient(\n len(include_labels) + 1, hue=0.5)\n else:\n colors = make_cmap_rainbow(\n len(include_labels) + 1)\n\n # make a new segmentation map with labels as ascending integers,\n # since this is what segutils expects\n pruned_slice = np.zeros(seg_slice.shape, dtype=int)\n for i, l in enumerate(include_labels):\n pruned_slice[seg_slice == l] = i + 1\n\n seg_im = pynd_segutils.seg_overlap(\n np.squeeze(vol_slice), pruned_slice,\n cmap=colors,\n do_contour=draw_contours)\n return seg_im\n\n\ndef overlay_segs_on_ims_batch(ims, segs,\n include_labels,\n draw_contours=False,\n use_gradient_colormap=False,\n subjects_axis=-1,\n colormap=None,\n ):\n\n # if the input is a single image, pretend it is a batch of size 1\n if len(ims.shape) == 2:\n ims = np.expand_dims(ims, -1)\n\n n_brains = ims.shape[subjects_axis]\n out_im = []\n\n for i in range(n_brains):\n curr_im = np.take(ims, i, axis=subjects_axis)\n curr_seg = np.take(segs, i, axis=subjects_axis)\n\n if len(segs.shape) > 2:\n curr_out_im = draw_segs_on_slice(\n curr_im, curr_seg,\n 
include_labels=include_labels,\n draw_contours=draw_contours,\n colors=colormap,\n use_gradient_colormap=use_gradient_colormap,\n )\n\n else:\n curr_out_im = draw_segs_on_slice(\n curr_im, segs,\n include_labels=include_labels,\n draw_contours=draw_contours,\n colors=colormap,\n use_gradient_colormap=use_gradient_colormap,\n )\n out_im.append(np.expand_dims(curr_out_im, axis=subjects_axis))\n out_im = np.concatenate(out_im, subjects_axis)\n\n return out_im\n\n\ndef make_cmap_gradient(nb_labels=256, hue=1.0):\n hue = hue * np.ones((nb_labels, 1))\n sat = np.reshape(np.linspace(1., 0., nb_labels, endpoint=True), hue.shape)\n colors = np.concatenate([hue, sat, np.ones((nb_labels, 1), dtype=np.float32)], axis=1) * 255\n colors = cv2.cvtColor(np.expand_dims(colors, 0).astype(np.uint8), cv2.COLOR_HSV2RGB).astype(np.float32)[0] / 255.0\n return colors\n\n\ndef make_cmap_rainbow(nb_labels=256):\n '''\n Creates a rainbow colormap (with an RGB color value for each label)\n\n :param nb_labels:\n :return:\n '''\n # make a rainbow gradient\n hue = np.expand_dims(np.linspace(0, 0.6, nb_labels), 1).astype(np.float32)\n colors = np.concatenate([hue, np.ones((nb_labels, 2), dtype=np.float32)], axis=1) * 255\n\n # convert to 0-1 range RGB\n colors = cv2.cvtColor(np.expand_dims(colors, 0).astype(np.uint8), cv2.COLOR_HSV2RGB).astype(np.float32)[0] / 255.0\n return colors\n\n"
] | [
[
"numpy.expand_dims",
"numpy.take",
"numpy.linspace",
"numpy.asarray",
"numpy.squeeze",
"numpy.concatenate",
"numpy.max",
"numpy.mean",
"numpy.any",
"numpy.where",
"numpy.clip",
"numpy.reshape",
"numpy.arange",
"numpy.unique",
"numpy.argmax",
"numpy.array2string",
"numpy.zeros",
"numpy.min",
"numpy.random.choice",
"tensorflow.shape",
"numpy.sum",
"numpy.tile",
"numpy.ones",
"tensorflow.Summary.Value",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
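The `labels_to_onehot` / `onehot_to_labels` helpers in the entry above convert between label maps and per-channel one-hot volumes; a self-contained numpy sketch of the same round trip (toy label values, not the repository code) is:

```python
# Self-contained sketch of the label <-> one-hot round trip performed by the
# utils above (toy label values; does not import the repository code).
import numpy as np

label_mapping = [0, 10, 20]              # label ids, ordered by output channel
labels = np.array([[0, 10], [20, 10]])   # tiny 2x2 "segmentation"

# labels -> one-hot: one channel per entry of label_mapping
onehot = np.stack([(labels == l).astype(np.float32) for l in label_mapping], axis=-1)

# one-hot -> labels: argmax over channels, mapped back through label_mapping
recovered = np.asarray(label_mapping)[np.argmax(onehot, axis=-1)]
assert np.array_equal(recovered, labels)
```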
lujoba/utils | [
"dd15cff628b2b972a1138600129f5c7375fca65f"
] | [
"libs/image_orientator.py"
] | [
"import cv2\nimport numpy as np\n\n\nclass ImageOrientation(object):\n def __init__(self):\n pass\n\n @staticmethod\n def rotate_bound(image, angle):\n # grab the dimensions of the image and then determine the\n # center\n (h, w) = image.shape[:2]\n (c_x, c_y) = (w // 2, h // 2)\n # grab the rotation matrix (applying the negative of the\n # angle to rotate clockwise), then grab the sine and cosine\n # (i.e., the rotation components of the matrix)\n matrix = cv2.getRotationMatrix2D((c_x, c_y), -angle, 1.0)\n cos = np.abs(matrix[0, 0])\n sin = np.abs(matrix[0, 1])\n # compute the new bounding dimensions of the image\n n_w = int((h * sin) + (w * cos))\n n_h = int((h * cos) + (w * sin))\n # adjust the rotation matrix to take into account translation\n matrix[0, 2] += (n_w / 2) - c_x\n matrix[1, 2] += (n_h / 2) - c_y\n # perform the actual rotation and return the image\n\n return cv2.warpAffine(image, matrix, (n_w, n_h))\n\n @staticmethod\n def get_centroids(contours):\n centroids = []\n for contour in contours:\n matrix = cv2.moments(contour)\n if matrix[\"m00\"] != 0.0:\n c_x = int(matrix[\"m10\"] / matrix[\"m00\"])\n c_y = int(matrix[\"m01\"] / matrix[\"m00\"])\n centroids.append((c_x, c_y))\n\n return centroids\n\n def __call__(self, image, **kwargs):\n # convert to gray\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # threshold the grayscale image\n ret, thresh = cv2.threshold(gray, 0, 255, 0)\n\n # find outer contour\n cntrs = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]\n\n # get rotated rectangle from outer contour\n rotrect = cv2.minAreaRect(cntrs[0])\n\n # get angle from rotated rectangle\n angle = rotrect[-1]\n\n # from https://www.pyimagesearch.com/2017/02/20/text-skew-correction-opencv-python/\n # the `cv2.minAreaRect` function returns values in the\n # range [-90, 0); as the rectangle rotates clockwise the\n # returned angle trends to 0 -- in this special case we\n # need to add 90 degrees to the angle\n if angle < -45:\n angle = -(90 + angle)\n\n # otherwise, just take the inverse of the angle to make\n # it positive\n else:\n angle = -angle\n\n # find image centroid position and check if the image is inverted\n centroids = self.get_centroids(self.rotate_bound(image, angle))\n median_centroid = np.median(centroids, axis=0)\n if median_centroid[1] < image.shape[1]:\n angle = 180 + angle\n\n return self.rotate_bound(img, angle)\n\n\nif __name__ == '__main__':\n # ===== MAIN =====\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('input_image', help='Input image name')\n parser.add_argument('output_image', help='Output image name')\n parser.add_argument('--show', help='Show result image', action='store_true')\n args = parser.parse_args()\n\n image_path = args.input_image\n if not os.path.exists(image_path):\n print(\"ERROR {} not found!\".format(image_path))\n exit()\n\n img = cv2.imread(image_path)\n orient = ImageOrientation()\n output = orient(img)\n # write result to disk\n cv2.imwrite(args.output_image, output)\n\n # Show image\n if args.show:\n cv2.imshow(description, output)\n cv2.waitKey(0)\n"
] | [
[
"numpy.median",
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
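The `__call__` method in the entry above derives a deskew angle from `cv2.minAreaRect`; a minimal sketch of that angle normalisation (pure Python, assuming the pre-4.5 OpenCV convention of rotated-rect angles in [-90, 0)) is:

```python
# Minimal sketch of the minAreaRect angle handling used above; assumes the
# OpenCV < 4.5 convention where rotated-rect angles fall in [-90, 0).
def deskew_angle(rect_angle):
    # Rectangles rotated past 45 degrees flip orientation, so unwrap around -45.
    return -(90 + rect_angle) if rect_angle < -45 else -rect_angle

for a in (-88.0, -45.0, -10.0):
    print(a, "->", deskew_angle(a))
```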
aj-white/fcc_data_analysis | [
"4994718d9c61f099ed49555458f2fbf372cde0e2"
] | [
"Demographic_Data_Analyser/demographic_data_analyser.py"
] | [
"import pandas as pd\n\n\ndef calculate_demographic_data(print_data=True):\n # Read data from file\n df = pd.read_csv(r'adult.data.csv')\n\n df.columns = [x.replace('-', '_') for x in df.columns]\n\n # data cleaning to reduce memory\n df = (\n df\n .assign(\n workclass=df.workclass.replace('?', 'Unknown').astype('category'),\n education=df.education.astype('category'),\n marital_status=df.marital_status.astype('category'),\n occupation=df.occupation.replace('?', 'Unknown').astype('category'),\n relationship=df.relationship.astype('category'),\n race=df.race.astype('category'),\n sex=df.sex.astype('category'),\n native_country=df.native_country.replace('?', 'Unknown'),\n salary=df.salary.astype('category')\n )\n .astype({'age': 'int8', 'education_num': 'int8', 'capital_loss': 'int16', 'hours_per_week': 'int8'})\n )\n\n # How many of each race are represented in this dataset? This should be a Pandas series with race names as the index labels.\n race_count = df['race'].value_counts()\n\n # What is the average age of men?\n average_age_men = df.query('sex == \"Male\"')['age'].mean().round(1)\n\n # What is the percentage of people who have a Bachelor's degree?\n percentage_bachelors = round(df['education'].value_counts(normalize=True)['Bachelors'] * 100, 1)\n\n\n # What percentage of people with advanced education (`Bachelors`, `Masters`, or `Doctorate`) make more than 50K?\n # What percentage of people without advanced education make more than 50K?\n\n # with and without `Bachelors`, `Masters`, or `Doctorate`\n higher_education = df.query('education.isin([\"Bachelors\", \"Masters\", \"Doctorate\"])')\n lower_education = df.query('~education.isin([\"Bachelors\", \"Masters\", \"Doctorate\"])')\n\n # percentage with salary >50K\n higher_education_rich = round((higher_education.query('salary == \">50K\"').shape[0] / higher_education.shape[0]) * 100, 1)\n lower_education_rich = round((lower_education.query('salary == \">50K\"').shape[0] / lower_education.shape[0]) * 100, 1)\n # What is the minimum number of hours a person works per week (hours-per-week feature)?\n min_work_hours = df['hours_per_week'].min()\n\n # What percentage of the people who work the minimum number of hours per week have a salary of >50K?\n num_min_workers = df.query('hours_per_week == @min_work_hours')\n\n rich_percentage = round((num_min_workers.query('salary == \">50K\"').shape[0] / num_min_workers.shape[0]) * 100, 1)\n\n # What country has the highest percentage of people that earn >50K?\n # https://theprogrammingexpert.com/pandas-groupby-size/\n country_earners = (\n df\n .groupby(['native_country', 'salary'])\n .size()\n .unstack()\n .assign(total=lambda x: x['<=50K'] + x['>50K'])\n .assign(rich_pct=lambda x: (x['>50K'] / x['total']) * 100)\n )\n highest_earning_country = country_earners['rich_pct'].idxmax()\n highest_earning_country_percentage = round(country_earners['rich_pct'].max(), 1)\n\n # Identify the most popular occupation for those who earn >50K in India.\n top_IN_occupation = (\n df\n .query(\n '(native_country == \"India\") & (salary == \">50K\")'\n )\n ['occupation']\n .value_counts()\n .idxmax()\n )\n\n # DO NOT MODIFY BELOW THIS LINE\n\n if print_data:\n print(\"Number of each race:\\n\", race_count) \n print(\"Average age of men:\", average_age_men)\n print(f\"Percentage with Bachelors degrees: {percentage_bachelors}%\")\n print(f\"Percentage with higher education that earn >50K: {higher_education_rich}%\")\n print(f\"Percentage without higher education that earn >50K: {lower_education_rich}%\")\n 
print(f\"Min work time: {min_work_hours} hours/week\")\n print(f\"Percentage of rich among those who work fewest hours: {rich_percentage}%\")\n print(\"Country with highest percentage of rich:\", highest_earning_country)\n print(f\"Highest percentage of rich people in country: {highest_earning_country_percentage}%\")\n print(\"Top occupations in India:\", top_IN_occupation)\n\n return {\n 'race_count': race_count,\n 'average_age_men': average_age_men,\n 'percentage_bachelors': percentage_bachelors,\n 'higher_education_rich': higher_education_rich,\n 'lower_education_rich': lower_education_rich,\n 'min_work_hours': min_work_hours,\n 'rich_percentage': rich_percentage,\n 'highest_earning_country': highest_earning_country,\n 'highest_earning_country_percentage':\n highest_earning_country_percentage,\n 'top_IN_occupation': top_IN_occupation\n }\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
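The script above finds the country with the highest share of `>50K` earners via a groupby/size/unstack chain; a toy, self-contained version of that pattern (made-up rows rather than `adult.data.csv`) is:

```python
# Toy illustration of the groupby/size/unstack pattern used above (made-up
# rows rather than the adult.data.csv the row's script reads).
import pandas as pd

df = pd.DataFrame({
    "native_country": ["A", "A", "A", "B", "B"],
    "salary": ["<=50K", ">50K", ">50K", "<=50K", ">50K"],
})

counts = (
    df.groupby(["native_country", "salary"])
      .size()
      .unstack(fill_value=0)
      .assign(rich_pct=lambda x: x[">50K"] / (x["<=50K"] + x[">50K"]) * 100)
)
print(counts["rich_pct"].idxmax(), round(counts["rich_pct"].max(), 1))  # "A", 66.7
```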
wellbornbaba/konga-beta | [
"66799fdd09e3e3224bc628d8bc7c34de80d05d4a"
] | [
"linux/setup.py"
] | [
"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.expected_conditions import presence_of_element_located\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.chrome.options import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom bs4 import BeautifulSoup as bs\nimport threading\nimport os\nfrom os import path\nfrom os import getcwd\nfrom os import mkdir\nfrom os.path import exists as isexist\nimport pandas as pd\nfrom dclass import respath, remoteread_file, pprint, Db\nfrom time import sleep\nimport webbrowser\nimport sys\n\n\ndriverpath = path.join(getcwd(), \"driver\")\nif not isexist(driverpath): mkdir(driverpath)\n\nclass Scrapp():\n #urlBase https://konga.com\n #previewContainerclass = '_06822_e7mpG'\n #waitID = \"mainContent\"\n #waitCLASS = \"maincontent\"\n def __init__(self,sitename, urlBase, waitID, waitCLASS, previewContainerclass):\n self.limit = 0\n self.waitID = waitID\n self.waitCLASS = waitCLASS\n self.previewContainerclass = previewContainerclass\n self.url = urlBase\n urlName = sitename\n self.internal = urlName\n self.category = []\n self.pages = []\n self.extractingdetails = 0\n self.extractingpage = 0\n self.dbname = urlName + '.db'\n self.db = Db(respath(self.dbname))\n self.createDB()\n self.notallow = ['#', 'twitter','facebook','youtube','google','instagram']\n\n def browserdriver(self):\n try:\n return ChromeDriverManager(path=driverpath).install()\n except:\n print(\"Error downloading driver\")\n return False\n \n \n def createDB(self):\n #db = Db(respath(self.dbname))\n create_dcategory_table = \"\"\" CREATE TABLE IF NOT EXISTS dcategory (\n id integer PRIMARY KEY,\n url MEDIUMTEXT NOT NULL,\n status INTEGER\n ); \"\"\"\n\n create_dpages_table = \"\"\" CREATE TABLE IF NOT EXISTS dpages (\n id integer PRIMARY KEY,\n url MEDIUMTEXT NOT NULL,\n status INTEGER\n ); \"\"\"\n \n create_ddata_table = \"\"\" CREATE TABLE IF NOT EXISTS ddata (\n id integer PRIMARY KEY,\n dpageid INTEGER,\n brandname text NOT NULL, \n main_category text NOT NULL, \n sub_categories text NOT NULL, \n title text NOT NULL, \n images text NOT NULL, \n price text NOT NULL, \n selling_price text NOT NULL, \n discount text NOT NULL, \n description blob, \n product_code text NOT NULL, \n review text NOT NULL, \n link MEDIUMTEXT NOT NULL\n ); \"\"\"\n \n self.db.createdb(create_dcategory_table)\n self.db.createdb(create_dpages_table)\n self.db.createdb(create_ddata_table)\n \n def run(self):\n step2 = input(f\"Welcome to {self.internal} scrapper enjoy\\n\\nDo you want to start scrapping now? (y/n): \").lower()\n if step2 == 'y':\n checkdb2 = self.db.check('id', 'dcategory', f\"id!='' \")\n if checkdb2:\n step = int(input(\"You have some unfinished processing. Select options to continue\\n\\n1. Continue Extract pages and Extract Details\\n2. Export saved Details only\\n3. Export saved Details and continue extracting\\n4. Extract Details only\\n5. Start new session\\n6. 
Clear all session and exits : \"))\n if step ==1:\n self.extractPages()\n elif step ==2:\n self.saveData()\n elif step ==3:\n self.extractPages()\n self.saveData()\n elif step ==4:\n self.extractDetails()\n elif step ==5:\n self.db.others(f\"DELETE FROM dcategory\")\n self.db.others(f\"DELETE FROM dpages\")\n self.db.others(f\"DELETE FROM ddata\")\n self.dosubProcess()\n elif step ==6:\n self.db.others(f\"DELETE FROM dcategory\")\n self.db.others(f\"DELETE FROM dpages\")\n self.db.others(f\"DELETE FROM ddata\")\n sys.exit()\n else:\n print(\"Sorry no option was select try again\")\n self.run()\n else:\n self.dosubProcess()\n else:\n sys.exit()\n \n def notallowit(self, url):\n for pasre in self.notallow:\n if pasre in url: return True\n return False\n \n def dosubProcess(self):\n print(\"Connecting to \"+ str(self.internal))\n \n try:\n options = Options()\n options.add_argument('--ignore-certificate-errors')\n options.add_argument(\"--test-type\")\n options.add_argument(\"--headless\")\n browser = webdriver.Chrome(executable_path=self.browserdriver(), options=options)\n #try parsing with selenium\n browser.get(self.url)\n #wait for the browser page to load\n waitToLoad = WebDriverWait(browser, 5)\n #wait until the target class is loaded and found\n if self.waitCLASS:\n waitToLoad.until(EC.presence_of_element_located((By.CLASS_NAME, self.waitCLASS)))\n #if successfully loaded store it to pagecontents variable\n allcategories_link = browser.find_element_by_class_name(self.waitCLASS)\n else:\n waitToLoad.until(EC.presence_of_element_located((By.ID, self.waitID)))\n #if successfully loaded store it to pagecontents variable\n allcategories_link = browser.find_element_by_id(self.waitID)\n \n dcontent = allcategories_link.get_attribute(\"outerHTML\")\n \n browser.quit()\n except:\n #try process with get\n dcontent = remoteread_file(self.url)\n \n \n try:\n content = bs(dcontent, 'html.parser')\n \n for totalfound_href in content.find_all('a'):\n try:\n foundurl = totalfound_href[\"href\"]\n \n if not foundurl.startswith('http') or not foundurl.startswith('https'):\n foundurl = self.url+'/'+ foundurl.lstrip('/')\n \n if self.internal == \"jumia\": \n if not \".html\" in foundurl and not self.notallowit(foundurl) and self.internal in foundurl:\n checkdb = self.db.check('id', 'dcategory', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Category saved \", foundurl)\n self.db.insert('dcategory', 'url, status', f\"'{foundurl}', '0' \")\n \n else:\n if \"category\" in foundurl and not self.notallowit(foundurl) and self.internal in foundurl:\n checkdb = self.db.check('id', 'dcategory', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Category saved \", foundurl)\n self.db.insert('dcategory', 'url, status', f\"'{foundurl}', '0' \")\n \n except Exception as e:\n print(\"Category page error \", e)\n \n except Exception as e:\n print('sub category error: '+ str(e))\n \n '''\n p1 = threading.Thread(name='Process Category', target=self.extractPages)\n p1.start()\n p2 = threading.Thread(name='Process Category', target=self.extractDetails)\n p2.start()\n '''\n self.extractPages()\n \n def extractPages(self):\n \n while True:\n getPages = self.db.fetch(f\"SELECT * FROM dcategory WHERE status='0' ORDER BY id DESC \")\n if len(getPages):\n page = ''\n for pag in getPages:\n page = pag['url']\n print(\"Processing Category\", page)\n options = Options()\n options.add_argument('--ignore-certificate-errors')\n options.add_argument(\"--test-type\")\n options.add_argument(\"--headless\")\n \n try:\n 
browser = webdriver.Chrome(executable_path=self.browserdriver(), options=options)\n #try parsing with selenium\n browser.get(page)\n #wait for the browser page to load\n waitToLoad = WebDriverWait(browser, 5)\n #wait until the target class is loaded and found\n if self.waitCLASS:\n waitToLoad.until(EC.presence_of_element_located((By.CLASS_NAME, self.waitCLASS)))\n else:\n waitToLoad.until(EC.presence_of_element_located((By.ID, self.waitID)))\n #if successfully loaded store it to pagecontents variable\n allcategories_link = browser.find_element_by_class_name(self.previewContainerClass)\n dcontent = allcategories_link.get_attribute(\"outerHTML\")\n \n browser.quit()\n except:\n #try process with get\n dcontent = remoteread_file(page)\n\n try:\n content = bs(dcontent, 'html.parser')\n \n for totalfound_href in content.find_all('a'):\n try:\n foundurl = totalfound_href[\"href\"]\n if foundurl:\n if not foundurl.startswith('http') or not foundurl.startswith('https'):\n foundurl = self.url+'/'+ foundurl.lstrip('/')\n \n \n if self.internal == \"jumia\": \n if \".html\" in foundurl and self.internal in foundurl: \n checkdb = self.db.check('id', 'dpages', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Page saved \", foundurl)\n self.db.insert('dpages', 'url, status', f\"'{foundurl}', '0' \")\n \n else:\n if not self.notallowit(foundurl) and self.internal in foundurl:\n checkdb = self.db.check('id', 'dcategory', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Category saved \", foundurl)\n self.db.insert('dcategory', 'url, status', f\"'{foundurl}', '0' \")\n \n else:\n if \"category\" in foundurl and self.internal in foundurl and not \"=\" in foundurl: \n if not self.notallowit(foundurl): \n checkdb = self.db.check('id', 'dcategory', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Category saved \", foundurl)\n self.db.insert('dcategory', 'url, status', f\"'{foundurl}', '0' \")\n else:\n if \"product\" in foundurl and self.internal in foundurl and not \"=\" in foundurl:\n checkdb = self.db.check('id', 'dpages', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Page saved \", foundurl)\n self.db.insert('dpages', 'url, status', f\"'{foundurl}', '0' \")\n except Exception as e:\n print(\"Page error \", e)\n \n except Exception as e:\n print('pages or category error: '+ str(e))\n \n self.db.others(f\"UPDATE dcategory SET status=1 WHERE id='{pag['id']}'\")\n sleep(1)\n \n else:\n self.extractDetails()\n break\n \n \n def extractDetails(self):\n countfound = 0\n #getPage = ['https://www.jumia.com.ng/sony-ps4-efootball-pro-evolution-soccer-pes-2020-40897803.html', 'https://www.jumia.com.ng/gerber-babies-wash-clothes-8-piece-gerber-mpg281849.html','https://www.jumia.com.ng/generic-baby-towel-gift-sets-3-in-1-43597455.html']\n #getPage =[r'C:\\PythonProjects\\myProject\\PendingProject\\konga-jiji-spar-jumia\\jumia_sunny2.htm']\n \n while True:\n if self.limit:\n if self.limit >= countfound:\n self.extractingdetails = 0\n break\n \n getPage = self.db.fetch(f\"SELECT * FROM dpages WHERE status='0' ORDER BY id ASC \")\n if len(getPage):\n print(\"Extracting Details begins...\")\n page = ''\n for pag in getPage:\n page = pag['url']\n #page = pag\n countfound +=1\n if self.limit:\n if self.limit >= countfound:\n self.extractingdetails = 0\n break\n \n print(\"Extraction begins on page\", page)\n \n try:\n opts = Options()\n opts.add_argument('--ignore-certificate-errors')\n opts.add_argument(\"--test-type\")\n opts.add_argument(\"--headless\")\n browser = 
webdriver.Chrome(executable_path=self.browserdriver(), options=opts)\n #try parsing with selenium\n browser.get(page)\n #wait for the browser page to load\n waitToLoad = WebDriverWait(browser, 5)\n #wait until the target class is loaded and found\n if self.waitCLASS:\n waitToLoad.until(EC.presence_of_element_located((By.CLASS_NAME, self.waitCLASS)))\n #if successfully loaded store it to pagecontents variable\n allcategories_link = browser.find_element_by_class_name(self.waitCLASS)\n else:\n waitToLoad.until(EC.presence_of_element_located((By.ID, self.waitID)))\n #if successfully loaded store it to pagecontents variable\n allcategories_link = browser.find_element_by_id(self.waitID) \n \n dcontent = allcategories_link.get_attribute(\"outerHTML\")\n \n browser.quit()\n except:\n #try process with get\n dcontent = remoteread_file(page) \n '''\n with open(page, 'rb') as ty:\n dcontent = ty.read()\n '''\n \n try: \n content = bs(dcontent, 'html.parser')\n \n for totalfound_href in content.find_all('a'):\n try:\n foundurl = totalfound_href[\"href\"]\n if foundurl:\n if not foundurl.startswith('http') or not foundurl.startswith('https'):\n foundurl = self.url+'/'+ foundurl.lstrip('/')\n \n if self.internal == \"jumia\": \n if \".html\" in foundurl and self.internal in foundurl: \n checkdb = self.db.check('id', 'dpages', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Page saved \", foundurl)\n self.db.insert('dpages', 'url, status', f\"'{foundurl}', '0' \")\n \n else:\n if not self.notallowit(foundurl) and self.internal in foundurl:\n checkdb = self.db.check('id', 'dcategory', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Category saved \", foundurl)\n self.db.insert('dcategory', 'url, status', f\"'{foundurl}', '0' \")\n \n else:\n if \"category\" in foundurl and self.internal in foundurl and not self.notallowit(foundurl): \n checkdb = self.db.check('id', 'dcategory', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Category saved \", foundurl)\n self.db.insert('dcategory', 'url, status', f\"'{foundurl}', '0' \")\n else:\n if \"product\" in foundurl and self.internal in foundurl and not self.notallowit(foundurl):\n checkdb = self.db.check('id', 'dpages', f\"url='{foundurl}' \")\n if checkdb is None:\n print(\"Page saved \", foundurl)\n self.db.insert('dpages', 'url, status', f\"'{foundurl}', '0' \")\n except:\n pass\n \n if self.internal == \"konga\":\n maindiv = content.find(class_=\"d9549_IlL3h\")\n \n try:\n maindiv2 = maindiv.find(class_=\"_31c33_NSdat\")\n resultspan = maindiv2.find_all(\"span\")\n if len(resultspan) > 2:\n resultReview = maindiv2.find_all(\"span\")[0].getText()\n resultProductCode = maindiv2.find_all(\"span\")[1].getText()\n else:\n resultReview = 0\n resultProductCode = maindiv2.find_all(\"span\")[0].getText()\n except Exception as e:\n resultReview = 0\n resultProductCode = 0\n \n try:\n resultTitle = maindiv.find(\"h4\").getText()\n except:\n resultTitle = ''\n \n try:\n resultBrand = maindiv.find(\"h5\").getText()\n except:\n resultBrand = ''\n \n try:\n maindiv3 = maindiv.find_all(class_=\"_3924b_1USC3\")[2]\n allprice = maindiv3.find_all(\"div\")\n resultPricediscount = allprice[1].getText().replace(\"?\", '')\n resultPrice = allprice[2].getText().replace(\"?\", '')\n resultPricesaved = allprice[3].find(\"span\").getText().replace(\"You save\", \"\").replace(\"?\", '').strip('')\n \n except:\n resultPrice = 0\n resultPricediscount = 0\n resultPricesaved = 0\n \n resultImage = ''\n try:\n maindiv5 = maindiv.find(class_=\"bf1a2_3kz7s\")\n for img 
in maindiv5.find_all(\"img\"):\n resultImage += img[\"src\"] + '\\n'\n except:\n pass\n \n \n try:\n resultAllsubcate = ''\n maindiv6 = content.find(class_=\"f9286_1HlF_\")\n resultMainsubcate = maindiv6.find(\"h1\").getText()\n for subcat in maindiv6.find_all(\"li\"):\n resultAllsubcate += subcat.getText()+ ' > '\n except:\n resultAllsubcate = ''\n resultMainsubcate = ''\n \n try:\n maindiv4 = maindiv.find(class_=\"_227af_AT9tO\")\n #tabs = maindiv4.find(class_=\"_8ed9d_3tJB8\")\n #for dotab in tabs.find_all(\"a\"):\n # #allo selenium to click on all the a hreg tags and get the results \n # tabname = dotab.find(\"h3\").getText()\n # dotabname = browser.dotab.click()\n # print(\"Tabname\", tabname)\n # tabcontent = maindiv4.find(class_=\"_3383f_1xAuk\")\n # print(str(tabcontent))\n resultDescription = maindiv4.find(class_=\"_3383f_1xAuk\")\n except:\n resultDescription = ''\n \n resultImage = resultImage.rstrip('\\n')\n resultAllsubcate = resultAllsubcate.rstrip(\" > \")\n if resultTitle:\n checkdb = self.db.check('id', 'ddata', f\"dpageid='{pag['id']}' \")\n if checkdb is None:\n print(\"\\n\\nData saved \", str(resultTitle),'\\n\\n')\n\n self.db.insert2(\"\"\"INSERT INTO ddata (dpageid,brandname,main_category, sub_categories,title,images,price,selling_price,discount,description,product_code,review, link) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);\"\"\", (pag['id'],resultBrand, resultMainsubcate,resultAllsubcate,resultTitle,resultImage,resultPrice,resultPricediscount,resultPricesaved,resultDescription.encode(),resultProductCode, resultReview, page))\n \n \n elif self.internal == \"jumia\":\n #jumia process\n maindiv = content\n \n try:\n resultTitle = maindiv.find(\"h1\").getText()\n except:\n resultTitle = ''\n \n #get brand name \n try:\n resultBrand = maindiv.find(\"div\", class_ =\"-fs14 -pvxs\").find_all('a')[0].getText()\n except Exception:\n resultBrand = ''\n \n #get review\n try:\n resultReview = maindiv.find(\"div\", class_ =\"-df -i-ctr -pvxs\").find_all('a')[0].getText().replace(\"(\", \"\").replace(\")\", \"\")\n except:\n resultReview = 0\n \n #get prices and discount\n maindiv5 = maindiv.find(\"div\", class_=\"-hr -pvs -mtxs\")\n try:\n resultPrice = maindiv5.find(class_=\"-tal -gy5 -lthr -fs16\").getText().replace(\"?\", '')\n resultPricediscount = maindiv5.find(class_=\"-b -ltr -tal -fs24\").getText().replace(\"?\", '')\n resultPricesaved = 0\n \n except Exception as e:\n print(\"Price error \", e)\n resultPrice = 0\n resultPricediscount = 0\n resultPricesaved = 0\n \n resultImage = ''\n try:\n maindiv5 = maindiv.find(class_=\"-ptxs -pbs\")\n for img in maindiv5.find_all(\"img\"):\n resultImage += img[\"data-src\"] + '\\n'\n except:\n pass\n \n try:\n resultAllsubcate = ''\n maindiv6 = content.find(class_=\"brcbs col16 -pts -pbm\")\n resultMainsubcate = maindiv6.find(\"a\").getText()\n for subcat in maindiv6.find_all(\"a\"):\n resultAllsubcate += subcat.getText()+ ' > '\n except:\n resultAllsubcate = ''\n resultMainsubcate = ''\n \n #get product sku\n maindiv4 = maindiv.find_all(\"div\", class_=\"col12\")[0]\n try:\n resultProductCode = maindiv4.find(\"ul\", class_=\"-pvs -mvxs -phm -lsn\").find_all(\"li\")[0].getText()\n except Exception as e:\n print('code is error', e)\n resultProductCode = ''\n \n try:\n maindivSpec = str(maindiv4.find(\"section\", class_=\"card aim -mtm -fs16\"))\n divDescription = str(maindiv4.find(\"div\", class_=\"card aim -mtm\"))\n \n resultDescription = str(divDescription + maindivSpec)\n except Exception:\n resultDescription = ''\n \n resultImage = 
resultImage.rstrip('\\n')\n resultAllsubcate = resultAllsubcate.rstrip(\" > \")\n \n pro =f'\\n\\nbrandname: {resultBrand}\\nmain_category: {resultMainsubcate}\\nsub_categories: {resultAllsubcate}\\ntitle: {resultTitle}\\nimages: {resultImage}\\nprice: {resultPrice}\\nselling_price: {resultPricediscount}\\ndiscount: {resultPricesaved}\\ndescription: {resultDescription.encode()}\\nproduct_code: {resultProductCode}\\nreview: {resultReview}'\n #print(pro)\n \n if resultTitle:\n checkdb = self.db.check('id', 'ddata', f\"dpageid='{pag['id']}' \")\n if checkdb is None:\n print(\"\\n\\nData saved \", str(resultTitle),'\\n\\n')\n\n self.db.insert2(\"\"\"INSERT INTO ddata (dpageid,brandname,main_category, sub_categories,title,images,price,selling_price,discount,description,product_code,review, link) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);\"\"\", (pag['id'],resultBrand, resultMainsubcate,resultAllsubcate,resultTitle,resultImage,resultPrice,resultPricediscount,resultPricesaved,resultDescription.encode(),resultProductCode, resultReview, page))\n \n \n print(\"Finished extracting \", page)\n \n except Exception as e:\n print('Error occurred '+ str(e))\n \n self.db.others(f\"UPDATE dpages SET status=1 WHERE id='{pag['id']}'\" )\n\n else:\n print('Almost done')\n self.saveData()\n break\n \n \n def saveData(self): \n data = {}\n step = int(input(\"To export please Select options to continue\\n\\n1. Export Pages only\\n2. Export Categories Links Only\\n3. Export Extracted Details Only\\n4. Exists : \"))\n \n if step ==1:\n #Export Pages only\n xls = pd.DataFrame(columns=['Url', 'Processed'])\n \n getData = self.db.fetch(f\"SELECT * FROM dpages ORDER BY id ASC \")\n if len(getData):\n \n print(\"Saving Datas to exceel sheet\")\n for result in getData:\n if result['status']: \n mainstatus = \"Yes\" \n else:\n mainstatus = \"No\"\n \n data['Url'] = result['url']\n data['ProProcessedcess'] = mainstatus\n xls = xls.append(data, ignore_index=True)\n xls.index += 1\n #now save the files\n #save as excel\n try:\n filesaveName = self.internal.upper() + \"_Pages.xlsx\"\n xls.to_excel(filesaveName)\n webbrowser.open(getcwd())\n savepath = path.join(getcwd(), filesaveName)\n print(\"Saved successfully to \", savepath)\n except Exception as e:\n print(\"Error can not save \", e)\n else:\n print(\"No Extracted Pages data to save\")\n self.saveData()\n \n elif step ==2:\n #Export Categories Links Only\n xls = pd.DataFrame(columns=['Url', 'Processed'])\n \n getData = self.db.fetch(f\"SELECT * FROM dcategory ORDER BY id ASC \")\n if len(getData):\n \n print(\"Saving Datas to exceel sheet\")\n for result in getData:\n if result['status']: \n mainstatus = \"Yes\" \n else:\n mainstatus = \"No\"\n \n data['Url'] = result['url']\n data['ProProcessedcess'] = mainstatus\n xls = xls.append(data, ignore_index=True)\n xls.index += 1\n #now save the files\n #save as excel\n try:\n filesaveName = self.internal.upper() + \"_Categories.xlsx\"\n xls.to_excel(filesaveName)\n webbrowser.open(getcwd())\n savepath = path.join(getcwd(), filesaveName)\n print(\"Saved successfully to \", savepath)\n except Exception as e:\n print(\"Error can not save \", e)\n else:\n print(\"No Extracted Categories data to save\")\n \n self.saveData()\n \n elif step ==3:\n xls = pd.DataFrame(columns=['Brandname', 'Main Category', 'Sub Categories', 'Title', 'Images', 'Price', 'Selling Price', 'Discount', 'Description', 'Product Code', 'Review', 'Link'])\n \n getData = self.db.fetch(f\"SELECT * FROM ddata ORDER BY brandname ASC \")\n if len(getData):\n \n 
print(\"Saving Datas to exceel sheet\")\n for result in getData:\n des = result['description'].decode()\n data['Brandname'] = result['brandname']\n data['Main Category'] = result['main_category']\n data['Sub Categories'] = result['sub_categories']\n data['Title'] = result['title']\n data['Images'] = result['images']\n data['Price'] = result['price']\n data['Selling Price'] = result['selling_price']\n data['Discount'] = result['discount']\n data['Description'] = des\n data['Product Code'] = result['product_code']\n data['Review'] = result['review']\n data['Link'] = result['link']\n xls = xls.append(data, ignore_index=True)\n xls.index += 1\n #now save the files\n #save as excel\n try:\n filesaveName = self.internal.upper() + \"_Details.xlsx\"\n xls.to_excel(filesaveName)\n webbrowser.open(getcwd())\n savepath = path.join(getcwd(), filesaveName)\n print(\"Saved successfully to \", savepath)\n except Exception as e:\n print(\"Error can not save \", e)\n else:\n print(\"No Extracted Detail data to save\")\n \n self.saveData()\n \n else:\n sys.exit()\n \nif __name__ == '__main__':\n stp = int(input(\"Choose site you want to start scrapping\\n\\n1. Konga.com\\n2. Jumia.com\\n3. Jiji.com\\n\\nSelect option now: \"))\n if stp ==1:\n Scrapp(\"konga\",\"https://konga.com\", \"mainContent\",\"\",\"_06822_e7mpG\").run()\n \n elif stp ==2:\n Scrapp(\"jumia\",\"https://www.jumia.com.ng\", \"\",\"has-b2top\",\"-pvs\").run()\n \n elif stp ==3:\n print(\"Sorry features not available\")\n else:\n print(\"Oop nothing was selected\")\n \n \n \n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
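A minimal sketch of how a row's apis column could relate to its code column — here recovering pandas.DataFrame from the scraper source above. This is only an illustration of the mapping between the two columns; the helper name extract_api_calls, the alias handling, and the extraction rules are assumptions, not the dataset's actual pipeline.

import ast

def extract_api_calls(source: str) -> set:
    """Collect dotted library calls such as 'pandas.DataFrame' from source code."""
    tree = ast.parse(source)
    aliases = {}  # local name -> imported dotted path
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                aliases[alias.asname or alias.name.split(".")[0]] = alias.name.split(".")[0]
        elif isinstance(node, ast.ImportFrom) and node.module:
            for alias in node.names:
                aliases[alias.asname or alias.name] = node.module + "." + alias.name
    found = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            attrs, func = [], node.func
            while isinstance(func, ast.Attribute):
                attrs.append(func.attr)
                func = func.value
            if isinstance(func, ast.Name) and func.id in aliases:
                found.add(".".join([aliases[func.id]] + list(reversed(attrs))))
    return found

print(extract_api_calls("import pandas as pd\nxls = pd.DataFrame(columns=['Url', 'Processed'])"))
# {'pandas.DataFrame'}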
marcsv87/Deep-PDE-Solvers | [
"3322704f0291dc98e5bdf8c81c57a95cbfe70981"
] | [
"old/basket_options/emp_corr_max.py"
] | [
"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport time\nfrom numpy.linalg import norm\nimport copy\nimport math\nfrom numpy.linalg import cholesky\nimport argparse\n\n\nclass Net_timestep(nn.Module):\n \n def __init__(self, dim, nOut, n_layers, vNetWidth, activation = \"relu\"):\n super(Net_timestep, self).__init__()\n self.dim = dim\n self.nOut = nOut\n \n if activation == \"relu\":\n self.activation = nn.ReLU()\n elif activation == \"tanh\":\n self.activation = nn.Tanh()\n else:\n raise ValueError(\"unknown activation function {}\".format(activation))\n \n\n \n self.i_h = self.hiddenLayerT0(dim, vNetWidth)\n self.h_h = nn.ModuleList([self.hiddenLayerT1(vNetWidth, vNetWidth) for l in range(n_layers-1)])\n self.h_o = self.outputLayer(vNetWidth, nOut)\n \n def hiddenLayerT0(self, nIn, nOut):\n layer = nn.Sequential(nn.BatchNorm1d(nIn, momentum=0.1), nn.Linear(nIn,nOut,bias=True),\n nn.BatchNorm1d(nOut, momentum=0.1), \n self.activation) \n return layer\n \n def hiddenLayerT1(self, nIn, nOut):\n layer = nn.Sequential(nn.Linear(nIn,nOut,bias=True),\n nn.BatchNorm1d(nOut, momentum=0.1), \n self.activation) \n return layer\n \n \n def outputLayer(self, nIn, nOut):\n layer = nn.Sequential(nn.Linear(nIn, nOut,bias=True),\n nn.BatchNorm1d(nOut, momentum=0.1))\n return layer\n \n def forward(self, S):\n h = self.i_h(S)\n for l in range(len(self.h_h)):\n h = self.h_h[l](h)\n output = self.h_o(h)\n return output \n\n\n\nclass ControlVariate_stoch_int(nn.Module):\n \"\"\"\n Main reference: https://arxiv.org/abs/1806.00421 \n \"\"\"\n \n def __init__(self, dim, r, sigma, covariance_mat, timegrid, n_layers, vNetWidth = 100, gradNetWidth=100):\n super(ControlVariate_stoch_int, self).__init__()\n self.dim = dim\n self.timegrid = torch.Tensor(timegrid).to(device)\n self.r = r # r is a number\n self.sigma = torch.Tensor(sigma).to(device) # this should be a vector of length dim\n self.covariance_mat = covariance_mat # covariance matrix\n self.C = cholesky(covariance_mat) # Cholesky decomposition of covariance matrix, with size (dim,dim)\n \n self.volatility_mat = torch.Tensor(self.C).to(device)\n for i in range(self.dim):\n self.volatility_mat[i] = self.volatility_mat[i]*self.sigma[i]\n\n self.net_timegrid = nn.ModuleList([Net_timestep(dim=dim, nOut=dim, n_layers=n_layers, vNetWidth=vNetWidth) for t in timegrid[:-1]]) \n\n\n\n def forward(self, S0): \n S_old = S0\n control_variate = 0 \n path = [S_old]\n \n for i in range(1,len(self.timegrid)):\n # Wiener process at time timegrid[i]\n h = self.timegrid[i]-self.timegrid[i-1]\n dW = math.sqrt(h)*torch.randn(S_old.data.size(), device=device)#.to(device)\n \n # volatility(t,S) * dW\n volatility_of_S_dW = S_old * torch.matmul(self.volatility_mat,dW.transpose(1,0)).transpose(1,0) # this is a matrix of size (batch_size x dim)\n \n # gradient of value function\n Z = torch.exp(-self.r * self.timegrid[i-1]) * self.net_timegrid[i-1](S_old)\n \n # stochastic integral\n stoch_int = torch.bmm(Z.unsqueeze(1), volatility_of_S_dW.unsqueeze(2)).squeeze(1) \n \n # control variate\n control_variate += stoch_int\n \n # we update the SDE path. Use one or the other. 
\n S_new = S_old + self.r*S_old*h + volatility_of_S_dW \n \n # we are done, prepare for next round\n S_old = S_new\n path.append(S_old)\n \n return S_old, control_variate, path\n\n\ndef g(S, S0):\n \"\"\"\n basket options\n \"\"\"\n zeros = torch.zeros(S.size()[0],1, device=device)\n K = S0.sum(1).view(-1,1)\n sum_final = S.sum(1).view(-1,1)\n m = torch.cat([zeros, sum_final - K],1)\n output = torch.max(m,1)\n return output[0]\n\n\ndef train_optimise_cor():\n model.train()\n for it in range(n_iter):\n model.train()\n optimizer.zero_grad()\n \n # learning rate decay\n lr = base_lr * 0.1**(it//1000)\n for param_group in optimizer.state_dict()['param_groups']:\n param_group['lr'] = lr\n \n z = torch.randn([batch_size, dim], device=device)#.to(device)\n input = torch.exp((mu-0.5*sigma**2)*tau + math.sqrt(tau)*z)*0.7\n \n init_time = time.time()\n S_T, control_variate, _ = model(input)\n time_forward = time.time() - init_time\n \n K = torch.ones_like(S_T)*0.7\n terminal = torch.exp(torch.tensor([-T*r], device=device))*g(S_T,K).view(-1,1)\n \n var_terminal = terminal.var()\n cov_terminal_control_variate = torch.mean((terminal-torch.mean(terminal))*(control_variate-torch.mean(control_variate)))\n var_control_variate = control_variate.var()\n corr_terminal_control_variate = cov_terminal_control_variate/(torch.sqrt(var_control_variate)*torch.sqrt(var_terminal))\n loss = 1-corr_terminal_control_variate**2 \n\n init_time = time.time()\n loss.backward()\n time_backward = time.time() - init_time\n \n optimizer.step()\n \n with open(file_log_path, 'a') as f:\n f.write(\"Iteration=[{it}/{n_iter}]\\t loss={loss:.8f}\\t time forward pass={t_f:.3f}\\t time backward pass={t_b:.3f}\\n\".format(it=it, n_iter=n_iter, loss=loss.item(), t_f=time_forward, t_b=time_backward))\n \n if (it+1) % 100 == 0:\n var_MC_CV_estimator, var_MC_estimator, MC_CV_estimator, MC_estimator, corr_terminal_control_variate = get_prediction_CV(1000)\n with open(file_log_results, 'a') as f:\n f.write('{},{},{},{},{}\\n'.format(var_MC_CV_estimator, var_MC_estimator, MC_CV_estimator, MC_estimator, corr_terminal_control_variate))\n \n if (it+1) % 1000 == 0:\n state = {'epoch':it+1, 'state_dict':model.state_dict(), 'optimizer':optimizer.state_dict()}\n filename = 'model_correlation_'+str(n_layers)+'_'+str(vNetWidth)+'_'+str(timestep)+'_'+str(dim)+'_it'+str(it)+'.pth.tar'\n torch.save(state, filename)\n \n print(\"Done.\")\n\n\n\n\ndef get_prediction_CV(batch_size_MC=100000):\n model.eval()\n \n if batch_size_MC > 1000:\n terminal_list = []\n control_variate_list = []\n for i in range(batch_size_MC//1000):\n print(i)\n input = torch.ones(1000, dim, device=device)*0.7\n with torch.no_grad():\n S_T, control_variate, _ = model(input)\n terminal = torch.exp(torch.tensor([-T*r], device=device))*g(S_T,input).view(-1,1)\n terminal_list.append(terminal)\n control_variate_list.append(control_variate)\n terminal = torch.cat(terminal_list, 0)\n control_variate = torch.cat(control_variate_list, 0)\n else:\n input = torch.ones(batch_size_MC, dim, device=device)*0.7\n with torch.no_grad():\n S_T, control_variate, _ = model(input)\n terminal = torch.exp(torch.tensor([-T*r], device=device))*g(S_T,input).view(-1,1)\n MC_estimator = torch.mean(terminal)\n var_terminal = torch.mean((terminal - torch.mean(terminal))**2)\n var_MC_estimator = 1/batch_size_MC*var_terminal\n \n cov_terminal_control_variate = torch.mean((terminal-torch.mean(terminal))*(control_variate-torch.mean(control_variate)))\n var_control_variate = 
control_variate.var()#torch.mean((control_variate-torch.mean(control_variate))**2)\n corr_terminal_control_variate = cov_terminal_control_variate/(torch.sqrt(var_control_variate)*torch.sqrt(var_terminal))\n \n # Optimal coefficent b that minimises variance of optimally controlled estimator \n b = cov_terminal_control_variate / var_control_variate\n \n # Monte Carlo controlled iterations\n MC_CV = terminal - b*(control_variate)\n \n # Monte Carlo controlled estimator\n MC_CV_estimator = torch.mean(MC_CV)\n \n var_MC_CV_estimator = 1/batch_size_MC * MC_CV.var()#var_MC_CV_estimator \n \n return var_MC_CV_estimator.item(), var_MC_estimator.item(), MC_CV_estimator.item(), MC_estimator.item(), corr_terminal_control_variate.item()\n\n\n\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--vNetWidth', action=\"store\", type=int, default=22, help=\"network width\")\n parser.add_argument('--n-layers', action=\"store\", type=int, default=2, help=\"number of layers\")\n parser.add_argument('--timestep', action=\"store\", type=float, default=0.01, help=\"timestep\")\n parser.add_argument('--dim', action=\"store\", type=int, default=2, help=\"dimension of the PDE\")\n\n args = parser.parse_args()\n vNetWidth = args.vNetWidth\n n_layers = args.n_layers\n timestep = args.timestep\n dim = args.dim\n\n if torch.cuda.is_available():\n device = \"cuda:0\"\n else:\n device = \"cpu\"\n\n PATH_RESULTS = os.getcwd()\n os.chdir(PATH_RESULTS)\n\n log_results = 100\n file_log_path = os.path.join(PATH_RESULTS, 'log_cor_opt_'+str(dim)+'.txt')\n file_log_results = os.path.join(PATH_RESULTS, 'results_cor_opt_'+str(dim)+'.txt')\n with open(file_log_results, 'a') as f:\n f.write('var_MC_CV_estimator, var_MC_estimator, MC_CV_estimator, MC_estimator,corr_terminal_control_variate\\n')\n \n ##################\n # Problem setup ##\n ##################\n init_t, T = 0,1\n timegrid = np.arange(init_t, T+timestep/2, timestep)\n r = 0.5\n sigma = 1\n mu = 0.08\n tau = 0.1\n covariance_mat = np.identity(dim) \n \n #########################\n # Network instantiation #\n #########################\n model = ControlVariate_stoch_int(dim=dim, r=r, sigma=np.array([sigma]*dim), covariance_mat=covariance_mat, timegrid=timegrid, n_layers=n_layers, vNetWidth=vNetWidth, gradNetWidth=vNetWidth) \n model.to(device)\n \n #######################\n # training parameters #\n #######################\n batch_size = 5000\n base_lr = 0.001\n optimizer = torch.optim.Adam(model.parameters(),lr=base_lr, betas=(0.9, 0.999))\n n_iter = 20000\n \n train_optimise_var()\n"
] | [
[
"torch.mean",
"torch.max",
"torch.cat",
"torch.no_grad",
"torch.cuda.is_available",
"torch.save",
"torch.ones",
"torch.sqrt",
"numpy.arange",
"torch.randn",
"torch.tensor",
"torch.ones_like",
"torch.nn.BatchNorm1d",
"torch.exp",
"torch.nn.Linear",
"numpy.identity",
"numpy.linalg.cholesky",
"numpy.array",
"torch.Tensor",
"torch.nn.Tanh",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
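The quantities reported by get_prediction_CV in the row above reduce to the classic control-variate identity b = Cov(Y, C) / Var(C) and Y_cv = Y - b*C. A toy NumPy illustration of that variance reduction, using a made-up payoff/control pair rather than the basket-option SDE, and assuming the control has known mean zero:

import numpy as np

rng = np.random.default_rng(0)
z = rng.standard_normal(100_000)
payoff = np.exp(z)          # quantity whose mean we estimate
control = z                 # correlated control variate with known mean 0

b = np.cov(payoff, control)[0, 1] / control.var()
controlled = payoff - b * control   # same mean, lower variance

print(payoff.mean(), controlled.mean())   # both estimate E[exp(Z)] ~ 1.6487
print(payoff.var(), controlled.var())     # the controlled sample variance is noticeably smaller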
princeton-nlp/DeepSpeed | [
"b1669c0d8f6dbe65162b534f2c6967f64d143dc3"
] | [
"deepspeed/module_inject/replace_module.py"
] | [
"import copy\nimport torch\nimport deepspeed\nimport deepspeed.ops.transformer as transformer_inference\nfrom .replace_policy import HFBertLayerPolicy, MegatronLayerPolicy\nfrom .replace_policy import replace_policies\nfrom ..constants import INFERENCE_GENERIC_MODE, INFERENCE_SPECIALIZED_MODE\nfrom ..runtime.weight_quantizer import WeightQuantization\n\n\nclass ReplaceWithTensorSlicing:\n def __init__(self, mp_group=None):\n if (torch.distributed.is_initialized() and mp_group is not None):\n self.gpu_index = torch.distributed.get_rank(group=mp_group)\n else:\n self.gpu_index = 0\n\n def merge_assert(self, dim1, dim2):\n assert dim1 > dim2, \\\n 'Merging tensors is not allowed here! Please use deepspeed load_checkpoint\\\n for merging your checkpoints before replacing the transformer layer with\\\n inference-kerenls'\n\n def qkv_copy(self, dst, src):\n if src is None:\n return src\n src_shape = src.shape\n dst_shape = dst.shape\n\n src_split = torch.split(src, src.shape[-1] // 3, dim=-1)\n\n if (len(src_shape) == 2 and len(dst_shape) == 2):\n if src_shape[1] == dst_shape[1]:\n return src\n\n self.merge_assert(src_shape[1], dst_shape[1])\n qkv_size = dst_shape[1] // 3\n qkv_split = [torch.split(src_s, qkv_size, dim=1) for src_s in src_split]\n\n weight_split = [\n torch.cat([qkv_s[i] for qkv_s in qkv_split],\n axis=1) for i in range(len(qkv_split[0]))\n ]\n dst = weight_split[self.gpu_index].to(torch.cuda.current_device())\n else:\n if src_shape[0] == dst_shape[0]:\n return src\n\n qkv_size = dst_shape[0] // 3\n qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split]\n bias_split = [\n torch.cat([qkv_s[i] for qkv_s in qkv_split],\n axis=0) for i in range(len(qkv_split[0]))\n ]\n dst = bias_split[self.gpu_index].to(torch.cuda.current_device())\n\n return dst.contiguous()\n\n def copy(self, dst, src):\n if src is None:\n return src\n\n src_shape = src.shape\n dst_shape = dst.shape\n\n if (len(src_shape) == 2 and len(dst_shape) == 2):\n\n if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]:\n return src\n\n if src_shape[0] != dst_shape[0]:\n self.merge_assert(src_shape[0], dst_shape[0])\n weight_split = torch.split(src, dst_shape[0])\n else:\n self.merge_assert(src_shape[1], dst_shape[1])\n weight_split = torch.split(src, dst_shape[1], dim=1)\n\n dst = weight_split[self.gpu_index].to(torch.cuda.current_device())\n else:\n if src_shape[0] == dst_shape[0]:\n return src\n\n bias_split = torch.split(src, dst_shape[-1])\n dst = bias_split[self.gpu_index].to(torch.cuda.current_device())\n\n return dst.contiguous()\n\n\ndef replace_transformer_layer(orig_layer_impl,\n model,\n policy=None,\n micro_batch_size=-1,\n config=None,\n seed=-1,\n hidden_size=-1,\n num_attention_heads=-1,\n mp_size=1,\n mp_group=None,\n preln=True,\n fp16=True,\n local_rank=-1,\n training=True,\n quantize=False,\n encoder_decoder=False,\n quantize_settings=None):\n \"\"\" Replace bert-style transformer layers with DeepSpeed's transformer layer\n Arguments:\n orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,\n e.g., transformers.modeling_bert.BertLayer.\n model (torch.nn.Module): user's nn.module representing their model\n policy: shows the policy for mapping from the orig_layer_impl to transformer parameters\n micro_batch_size (int): micro batch size per gpu used during training/eval\n config (dict): model config containing hidden size, attention heads, etc.\n seed (int): random seed value\n max_seq_length (int): max sequence length for training\n 
hidden_size (int): hidden dimension\n num_attention_heads (int): numebr of attention heads\n mp_size (int): model_parallelism degree\n mp_group : model_parallel gropu initialized on the modeling side\n preln (bool): does the original layer implementation do pre or post layer norm?\n fp16 (bool): fp16 or fp32\n local_rank (int): GPU rank (optional),\n training (bool): specifying whether kernel-injection is done for training/inference (set to false for inference-mode injection)\n quantize_settings (tuple): this setting shows how we can quantize a model for running it through the inference kernels.\n It includes (quantization_scales, merge_count, mlp_extra_grouping, quantize_groups).\n encoder_decoder (bool): this flag needs to be set for huggingface Bert models.\n\n Returns:\n Updated nn.module with replaced transformer layers\n \"\"\"\n def replace_with_policy(child, policy_cls, inference=False, preln=True, layer_id=0):\n preln = False if policy_cls is HFBertLayerPolicy else preln\n if policy_cls is HFBertLayerPolicy:\n policy = policy_cls(child, inference=inference, preln=preln)\n else:\n policy = policy_cls(child, inference=inference)\n\n if inference:\n hidden_size, num_attention_heads = policy.get_hidden_heads()\n\n attn_linear_layer, qkvw, qkvb, dense_w, dense_b, scale_attention = policy.attention()\n mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b = policy.mlp()\n attn_nw, attn_nb, input_nw, input_nb = policy.layerNorm()\n\n if quantize:\n if policy_cls is not HFBertLayerPolicy:\n qkvw = qkvw.to(torch.int8)\n dense_w = dense_w.to(torch.int8)\n _h4h_w = _h4h_w.to(torch.int8)\n _4hh_w = _4hh_w.to(torch.int8)\n elif fp16:\n qkvw = qkvw.half()\n dense_w = dense_w.half()\n _h4h_w = _h4h_w.half()\n _4hh_w = _4hh_w.half()\n\n if quantize or fp16:\n dense_b = dense_b.half()\n _h4h_b = _h4h_b.half()\n _4hh_b = _4hh_b.half()\n attn_nw = attn_nw.half()\n attn_nb = attn_nb.half()\n input_nw = input_nw.half()\n input_nb = input_nb.half()\n\n mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)\n\n if inference:\n transformer_config = transformer_inference.DeepSpeedInferenceConfig(\n hidden_size=hidden_size,\n heads=num_attention_heads,\n fp16=fp16,\n pre_layer_norm=preln,\n mp_size=mp_size,\n q_int8=quantize,\n encoder_decoder=(True if policy_cls is HFBertLayerPolicy else False),\n triangular_masking=(policy_cls is not HFBertLayerPolicy),\n local_attention=((config.attention_layers[layer_id] == \"local\")\n if hasattr(config,\n 'attention_layers') else False),\n window_size=(config.window_size if hasattr(config,\n 'window_size') else 1))\n\n if quantize and quantize_settings is not None:\n (quantization_scales,\n merge_count,\n mlp_extra_grouping,\n quantize_groups) = quantize_settings\n new_module = transformer_inference.DeepSpeedTransformerInference(\n transformer_config,\n mp_group=mp_group,\n quantize_scales=quantization_scales[layer_id],\n quantize_groups=quantize_groups,\n merge_count=merge_count,\n mlp_extra_grouping=mlp_extra_grouping,\n qkv_merging=(policy_cls is HFBertLayerPolicy))\n\n if quantize and qkvw.dtype != torch.int8:\n quantize_bits = 8\n quantizer = WeightQuantization()\n if policy_cls is HFBertLayerPolicy:\n data_quantized, _ = quantizer.quantize_data(qkvw, quantize_bits, quantize_groups * 3)\n else:\n data_quantized, _ = quantizer.quantize_data(qkvw, quantize_bits, quantize_groups)\n qkvw.copy_(data_quantized)\n qkvw = qkvw.to(torch.int8)\n else:\n new_module = transformer_inference.DeepSpeedTransformerInference(\n transformer_config,\n mp_group=mp_group,\n )\n 
new_module.config.scale_attention = scale_attention\n\n # we want the weights in [input, output] shape\n # linear layer is created with [input, output] shape\n # transpose it here to reduce inference cost!\n def transpose(data):\n data.view(-1).copy_(data.transpose(-1, -2).contiguous().view(-1))\n data = data.reshape(data.shape[-1], data.shape[-2])\n return data\n\n if attn_linear_layer:\n qkvw = transpose(qkvw.data)\n dense_w = transpose(dense_w)\n\n if mlp_linear_layer:\n _h4h_w = transpose(_h4h_w)\n _4hh_w = transpose(_4hh_w)\n\n attn_block = new_module.attention\n attn_block.attn_qkvw.data = mp_replace.qkv_copy(attn_block.attn_qkvw.data,\n qkvw)\n\n if qkvb is not None:\n qkvb = qkvb.half()\n attn_block.attn_qkvb.data = mp_replace.qkv_copy(\n attn_block.attn_qkvb.data,\n qkvb)\n else:\n attn_block.attn_qkvb = qkvb\n\n attn_block.attn_ow.data = mp_replace.copy(attn_block.attn_ow.data, dense_w)\n attn_block.attn_ob.data = mp_replace.copy(attn_block.attn_ob.data, dense_b)\n\n mpl_block = new_module.mlp\n mpl_block.inter_w.data = mp_replace.copy(mpl_block.inter_w.data, _h4h_w)\n mpl_block.inter_b.data = mp_replace.copy(mpl_block.inter_b.data, _h4h_b)\n mpl_block.output_w.data = mp_replace.copy(mpl_block.output_w.data, _4hh_w)\n mpl_block.output_b.data = mp_replace.copy(mpl_block.output_b.data, _4hh_b)\n\n new_module.mlp.attn_nw.data = attn_nw.to(torch.cuda.current_device())\n new_module.mlp.attn_nb.data = attn_nb.to(torch.cuda.current_device())\n new_module.norm_w.data = input_nw.to(torch.cuda.current_device())\n new_module.norm_b.data = input_nb.to(torch.cuda.current_device())\n else:\n transformer_config = deepspeed.DeepSpeedTransformerConfig(\n batch_size=micro_batch_size,\n hidden_size=config.hidden_size,\n heads=config.num_attention_heads,\n attn_dropout_ratio=config.attention_probs_dropout_prob,\n hidden_dropout_ratio=config.hidden_dropout_prob,\n num_hidden_layers=config.num_hidden_layers,\n initializer_range=config.initializer_range,\n seed=seed,\n fp16=fp16,\n pre_layer_norm=(False if policy_cls is HFBertLayerPolicy else preln),\n huggingface=encoder_decoder,\n local_rank=local_rank,\n stochastic_mode=True,\n normalize_invertible=True,\n training=training)\n new_module = deepspeed.DeepSpeedTransformerLayer(transformer_config)\n new_module.attn_qkvw.data = qkvw\n new_module.attn_qkvb.data = qkvb\n new_module.attn_ow.data = dense_w\n new_module.attn_ob.data = dense_b\n\n new_module.attn_nw.data = attn_nw\n new_module.attn_nb.data = attn_nb\n new_module.norm_w.data = input_nw\n new_module.norm_b.data = input_nb\n\n new_module.inter_w.data = _h4h_w\n new_module.inter_b.data = _h4h_b\n new_module.output_w.data = _4hh_w\n new_module.output_b.data = _4hh_b\n return new_module\n\n def replace_fn(child, _policy, layer_id=0):\n if training:\n # copy relevant state from child -> new module\n new_module = replace_with_policy(child, _policy, preln=preln)\n\n else:\n # copy relevant state from child -> new module\n new_module = replace_with_policy(child,\n _policy,\n inference=True,\n preln=(policy is not HFBertLayerPolicy),\n layer_id=layer_id)\n\n return new_module\n\n return replace_module(model=model,\n orig_class=orig_layer_impl,\n replace_fn=replace_fn,\n _replace_policy=policy)\n\n\ndef revert_transformer_layer(orig_layer_impl, model, config, preln=False):\n \"\"\" Revert DeepSpeed's transformer layer back to original bert-style transformer layer\n Arguments:\n orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced,\n e.g., 
transformers.modeling_bert.BertLayer.\n model (torch.nn.Module): user's nn.module representing their model\n config (dict): model config containing hidden size, attention heads, etc.\n\n Returns:\n Updated nn.module with original bert-style transformer layers\n \"\"\"\n def replace_fn(child, _replace_policy, layer_id):\n #from turing.nvidia_modelingpreln import BertLayer\n orig_module = orig_layer_impl(config)\n\n # copy relevant state from child -> original module\n qkvw = child.attn_qkvw.data\n qkvb = child.attn_qkvb.data\n\n qw, kw, vw = torch.chunk(qkvw, 3, axis=0)\n qb, kb, vb = torch.chunk(qkvb, 3, axis=0)\n\n orig_module.attention.self.query.weight.data = qw\n orig_module.attention.self.query.bias.data = qb\n orig_module.attention.self.key.weight.data = kw\n orig_module.attention.self.key.bias.data = kb\n orig_module.attention.self.value.weight.data = vw\n orig_module.attention.self.value.bias.data = vb\n\n orig_module.attention.output.dense.weight.data = child.attn_ow.data\n orig_module.attention.output.dense.bias.data = child.attn_ob.data\n\n attn_ln_w = child.attn_nw.data\n attn_ln_b = child.attn_nb.data\n if preln:\n orig_module.PostAttentionLayerNorm.weight.data = attn_ln_w\n orig_module.PostAttentionLayerNorm.bias.data = attn_ln_b\n else:\n orig_module.attention.output.LayerNorm.weight.data = attn_ln_w\n orig_module.attention.output.LayerNorm.bias.data = attn_ln_b\n\n inter_ff_w = child.inter_w.data\n inter_ff_b = child.inter_b.data\n if preln:\n orig_module.intermediate.dense_act.weight.data = inter_ff_w\n orig_module.intermediate.dense_act.bias.data = inter_ff_b\n else:\n orig_module.intermediate.dense.weight.data = inter_ff_w\n orig_module.intermediate.dense.bias.data = inter_ff_b\n\n orig_module.output.dense.weight.data = child.output_w.data\n orig_module.output.dense.bias.data = child.output_b.data\n\n transformer_ln_w = child.norm_w.data\n transformer_ln_b = child.norm_b.data\n if preln:\n orig_module.PreAttentionLayerNorm.weight.data = transformer_ln_w\n orig_module.PreAttentionLayerNorm.bias.data = transformer_ln_b\n else:\n orig_module.output.LayerNorm.weight.data = transformer_ln_w\n orig_module.output.LayerNorm.bias.data = transformer_ln_b\n return orig_module\n\n return replace_module(model=model,\n orig_class=deepspeed.DeepSpeedTransformerLayer,\n replace_fn=replace_fn,\n _replace_policy=None)\n\n\ndef replace_module(model, orig_class, replace_fn, _replace_policy):\n \"\"\" Scan the model for instances of ``orig_clas:`` to replace using ``replace_fn``.\n Arguments:\n model (torch.nn.Module): the model to augment\n orig_class (torch.nn.Module): the module to search for\n replace_fn (method): a method to convert instances of ``orig_class`` to the\n desired type and return a new instance.\n\n Returns:\n A modified ``model``.\n \"\"\"\n policy = {}\n if orig_class is not None:\n policy.update({orig_class: (replace_fn, _replace_policy)})\n else:\n for plcy in replace_policies:\n # instantiate a throw-away policy in order to populate the _orig_layer_class\n _ = plcy(None)\n if plcy._orig_layer_class is not None:\n policy.update({plcy._orig_layer_class: (replace_fn, plcy)})\n assert len(policy.items()) > 0,\\\n \"No default policy found! 
Please specifiy your policy injection_policy (like {BertLayer:HFBEertLayerPolicy}).\" +\\\n \"You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py\"\n\n replaced_module, _ = _replace_module(model, policy)\n return replaced_module\n\n\ndef _replace_module(model, policies, layer_id=0):\n \"\"\" Traverse model's children recursively and apply any transformations in ``policies``.\n Arguments:\n model (torch.nn.Module): model to augment\n policies (dict): Mapping of source class to replacement function.\n\n Returns:\n Modified ``model``.\n \"\"\"\n for name, child in model.named_children():\n if child.__class__ in policies:\n orig = repr(child)\n setattr(\n model,\n name,\n policies[child.__class__][0](child,\n policies[child.__class__][-1],\n layer_id))\n new = getattr(model, name)\n layer_id += 1\n else:\n _, layer_id = _replace_module(child, policies, layer_id=layer_id)\n\n return model, layer_id\n"
] | [
[
"torch.cat",
"torch.cuda.current_device",
"torch.distributed.is_initialized",
"torch.split",
"torch.chunk",
"torch.distributed.get_rank"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
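A minimal single-process sketch of the row-slicing idea behind ReplaceWithTensorSlicing.copy in the row above: split a full checkpoint weight along one dimension and keep the shard owned by the current model-parallel rank. The shapes are invented and the rank is hard-coded; the real code obtains it from torch.distributed.get_rank(group=mp_group).

import torch

full_weight = torch.arange(24.0).reshape(8, 3)  # pretend checkpoint weight
mp_size, gpu_index = 2, 0                       # 2-way model parallelism, this process is rank 0

rows_per_shard = full_weight.shape[0] // mp_size
shard = torch.split(full_weight, rows_per_shard, dim=0)[gpu_index].contiguous()
print(shard.shape)  # torch.Size([4, 3])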
wanshanhsieh/distiller | [
"2ae998c066b975bcb38b8ecc2927bf1015f634b9"
] | [
"examples/classifier_compression/compress_classifier.py"
] | [
"#\n# Copyright (c) 2018 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"This is an example application for compressing image classification models.\n\nThe application borrows its main flow code from torchvision's ImageNet classification\ntraining sample application (https://github.com/pytorch/examples/tree/master/imagenet).\nWe tried to keep it similar, in order to make it familiar and easy to understand.\n\nIntegrating compression is very simple: simply add invocations of the appropriate\ncompression_scheduler callbacks, for each stage in the training. The training skeleton\nlooks like the pseudo code below. The boiler-plate Pytorch classification training\nis speckled with invocations of CompressionScheduler.\n\nFor each epoch:\n compression_scheduler.on_epoch_begin(epoch)\n train()\n validate()\n compression_scheduler.on_epoch_end(epoch)\n save_checkpoint()\n\ntrain():\n For each training step:\n compression_scheduler.on_minibatch_begin(epoch)\n output = model(input)\n loss = criterion(output, target)\n compression_scheduler.before_backward_pass(epoch)\n loss.backward()\n compression_scheduler.before_parameter_optimization(epoch)\n optimizer.step()\n compression_scheduler.on_minibatch_end(epoch)\n\n\nThis exmple application can be used with torchvision's ImageNet image classification\nmodels, or with the provided sample models:\n\n- ResNet for CIFAR: https://github.com/junyuseu/pytorch-cifar-models\n- MobileNet for ImageNet: https://github.com/marvis/pytorch-mobilenet\n\"\"\"\n\nimport traceback\nimport logging\nfrom functools import partial\nimport distiller\nfrom distiller.models import create_model\nimport distiller.apputils.image_classifier as classifier\nimport distiller.apputils as apputils\nimport local_parser as parser\nimport os\nimport numpy as np\n\n\n# Logger handle\nmsglogger = logging.getLogger()\n\n\ndef main():\n # Parse arguments\n args = parser.add_cmdline_args(classifier.init_classifier_compression_arg_parser()).parse_args()\n app = ClassifierCompressorSampleApp(args, script_dir=os.path.dirname(__file__))\n if app.handle_subapps():\n return\n init_knowledge_distillation(app.args, app.model, app.compression_scheduler)\n app.run_training_loop()\n # Finally run results on the test set\n return app.test()\n\n \ndef handle_subapps(model, criterion, optimizer, compression_scheduler, pylogger, args):\n def load_test_data(args):\n test_loader = classifier.load_data(args, load_train=False, load_val=False, load_test=True)\n return test_loader\n\n do_exit = False\n if args.greedy:\n greedy(model, criterion, optimizer, pylogger, args)\n do_exit = True\n elif args.summary:\n # This sample application can be invoked to produce various summary reports\n for summary in args.summary:\n distiller.model_summary(model, summary, args.dataset)\n do_exit = True\n elif args.export_onnx is not None:\n distiller.export_img_classifier_to_onnx(model,\n os.path.join(msglogger.logdir, args.export_onnx),\n args.dataset, add_softmax=True, verbose=False)\n do_exit = 
True\n elif args.qe_calibration:\n classifier.acts_quant_stats_collection(model, criterion, pylogger, args)\n do_exit = True\n elif args.activation_histograms:\n classifier.acts_histogram_collection(model, criterion, pylogger, args)\n do_exit = True\n elif args.sensitivity is not None:\n test_loader = load_test_data(args)\n sensitivities = np.arange(*args.sensitivity_range)\n sensitivity_analysis(model, criterion, test_loader, pylogger, args, sensitivities)\n do_exit = True\n elif args.evaluate:\n test_loader = load_test_data(args)\n activations_collectors = classifier.create_activation_stats_collectors(model, *args.activation_stats)\n classifier.evaluate_model(model, criterion, test_loader, pylogger, activations_collectors,\n args, compression_scheduler)\n do_exit = True\n elif args.thinnify:\n assert args.resumed_checkpoint_path is not None, \\\n \"You must use --resume-from to provide a checkpoint file to thinnify\"\n distiller.contract_model(model, compression_scheduler.zeros_mask_dict, args.arch, args.dataset, optimizer=None)\n apputils.save_checkpoint(0, args.arch, model, optimizer=None, scheduler=compression_scheduler,\n name=\"{}_thinned\".format(args.resumed_checkpoint_path.replace(\".pth.tar\", \"\")),\n dir=msglogger.logdir)\n msglogger.info(\"Note: if your model collapsed to random inference, you may want to fine-tune\")\n do_exit = True\n return do_exit\n\n\ndef init_knowledge_distillation(args, model, compression_scheduler):\n args.kd_policy = None\n if args.kd_teacher:\n teacher = create_model(args.kd_pretrained, args.dataset, args.kd_teacher, device_ids=args.gpus)\n if args.kd_resume:\n teacher = apputils.load_lean_checkpoint(teacher, args.kd_resume)\n dlw = distiller.DistillationLossWeights(args.kd_distill_wt, args.kd_student_wt, args.kd_teacher_wt)\n args.kd_policy = distiller.KnowledgeDistillationPolicy(model, teacher, args.kd_temp, dlw)\n compression_scheduler.add_policy(args.kd_policy, starting_epoch=args.kd_start_epoch, ending_epoch=args.epochs,\n frequency=1)\n msglogger.info('\\nStudent-Teacher knowledge distillation enabled:')\n msglogger.info('\\tTeacher Model: %s', args.kd_teacher)\n msglogger.info('\\tTemperature: %s', args.kd_temp)\n msglogger.info('\\tLoss Weights (distillation | student | teacher): %s',\n ' | '.join(['{:.2f}'.format(val) for val in dlw]))\n msglogger.info('\\tStarting from Epoch: %s', args.kd_start_epoch)\n\n\ndef early_exit_init(args):\n if not args.earlyexit_thresholds:\n return\n args.num_exits = len(args.earlyexit_thresholds) + 1\n args.loss_exits = [0] * args.num_exits\n args.losses_exits = []\n args.exiterrors = []\n msglogger.info('=> using early-exit threshold values of %s', args.earlyexit_thresholds)\n\n\nclass ClassifierCompressorSampleApp(classifier.ClassifierCompressor):\n def __init__(self, args, script_dir):\n super().__init__(args, script_dir)\n early_exit_init(args)\n # Save the randomly-initialized model before training (useful for lottery-ticket method)\n if args.save_untrained_model:\n ckpt_name = '_'.join((self.args.name or \"\", \"untrained\"))\n apputils.save_checkpoint(0, self.args.arch, self.model,\n name=ckpt_name, dir=msglogger.logdir)\n\n\n def handle_subapps(self):\n return handle_subapps(self.model, self.criterion, self.optimizer,\n self.compression_scheduler, self.pylogger, self.args)\n\n\ndef sensitivity_analysis(model, criterion, data_loader, loggers, args, sparsities):\n # This sample application can be invoked to execute Sensitivity Analysis on your\n # model. 
The ouptut is saved to CSV and PNG.\n msglogger.info(\"Running sensitivity tests\")\n if not isinstance(loggers, list):\n loggers = [loggers]\n test_fnc = partial(classifier.test, test_loader=data_loader, criterion=criterion,\n loggers=loggers, args=args,\n activations_collectors=classifier.create_activation_stats_collectors(model))\n which_params = [param_name for param_name, _ in model.named_parameters()]\n sensitivity = distiller.perform_sensitivity_analysis(model,\n net_params=which_params,\n sparsities=sparsities,\n test_func=test_fnc,\n group=args.sensitivity)\n distiller.sensitivities_to_png(sensitivity, os.path.join(msglogger.logdir, 'sensitivity.png'))\n distiller.sensitivities_to_csv(sensitivity, os.path.join(msglogger.logdir, 'sensitivity.csv'))\n\n\ndef greedy(model, criterion, optimizer, loggers, args):\n train_loader, val_loader, test_loader = classifier.load_data(args)\n\n test_fn = partial(classifier.test, test_loader=test_loader, criterion=criterion,\n loggers=loggers, args=args, activations_collectors=None)\n train_fn = partial(classifier.train, train_loader=train_loader, criterion=criterion, args=args)\n assert args.greedy_target_density is not None\n distiller.pruning.greedy_filter_pruning.greedy_pruner(model, args,\n args.greedy_target_density,\n args.greedy_pruning_step,\n test_fn, train_fn)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print(\"\\n-- KeyboardInterrupt --\")\n except Exception as e:\n if msglogger is not None:\n # We catch unhandled exceptions here in order to log them to the log file\n # However, using the msglogger as-is to do that means we get the trace twice in stdout - once from the\n # logging operation and once from re-raising the exception. So we remove the stdout logging handler\n # before logging the exception\n handlers_bak = msglogger.handlers\n msglogger.handlers = [h for h in msglogger.handlers if type(h) != logging.StreamHandler]\n msglogger.error(traceback.format_exc())\n msglogger.handlers = handlers_bak\n raise\n finally:\n if msglogger is not None and hasattr(msglogger, 'log_filename'):\n msglogger.info('')\n msglogger.info('Log file for this run: ' + os.path.realpath(msglogger.log_filename))\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
watchsea/Trd-Capsule-TF | [
"7424ad73a0f67a12e20fdf475be27fffe53e2970"
] | [
"utils.py"
] | [
"import os\nimport scipy\nimport numpy as np\nimport tensorflow as tf\n\nfrom config import cfg\nimport rb_data\n\n\ndef load_mnist(path, is_training):\n if is_training:\n data = rb_data.read_data_sets(\"\",one_hot=False)\n trX = data.train.images\n trY = data.train.labels\n return trX, trY\n else:\n TEST_IMAGES = 'test_data/rb.HOT.15m(1).csv'\n data = rb_data.read_test_sets(TEST_IMAGES,one_hot=False)\n teX = data.images\n teY = data.labels\n return teX, teY\n\n\ndef get_batch_data():\n trX, trY = load_mnist(cfg.dataset, cfg.is_training)\n\n data_queues = tf.train.slice_input_producer([trX, trY])\n X, Y = tf.train.shuffle_batch(data_queues, num_threads=cfg.num_threads,\n batch_size=cfg.batch_size,\n capacity=cfg.batch_size * 64,\n min_after_dequeue=cfg.batch_size * 32,\n allow_smaller_final_batch=False)\n\n return(X, Y)\n\n\ndef save_images(imgs, size, path):\n '''\n Args:\n imgs: [batch_size, image_height, image_width]\n size: a list with tow int elements, [image_height, image_width]\n path: the path to save images\n '''\n imgs = (imgs + 1.) / 2 # inverse_transform\n return(scipy.misc.imsave(path, mergeImgs(imgs, size)))\n\n\ndef mergeImgs(images, size):\n h, w = images.shape[1], images.shape[2]\n imgs = np.zeros((h * size[0], w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n imgs[j * h:j * h + h, i * w:i * w + w, :] = image\n\n return imgs\n\n\nif __name__ == '__main__':\n X, Y = load_mnist(cfg.dataset, cfg.is_training)\n print(X.get_shape())\n print(X.dtype)\n"
] | [
[
"numpy.zeros",
"tensorflow.train.shuffle_batch",
"tensorflow.train.slice_input_producer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
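tf.train.slice_input_producer and tf.train.shuffle_batch in the row above are TensorFlow 1.x queue-runner APIs, which is consistent with the single "1.10" entry under tensorflow. A rough tf.data sketch of the same get_batch_data behaviour, with invented array shapes and batch size (TF 2.x eager style, not a drop-in replacement for the queue-based code):

import numpy as np
import tensorflow as tf

trX = np.random.rand(1000, 60, 4).astype("float32")    # invented feature shape
trY = np.random.randint(0, 3, size=1000).astype("int64")

dataset = (tf.data.Dataset.from_tensor_slices((trX, trY))
           .shuffle(buffer_size=4096)
           .batch(128, drop_remainder=True))
X, Y = next(iter(dataset))
print(X.shape, Y.shape)   # (128, 60, 4) (128,)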
NelleV/grave | [
"19aec1eacb2bd3577fd2660f6199dcab6f8d97c5"
] | [
"examples/plot_grid.py"
] | [
"\"\"\"\nLabeled 2D Grid\n---------------\n\nThis example shows both labels and custom layout.\n\"\"\"\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random\nfrom grave import plot_network, style_merger\n\n\n\ndef degree_colorer(node_attributes):\n deg = node_attributes['degree']\n shape = 'o' #random.choice(['s', 'o', '^', 'v', '8'])\n return {'color': 'b', 'size': 20*deg, 'shape': shape}\n\ndef font_styler(attributes):\n return {'font_size': 8,\n 'font_weight': .5,\n 'font_color': 'k'}\n\ndef tiny_font_styler(attributes):\n return {'font_size': 4,\n 'font_weight': .5,\n 'font_color': 'r'}\n\ndef pathological_edge_style(edge_attrs):\n return {'color': random.choice(['r', (0, 1, 0, .5), 'xkcd:ocean'])}\n\n\nnetwork = nx.grid_2d_graph(4, 6)\n\nnx.set_node_attributes(network, dict(network.degree()), 'degree')\n\nfig, ax = plt.subplots()\nplot_network(network, ax=ax, layout=lambda G: {node: node for node in G},\n node_style=degree_colorer,\n edge_style=pathological_edge_style,\n node_label_style=font_styler,\n edge_label_style=tiny_font_styler)\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
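Why the identity layout in the row above works: nx.grid_2d_graph labels each node with its (row, column) tuple, so {node: node for node in G} is already a valid position mapping. A quick check, with an arbitrarily chosen grid size:

import networkx as nx

G = nx.grid_2d_graph(2, 3)
layout = {node: node for node in G}
print(sorted(layout.items())[:3])
# [((0, 0), (0, 0)), ((0, 1), (0, 1)), ((0, 2), (0, 2))]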
melqkiades/machine-learning-framework | [
"25f88bfbce30ebe25ae1ce8830e1cb1945e16b8f"
] | [
"mini_projects/cat_in_the_dat/citd_create_folds.py"
] | [
"import pandas\nimport time\nfrom sklearn import model_selection\n\nfrom mini_projects.cat_in_the_dat.citd_constants import TRAINING_DATA_FOLDS, \\\n TRAINING_DATA\n\n\ndef create_folds():\n data_frame = pandas.read_csv(TRAINING_DATA)\n data_frame[\"kfold\"] = -1\n\n data_frame = data_frame.sample(frac=1).reset_index(drop=True)\n\n kf = model_selection.StratifiedKFold(n_splits=5, shuffle=False,\n random_state=42)\n\n for fold, (train_idx, val_idx) in enumerate(\n kf.split(X=data_frame, y=data_frame.target.values)):\n print(len(train_idx), len(val_idx))\n data_frame.loc[val_idx, 'kfold'] = fold\n\n data_frame.to_csv(TRAINING_DATA_FOLDS, index=False)\n\n\ndef main():\n create_folds()\n\n\nstart = time.time()\nmain()\nend = time.time()\ntotal_time = end - start\nprint(\"%s: Total time = %f seconds\" % (time.strftime(\"%Y/%m/%d-%H:%M:%S\"), total_time))\n"
] | [
[
"pandas.read_csv",
"sklearn.model_selection.StratifiedKFold"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
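One caveat about the fold-creation row above: random_state only takes effect when shuffle=True, and recent scikit-learn releases reject the shuffle=False/random_state combination outright. A self-contained variant of create_folds with shuffling enabled — toy target column, no CSV I/O, column names mirroring the snippet above:

import pandas as pd
from sklearn import model_selection

data_frame = pd.DataFrame({"target": [0, 1] * 50})
data_frame["kfold"] = -1
data_frame = data_frame.sample(frac=1, random_state=42).reset_index(drop=True)

kf = model_selection.StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold, (_, val_idx) in enumerate(kf.split(X=data_frame, y=data_frame.target.values)):
    data_frame.loc[val_idx, "kfold"] = fold

print(data_frame["kfold"].value_counts())   # 20 rows per fold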
szj2ys/deal_with_the_tasks_and_challenges | [
"94b9f4aad26c7e2ec5a59cf67e9e977bfa3d5221"
] | [
"demos/pandas/panda_merge.py"
] | [
"import pandas as pd\n\n#定义资料集\nleft = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']\n})\nright = pd.DataFrame({\n 'key': ['K0', 'K1', 'K2', 'K3'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']\n})\n\n#依据key横向合并\nres = pd.merge(left, right, on='key')\n\nprint(res)\n\n#定义资料集\nleft = pd.DataFrame({\n 'key1': ['K0', 'K0', 'K1', 'K2'],\n 'key2': ['K0', 'K1', 'K0', 'K1'],\n 'A': ['A0', 'A1', 'A2', 'A3'],\n 'B': ['B0', 'B1', 'B2', 'B3']\n})\nright = pd.DataFrame({\n 'key1': ['K0', 'K1', 'K1', 'K2'],\n 'key2': ['K0', 'K0', 'K0', 'K0'],\n 'C': ['C0', 'C1', 'C2', 'C3'],\n 'D': ['D0', 'D1', 'D2', 'D3']\n})\n\n#依据key1与key2 columns进行合并,并打印出四种结果['left', 'right', 'outer', 'inner']\nres = pd.merge(left, right, on=['key1', 'key2'], how='inner')\nprint(res)\n\nres = pd.merge(left, right, on=['key1', 'key2'], how='outer')\nprint(res)\n\nres = pd.merge(left, right, on=['key1', 'key2'], how='left')\nprint(res)\n\nres = pd.merge(left, right, on=['key1', 'key2'], how='right')\nprint(res)\n\n#定义资料集\ndf1 = pd.DataFrame({'col1': [0, 1], 'col_left': ['a', 'b']})\ndf2 = pd.DataFrame({'col1': [1, 2, 2], 'col_right': [2, 2, 2]})\n\n# 依据col1进行合并,并启用indicator=True\nres = pd.merge(df1, df2, on='col1', how='outer', indicator=True)\nprint(res)\n\n# 自定indicator column的名称\nres = pd.merge(df1, df2, on='col1', how='outer', indicator='indicator_column')\nprint(res)\n"
] | [
[
"pandas.merge",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
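A small follow-on to the indicator example in the row above: the _merge column added by indicator=True is what makes an anti-join (rows present only in df1) a one-liner.

import pandas as pd

df1 = pd.DataFrame({'col1': [0, 1], 'col_left': ['a', 'b']})
df2 = pd.DataFrame({'col1': [1, 2, 2], 'col_right': [2, 2, 2]})

res = pd.merge(df1, df2, on='col1', how='outer', indicator=True)
left_only = res[res['_merge'] == 'left_only'].drop(columns='_merge')
print(left_only)   # the col1 == 0 row, which has no match in df2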
fred3m/scarlet_test | [
"a33c6ae43e041c55353d1fcbed2d7a9533c197b5"
] | [
"scarlet_test/measure.py"
] | [
"import os\nfrom typing import List, Sequence, Dict, Tuple, Union\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import ticker as mticker\n\nfrom .core import __DATA_PATH__, get_filename, get_branches\n\n\ndef adjacent_values(vals: np.ndarray, q1: int, q3: int) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Get adjacent values for whiskers\n\n :param vals: The array that is being plotted\n :param q1: The lower quartile\n :param q3: The upper quartile\n :return: lower whisker and upper whisker value\n \"\"\"\n upper_adjacent_value = q3 + (q3 - q1) * 1.5\n upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])\n\n lower_adjacent_value = q1 - (q3 - q1) * 1.5\n lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)\n return lower_adjacent_value, upper_adjacent_value\n\n\ndef measure_blend(\n data: Dict[str, np.ndarray],\n sources: List,\n filters: Sequence[str],\n) -> List[Dict[str, float]]:\n \"\"\"\n Measure all of the fake sources in a single blend\n\n :param data: The numpy file with blend data\n :param sources: The sources in the blend\n :param filters: The filter name for each band\n :return: List of measurements for each matched source\n \"\"\"\n import scarlet.measure\n\n # Extract necessary fields from the data\n centers = data[\"centers\"]\n matched = data[\"matched\"]\n matched_centers = np.array([[m[\"y\"], m[\"x\"]] for m in matched]).astype(int)\n\n true_flux = np.array([matched[f + \"magVar\"] for f in filters])\n\n measurements = []\n for k, (cy, cx) in enumerate(matched_centers):\n # Get the matching index for the source based on its center\n matched_idx = np.where((centers[:, 0] == cy) & (centers[:, 1] == cx))[0][0]\n\n # Calculate the flux difference in each band\n source = sources[matched_idx]\n flux = 27 - 2.5*np.log10(scarlet.measure.flux(source))\n\n diff = true_flux[:, k] - flux\n\n measurement = {filters[f] + \" diff\": diff[f] for f in range(len(filters))}\n measurements.append(measurement)\n\n return measurements\n\n\ndef check_log(data: np.ndarray, ax: plt.axis):\n \"\"\"Check to see if the data should use a log scale\n\n :param data: array that is being plotted\n :param ax: The axis that contains the plot\n :return: Whether or not to use a log scale\n \"\"\"\n _data = np.log10(data)\n ymin, ymax = np.min(_data), np.max(_data)\n # Use a log scale if the range is more than 2 orders of magnitude\n if ymax - ymin > 2:\n ymin = int(np.max([1e-50, ymin - 1]))\n ymax = int(ymax+1)\n ax.yaxis.set_major_formatter(mticker.StrMethodFormatter(\"$10^{{{x:.0f}}}$\"))\n ax.yaxis.set_ticks([\n np.log10(x) for p in range(ymin, ymax)\n for x in np.linspace(10 ** p, 10 ** (p + 1), 10)], minor=True)\n return True\n return False\n\n\nclass Metric:\n \"\"\"A metric to be calculated based on a set of deblended sources\n \"\"\"\n def __init__(\n self,\n name: str,\n units: str,\n ):\n \"\"\"Initialize the class\n\n :param name: Name of the metric.\n :param units: Units of the metric.\n :param use_abs: Whether or not this metric is an absolute value\n \"\"\"\n self.name = name\n self.units = units\n\n def plot(\n self,\n set_id: str,\n measurements: Dict[str, np.rec.recarray] = None,\n plot_indices: Sequence = None,\n scatter_indices: Sequence = None,\n ) -> plt.Figure:\n \"\"\"Create a plot using the records for a given set ID.\n\n :param set_id: ID of the set to analyze\n :param measurements: Dictionary (branch name, measurments)\n of measurements for each branch.\n :param plot_indices: The indices or slice of `measurements`\n to plot. 
If `plot_indices` is `None` then only the\n 10 latest branches are used.\n :param scatter_indices: The indices or slice of `measurements`\n to include in the scatter plot. If `scatter_indices` is `None`\n then only the last two branches are plotted.\n \"\"\"\n if measurements is None:\n branches = get_branches()\n measurements = {\n branch: np.load(os.path.join(__DATA_PATH__, set_id, get_filename(branch)))[\"records\"]\n for branch in branches\n }\n if plot_indices is None:\n plot_indices = slice(-10, None)\n if scatter_indices is None:\n scatter_indices = slice(-2, None)\n\n # First display the scatter plots\n fig, ax = plt.subplots(1, 3, figsize=(15, 5))\n records = {m: measurements[m] for m in list(measurements.keys())[scatter_indices]}\n num_prs = len(records)\n\n # Check to see if we need to plot a log axis\n islog = False\n for rec, (branch, record) in enumerate(records.items()):\n islog |= check_log(record[self.name], ax[2])\n\n # Display the scatter plot for each PR\n for rec, (pr, record) in enumerate(records.items()):\n x = np.arange(len(record[self.name]))\n if islog:\n data = np.log10(record[self.name])\n else:\n data = record[self.name]\n ax[2].scatter(x, data, label=pr, s=10 * (num_prs - rec))\n ax[2].legend()\n ax[2].set_xlabel(\"blend index\")\n\n # Next create the violin and box plots\n records = {m: measurements[m] for m in list(measurements.keys())[plot_indices]}\n\n for ax_n, plot_type in enumerate([\"box\", \"violin\"]):\n # Extract the data\n x = np.arange(len(records))\n data = []\n for s, (pr, record) in enumerate(records.items()):\n data.append(record[self.name])\n\n # Check if we need a log plot\n islog = check_log(data, ax[ax_n])\n if islog:\n data = [np.log10(d) for d in data]\n\n if plot_type == \"violin\":\n # Make the violin plot\n ax[ax_n].violinplot(data, x, showmeans=False, showextrema=False, showmedians=False)\n\n # Calculate the quartile whiskers\n quartile1, medians, quartile3 = np.percentile(data, [25, 50, 75], axis=1)\n whiskers = np.array([\n adjacent_values(sorted_array, q1, q3)\n for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])\n whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]\n # Display the whiskers\n ax[ax_n].scatter(x, medians, marker='o', color='white', s=30, zorder=3)\n ax[ax_n].vlines(x, quartile1, quartile3, color='k', linestyle='-', lw=5)\n ax[ax_n].vlines(x, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1)\n else:\n # Make the box plot\n ax[ax_n].boxplot(data)\n\n x_labels = tuple(records.keys())\n ax[1].xaxis.set_ticks(np.arange(len(x_labels)))\n ax[0].set_xticklabels(x_labels, size='small', rotation='vertical')\n ax[1].set_xticklabels(x_labels, size='small', rotation='vertical')\n\n ax[0].set_ylabel(self.units)\n fig.suptitle(self.name, y=.95)\n plt.tight_layout()\n\n return fig\n\n\n# All of the metrics that are stored and plotted for regression testing\nall_metrics = {\n \"init time\": Metric(\"init time\", \"time (ms)\"),\n \"runtime\": Metric(\"runtime\", \"time/source (ms)\"),\n \"iterations\": Metric(\"iterations\", \"iterations\"),\n \"init logL\": Metric(\"init logL\", \"logL\"),\n \"logL\": Metric(\"logL\", \"logL\"),\n \"g diff\": Metric(\"g diff\", \"truth-model\"),\n \"r diff\": Metric(\"r diff\", \"truth-model\"),\n \"i diff\": Metric(\"i diff\", \"truth-model\"),\n \"z diff\": Metric(\"z diff\", \"truth-model\"),\n \"y diff\": Metric(\"y diff\", \"truth-model\"),\n}\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.ticker.StrMethodFormatter",
"numpy.linspace",
"numpy.min",
"numpy.clip",
"matplotlib.pyplot.subplots",
"numpy.percentile",
"numpy.max",
"numpy.log10",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MatD3mons/Multi_Face_Detector | [
"06c335856bd1ed45cfa1b620032f3fafbcc2baae"
] | [
"Train/python/coco_utils.py"
] | [
"import copy\nimport os\nfrom PIL import Image\n\nimport torch\nimport torch.utils.data\nimport torchvision\n\nfrom pycocotools import mask as coco_mask\nfrom pycocotools.coco import COCO\n\nimport python.transforms as T\n\n\nclass FilterAndRemapCocoCategories(object):\n def __init__(self, categories, remap=True):\n self.categories = categories\n self.remap = remap\n\n def __call__(self, image, target):\n anno = target[\"annotations\"]\n anno = [obj for obj in anno if obj[\"category_id\"] in self.categories]\n if not self.remap:\n target[\"annotations\"] = anno\n return image, target\n anno = copy.deepcopy(anno)\n for obj in anno:\n obj[\"category_id\"] = self.categories.index(obj[\"category_id\"])\n target[\"annotations\"] = anno\n return image, target\n\n\ndef convert_coco_poly_to_mask(segmentations, height, width):\n masks = []\n for polygons in segmentations:\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return masks\n\n\nclass ConvertCocoPolysToMask(object):\n def __call__(self, image, target):\n w, h = image.size\n\n image_id = target[\"image_id\"]\n image_id = torch.tensor([image_id])\n\n anno = target[\"annotations\"]\n\n anno = [obj for obj in anno if obj['iscrowd'] == 0]\n\n boxes = [obj[\"bbox\"] for obj in anno]\n # guard against no boxes via resizing\n boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n boxes[:, 2:] += boxes[:, :2]\n boxes[:, 0::2].clamp_(min=0, max=w)\n boxes[:, 1::2].clamp_(min=0, max=h)\n\n classes = [obj[\"category_id\"] for obj in anno]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n segmentations = [obj[\"segmentation\"] for obj in anno]\n masks = convert_coco_poly_to_mask(segmentations, h, w)\n\n keypoints = None\n if anno and \"keypoints\" in anno[0]:\n keypoints = [obj[\"keypoints\"] for obj in anno]\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])\n boxes = boxes[keep]\n classes = classes[keep]\n masks = masks[keep]\n if keypoints is not None:\n keypoints = keypoints[keep]\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = classes\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n if keypoints is not None:\n target[\"keypoints\"] = keypoints\n\n # for conversion to coco api\n area = torch.tensor([obj[\"area\"] for obj in anno])\n iscrowd = torch.tensor([obj[\"iscrowd\"] for obj in anno])\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n return image, target\n\n\ndef _coco_remove_images_without_annotations(dataset, cat_list=None):\n def _has_only_empty_bbox(anno):\n return all(any(o <= 1 for o in obj[\"bbox\"][2:]) for obj in anno)\n\n def _count_visible_keypoints(anno):\n return sum(sum(1 for v in ann[\"keypoints\"][2::3] if v > 0) for ann in anno)\n\n min_keypoints_per_image = 10\n\n def _has_valid_annotation(anno):\n # if it's empty, there is no annotation\n if len(anno) == 0:\n return False\n # if all boxes have close to zero area, there is no annotation\n if _has_only_empty_bbox(anno):\n return False\n # keypoints task have a slight different critera for considering\n # if an annotation is 
valid\n if \"keypoints\" not in anno[0]:\n return True\n # for keypoint detection tasks, only consider valid images those\n # containing at least min_keypoints_per_image\n if _count_visible_keypoints(anno) >= min_keypoints_per_image:\n return True\n return False\n\n assert isinstance(dataset, torchvision.datasets.CocoDetection)\n ids = []\n for ds_idx, img_id in enumerate(dataset.ids):\n ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = dataset.coco.loadAnns(ann_ids)\n if cat_list:\n anno = [obj for obj in anno if obj[\"category_id\"] in cat_list]\n if _has_valid_annotation(anno):\n ids.append(ds_idx)\n\n dataset = torch.utils.data.Subset(dataset, ids)\n return dataset\n\n\ndef convert_to_coco_api(ds):\n coco_ds = COCO()\n # annotation IDs need to start at 1, not 0, see torchvision issue #1530\n ann_id = 1\n dataset = {'images': [], 'categories': [], 'annotations': []}\n categories = set()\n for img_idx in range(len(ds)):\n # find better way to get target\n # targets = ds.get_annotations(img_idx)\n img, targets = ds[img_idx]\n image_id = targets[\"image_id\"].item()\n img_dict = {}\n img_dict['id'] = image_id\n img_dict['height'] = img.shape[-2]\n img_dict['width'] = img.shape[-1]\n dataset['images'].append(img_dict)\n bboxes = targets[\"boxes\"]\n bboxes[:, 2:] -= bboxes[:, :2]\n bboxes = bboxes.tolist()\n labels = targets['labels'].tolist()\n areas = targets['area'].tolist()\n iscrowd = targets['iscrowd'].tolist()\n if 'masks' in targets:\n masks = targets['masks']\n # make masks Fortran contiguous for coco_mask\n masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)\n if 'keypoints' in targets:\n keypoints = targets['keypoints']\n keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()\n num_objs = len(bboxes)\n for i in range(num_objs):\n ann = {}\n ann['image_id'] = image_id\n ann['bbox'] = bboxes[i]\n ann['category_id'] = labels[i]\n categories.add(labels[i])\n ann['area'] = areas[i]\n ann['iscrowd'] = iscrowd[i]\n ann['id'] = ann_id\n if 'masks' in targets:\n ann[\"segmentation\"] = coco_mask.encode(masks[i].numpy())\n if 'keypoints' in targets:\n ann['keypoints'] = keypoints[i]\n ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])\n dataset['annotations'].append(ann)\n ann_id += 1\n dataset['categories'] = [{'id': i} for i in sorted(categories)]\n coco_ds.dataset = dataset\n coco_ds.createIndex()\n return coco_ds\n\n\ndef get_coco_api_from_dataset(dataset):\n for _ in range(10):\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n break\n if isinstance(dataset, torch.utils.data.Subset):\n dataset = dataset.dataset\n if isinstance(dataset, torchvision.datasets.CocoDetection):\n return dataset.coco\n return convert_to_coco_api(dataset)\n\n\nclass CocoDetection(torchvision.datasets.CocoDetection):\n def __init__(self, img_folder, ann_file, transforms):\n super(CocoDetection, self).__init__(img_folder, ann_file)\n self._transforms = transforms\n\n def __getitem__(self, idx):\n img, target = super(CocoDetection, self).__getitem__(idx)\n image_id = self.ids[idx]\n target = dict(image_id=image_id, annotations=target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n return img, target\n\n\ndef get_coco(root, image_set, transforms, mode='instances'):\n anno_file_template = \"{}_{}2017.json\"\n PATHS = {\n \"train\": (\"train2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"train\"))),\n \"val\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, 
\"val\"))),\n # \"train\": (\"val2017\", os.path.join(\"annotations\", anno_file_template.format(mode, \"val\")))\n }\n\n t = [ConvertCocoPolysToMask()]\n\n if transforms is not None:\n t.append(transforms)\n transforms = T.Compose(t)\n\n img_folder, ann_file = PATHS[image_set]\n img_folder = os.path.join(root, img_folder)\n ann_file = os.path.join(root, ann_file)\n\n dataset = CocoDetection(img_folder, ann_file, transforms=transforms)\n\n if image_set == \"train\":\n dataset = _coco_remove_images_without_annotations(dataset)\n\n # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)])\n\n return dataset\n\n\ndef get_coco_kp(root, image_set, transforms):\n return get_coco(root, image_set, transforms, mode=\"person_keypoints\")\n"
] | [
[
"torch.zeros",
"torch.tensor",
"torch.as_tensor",
"torch.utils.data.Subset",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
truher/demos | [
"6fe9a325931519f04b121dc795c0ec367e84c0f7"
] | [
"interpolation_performance.py"
] | [
"import timeit\nimport numpy as np\nfrom typing import List, Iterable\nimport itertools\nimport collections\nfrom scipy.interpolate import interp1d #type:ignore\n\nloops=1000\n\n# data\nd = np.linspace(0,999,1000) \nfp = list(d)\nxp = np.linspace(0,999,1000)\n# interp x axis (in-between points)\nx = np.linspace(0,999,1999)\n\n# 177 us, in between points\ndef f1() -> List[np.float64]:\n return list(np.interp(x, xp, fp)) #type:ignore\nt = timeit.timeit(f1,number=loops)\nprint(f'f1 interp {1e6*t/loops} us')\n\n# 175us in between points\ndef f1a() -> List[np.float64]:\n ff = interp1d(xp, d)\n return list(ff(x))\nt = timeit.timeit(f1a,number=loops)\nprint(f'f1a interp1d {1e6*t/loops} us')\n\n# interp x axis (trailing points)\nxt = np.linspace(0,999.5,2000)\n# 177 us, constant extrapolation (repeat last)\ndef f1t() -> List[np.float64]:\n return list(np.interp(xt, xp, fp)) #type:ignore\nt = timeit.timeit(f1t,number=loops)\nprint(f'f1t interp repeat last {1e6*t/loops} us')\n\n# does not work off the end without fill_value\n# linear extrapolation\n# 222 us (slow!)\ndef f1ta() -> List[np.float64]:\n ff = interp1d(xp, d, fill_value='extrapolate')\n return list(ff(xt))\nt = timeit.timeit(f1ta,number=loops)\nprint(f'f1ta interp1d extrapolation {1e6*t/loops} us')\n\n\ninterp = f1()\n\n# very slow, 13 us\ndef f2() -> List[np.float64]:\n return list(itertools.chain([interp[0]],interp))\n\nt = timeit.timeit(f2,number=loops)\nprint(f'f2 chain {1e6*t/loops} us')\n\n# slow, 3 us\ndef f3() -> List[np.float64]:\n foo = [interp[0]]\n foo.extend(interp)\n return foo\n\nt = timeit.timeit(f3,number=loops)\nprint(f'f3 extend {1e6*t/loops} us')\n\n# 0.1us\ndef f4() -> List[np.float64]:\n foo = interp\n foo.append(interp[0])\n return foo\n\nt = timeit.timeit(f4,number=loops)\nprint(f'f4 append {1e6*t/loops} us')\n\n# winner, 1us\ndef f5() -> List[np.float64]:\n foo = interp\n foo.insert(0, interp[0])\n return foo\n\nt = timeit.timeit(f5,number=loops)\nprint(f'f5 insert {1e6*t/loops} us')\n\n###########################33\n# pop\nd6 = d.copy()\n# 0.2us\ndef f6() -> List[np.float64]:\n fd6 = list(d6)\n fd6.pop(0)\n return fd6\n\nt = timeit.timeit(f6,number=loops)\nprint(f'f6 pop {1e6*t/loops} us')\n\n###########################33\n# slice\nd7 = d.copy()\n# 1.6us\ndef f7():\n # type: () -> np.ndarray[np.float64]\n return d7[1:]\n\nt = timeit.timeit(f7,number=loops)\nprint(f'f7 slice {1e6*t/loops} us')\n\nd8:List[np.float64] = list(d.copy())\n# 72us materialize and pop\ndef f8() -> np.float64:\n return list(itertools.accumulate(d8)).pop(0)\n\nt = timeit.timeit(f8,number=loops)\nprint(f'f8 pop {1e6*t/loops} us')\n\nd9:List[np.float64] = list(d.copy())\n# 76us islice and materialize (about the same)\ndef f9() -> Iterable[float]:\n return list(itertools.islice(itertools.accumulate(d9), 1, None))\n\nt = timeit.timeit(f9,number=loops)\nprint(f'f9 islice {1e6*t/loops} us')\n"
] | [
[
"scipy.interpolate.interp1d",
"numpy.interp",
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
EverLookNeverSee/diag2model | [
"cba4f98483ada5fb72d00900f68df04245c7c7db"
] | [
"diagtomodel/xception/xception.py"
] | [
"\"\"\"\n An implementation of the Xception model architecture.\n Paper: https://arxiv.org/abs/1610.02357\n Author: Prakhar Srivastava - @prkhrsrvstv1\n\"\"\"\n\n\nfrom typing import Tuple\nfrom tensorflow.keras.models import Model\nfrom tensorflow.python.types.core import Tensor\nfrom tensorflow.keras.layers import (Activation, Add, BatchNormalization, Conv2D, Dense, GlobalAveragePooling2D, Input,\n MaxPool2D, SeparableConv2D)\n\n\ndef convolutional_unit(conv_inputs: Tensor, num_filters: int, kernel_size: Tuple[int, int],\n strides: Tuple[int, int] = (1, 1), pre_activation: bool = False, post_activation: bool = True,\n conv_layer: str = \"Conv2D\") -> Tensor:\n \"\"\"\n Convolutional Unit\n Passes the input tensor through a convolutional layer, and a batch-normalization layer.\n Also performs any combination of pre/post relu activation, depending on parameters passed.\n :param conv_inputs: Input tensor\n :param num_filters: Number of filters to use in the convolutional layer\n :param kernel_size: Kernel size for the convolutional layer\n :param strides: Strides size for the convolutional layer\n :param pre_activation: Whether or not to perform activation before the convolutional layer, False by default.\n :param post_activation: Whether or not to perform activation after the batch-normalization layer, True by default.\n :param conv_layer: The type of Convolutional layer to use. Should be either Conv2D or SeparableConv2D\n :return: A 4+D Tensor obtained after passing through activation, convolutional, and batch-normalization layers\n \"\"\"\n if conv_layer not in [\"Conv2D\", \"SeparableConv2D\"]:\n raise ValueError(f\"conv_layer must be either Conv2D or SeparableConv2D. Found {conv_layer}\")\n\n if pre_activation:\n conv_inputs = Activation(\"relu\")(conv_inputs)\n conv_outputs = conv_inputs\n\n if conv_layer == \"Conv2D\":\n conv_outputs = Conv2D(filters=num_filters, kernel_size=kernel_size, strides=strides, padding=\"same\",\n use_bias=False)(conv_inputs)\n elif conv_layer == \"SeparableConv2D\":\n conv_outputs = SeparableConv2D(filters=num_filters, kernel_size=kernel_size, strides=strides, padding=\"same\",\n use_bias=False)(conv_inputs)\n\n conv_outputs = BatchNormalization()(conv_outputs)\n\n if post_activation:\n conv_outputs = Activation(\"relu\")(conv_outputs)\n return conv_outputs\n\n\ndef separable_convolutional_unit(sep_conv_inputs: Tensor, num_filters: int, pre_activation: bool = True,\n post_activation: bool = False) -> Tensor:\n \"\"\"\n Separable Convolutional Unit\n Uses the Convolutional Unit (`convolutional_unit`) function to pass the input tensor through a Separable\n Convolutional layer. Also performs any combination of pre/post relu activation, depending on parameters passed.\n :param sep_conv_inputs: sep_conv_inputs: Input tensor\n :param num_filters: Number of filters to use in the convolutional layer\n :param pre_activation: Whether or not to perform activation before the convolutional layer, True by default.\n :param post_activation: Whether or not to perform activation after the batch-normalization layer. 
False by default.\n :return: A 4+D Tensor obtained after passing through activation, convolutional, and batch-normalization layers\n \"\"\"\n return convolutional_unit(sep_conv_inputs, num_filters, (3, 3), pre_activation=pre_activation,\n post_activation=post_activation, conv_layer=\"SeparableConv2D\")\n\n\ndef entry_flow(entry_inputs: Input) -> Tensor:\n \"\"\"\n Entry flow\n Implements the first of the three broad parts of the model\n :param entry_inputs: Input tensor of shape [*, rows, cols, channels]\n :return: Output tensor of shape [*, new_rows, new_cols, 728]\n \"\"\"\n # Block 2 (Red)\n entry_outputs = convolutional_unit(entry_inputs, 32, (3, 3), (2, 2))\n entry_outputs = convolutional_unit(entry_outputs, 64, (3, 3))\n\n # Block 3 - Conv A (Yellow)\n for num_filters in [128, 256, 728]:\n res = convolutional_unit(entry_outputs, num_filters, (1, 1), (2, 2), post_activation=False)\n for _ in range(2):\n entry_outputs = separable_convolutional_unit(entry_outputs, num_filters)\n entry_outputs = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding=\"same\")(entry_outputs)\n entry_outputs = Add()([res, entry_outputs])\n return entry_outputs\n\n\ndef middle_flow(middle_inputs: Tensor) -> Tensor:\n \"\"\"\n Middle flow\n Implements the second of the three broad parts of the model\n :param middle_inputs: middle_inputs: Tensor output generate by the Entry Flow,\n having shape [*, new_rows, new_cols, 728]\n :return: Output tensor of shape [*, new_rows, new_cols, 728]\n \"\"\"\n # Block 4 - Conv B (Green)\n middle_outputs = middle_inputs\n for _ in range(8):\n res = middle_outputs\n for _ in range(3):\n middle_outputs = separable_convolutional_unit(middle_outputs, 728)\n middle_outputs = Add()([res, middle_outputs])\n return middle_outputs\n\n\ndef exit_flow(exit_inputs: Tensor) -> Tensor:\n \"\"\"\n Exit flow\n Implements the second of the three broad parts of the model. 
Includes the optional fully-connected layers,\n and the logistic regression segment of the model.\n :param exit_inputs: Tensor output generated by the Middle Flow segment, having shape [*, new_rows, new_cols, 728]\n :return: Output tensor of shape [*, 1000], representing the classifier output for 1000 classes\n \"\"\"\n # Block 5 - Conv C (Orange)\n res = convolutional_unit(exit_inputs, 1024, (1, 1), (2, 2), post_activation=False)\n exit_outputs = exit_inputs\n for num_filters in [728, 1024]:\n exit_outputs = separable_convolutional_unit(exit_outputs, num_filters)\n exit_outputs = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding=\"same\")(exit_outputs)\n exit_outputs = Add()([res, exit_outputs])\n for num_filters in [1536, 2048]:\n exit_outputs = separable_convolutional_unit(exit_outputs, num_filters, pre_activation=False,\n post_activation=True)\n\n # Block 6 Global Average Pool (Gray)\n exit_outputs = GlobalAveragePooling2D()(exit_outputs)\n\n # Optional fully-connected layer(s) (Blue)\n exit_outputs = Dense(units=2048)(exit_outputs)\n exit_outputs = Activation(\"relu\")(exit_outputs)\n\n # Logistic regression (Blue)\n exit_outputs = Dense(units=1000)(exit_outputs)\n exit_outputs = Activation(\"softmax\")(exit_outputs)\n\n return exit_outputs\n\n\n# Defining the input layer with images of shape 299 x 299 x 3\nmodel_inputs = Input(shape=(299, 299, 3))\n# Pass through the entry flow\nintermediate_outputs = entry_flow(model_inputs)\n# Pass through the middle flow\nintermediate_outputs = middle_flow(intermediate_outputs)\n# Pass through the exit flow\nmodel_outputs = exit_flow(intermediate_outputs)\n\nmodel = Model(inputs=model_inputs, outputs=model_outputs, name=\"Xception\")\n\nif __name__ == '__main__':\n model.summary()\n"
] | [
[
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.SeparableConv2D",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
1313e/e13Tools | [
"5a068dedb73fa063699745220f9ed1bf48c7bcc4"
] | [
"e13tools/tests/test_math.py"
] | [
"# -*- coding: utf-8 -*-\n\n# %% IMPORTS\n# Package imports\nimport numpy as np\nimport pytest\n\n# e13Tools imports\nfrom e13tools import ShapeError\nfrom e13tools.math import gcd, is_PD, lcm, nCr, nearest_PD, nPr\n\n\n# %% PYTEST FUNCTIONS\n# Do default test for gcd()-function\ndef test_gcd():\n assert gcd(18, 60, 72, 138) == 6\n\n\n# Pytest class for the is_PD()-function\nclass Test_is_PD(object):\n # Test if real PD matrix returns True\n def test_real_PD_matrix(self):\n mat = np.eye(3)\n assert is_PD(mat)\n\n # Test if real non-PD matrix returns False\n def test_real_non_PD_matrix(self):\n mat = np.array([[1, 2.5], [2.5, 4]])\n assert not is_PD(mat)\n\n # Test if complex PD matrix returns True\n def test_complex_PD_matrix(self):\n mat = np.array([[4, 1.5+1j], [1.5-1j, 3]])\n assert is_PD(mat)\n\n # Test if using vector raises an error\n def test_vector(self):\n vec = np.array([7, 8, 9])\n with pytest.raises(ShapeError):\n is_PD(vec)\n\n # Test if using a non-square matrix raises an error\n def test_non_square_matrix(self):\n mat = np.array([[1, 2, 3], [4, 5, 6]])\n with pytest.raises(ShapeError):\n is_PD(mat)\n\n # Test if non-Hermitian matrix raises an error\n def test_non_Hermitian_matrix(self):\n mat = np.array([[1, 2], [3, 4]])\n with pytest.raises(ValueError):\n is_PD(mat)\n\n\n# Do default test for lcm()-function\ndef test_lcm():\n assert lcm(8, 9, 21) == 504\n\n\n# Pytest class for nCr()-function\nclass Test_nCr(object):\n # Test for repeat = False\n def test_no_repeat(self):\n assert nCr(4, 0) == 1\n assert nCr(4, 1) == 4\n assert nCr(4, 2) == 6\n assert nCr(4, 3) == 4\n assert nCr(4, 4) == 1\n assert nCr(4, 5) == 0\n\n # Test for repeat = True\n def test_with_repeat(self):\n assert nCr(4, 0, repeat=True) == 1\n assert nCr(4, 1, repeat=True) == 4\n assert nCr(4, 2, repeat=True) == 10\n assert nCr(4, 3, repeat=True) == 20\n assert nCr(4, 4, repeat=True) == 35\n assert nCr(4, 5, repeat=True) == 56\n\n\n# Pytest class for nearest_PD()-function\nclass Test_nearest_PD(object):\n # Test if using a real PD matrix returns the matrix\n def test_real_PD_matrix(self):\n mat = np.eye(3)\n assert is_PD(mat)\n assert np.allclose(nearest_PD(mat), mat)\n\n # Test if using a real non-PD matrix converts it into a PD matrix\n def test_real_non_PD_matrix(self):\n mat = np.array([[1, 2], [3, 4]])\n with pytest.raises(ValueError):\n is_PD(mat)\n mat_PD = nearest_PD(mat)\n assert is_PD(mat_PD)\n assert np.allclose(mat_PD, np.array([[1.31461828, 2.32186616],\n [2.32186616, 4.10085767]]))\n\n # Test if using a complex non-PD matrix converts it into a PD matrix\n def test_complex_non_PD_matrix(self):\n mat = np.array([[4, 2+1j], [1+3j, 3]])\n mat_PD = nearest_PD(mat)\n assert is_PD(mat_PD)\n assert np.allclose(mat_PD, np.array([[4.0+0.j, 1.5-1.j],\n [1.5+1.j, 3.0+0.j]]))\n\n # Test if using vector raises an error\n def test_vector(self):\n vec = np.array([7, 8, 9])\n with pytest.raises(ShapeError):\n nearest_PD(vec)\n\n # Test if using a non-square matrix raises an error\n def test_non_square_matrix(self):\n mat = np.array([[1, 2, 3], [4, 5, 6]])\n with pytest.raises(ShapeError):\n nearest_PD(mat)\n\n\n# Pytest class for nPr()-function\nclass Test_nPr(object):\n # Test for repeat = False\n def test_no_repeat(self):\n assert nPr(4, 0) == 1\n assert nPr(4, 1) == 4\n assert nPr(4, 2) == 12\n assert nPr(4, 3) == 24\n assert nPr(4, 4) == 24\n assert nPr(4, 5) == 0\n\n # Test for repeat = True\n def test_with_repeat(self):\n assert nPr(4, 0, repeat=True) == 1\n assert nPr(4, 1, repeat=True) == 4\n assert 
nPr(4, 2, repeat=True) == 16\n assert nPr(4, 3, repeat=True) == 64\n assert nPr(4, 4, repeat=True) == 256\n assert nPr(4, 5, repeat=True) == 1024\n"
] | [
[
"numpy.eye",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhfeng18/Schulich-AI2-Bitcoin-Predictions | [
"27bdb6a39b987cbab1376ac743141415cacd3d87"
] | [
"AI02hw2.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# <h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n# <div class=\"toc\"><ul class=\"toc-item\"></ul></div>\n\n# In[1]:\n\n\nfrom pandas import read_csv\nfrom matplotlib import pyplot\nfrom pandas_datareader import data\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom statsmodels.tsa.stattools import adfuller\nfrom sklearn.model_selection import train_test_split\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nfrom sklearn.metrics import f1_score\nimport math\nfrom sklearn.metrics import mean_squared_error\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom pmdarima.arima import auto_arima\nfrom pandas.plotting import autocorrelation_plot\nfrom joblib import dump, load\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom pytrends.request import TrendReq\nfrom datetime import datetime\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[2]:\n\n\n# loads and prprocesses the data (I'm using the daily one)\n# and X2 Google Trends data\ndef preprocess(data_file):\n global x_train\n global x_valid\n global x2\n \n # read csv file\n series = pd.read_csv(data_file)\n \n # use 'close'\n # fill the missing by replacing them with the latest available price\n series['close'] = series['close'].ffill()\n\n # percentage change differencing\n x = np.diff(series['close']) / series['close'][:-1]\n \n N = series.shape[0]\n split_t = int(N * 0.8) # 0.8 to training & 0.2 to validation\n x_train = x[:split_t].to_numpy() # first 0.8\n x_valid = x[split_t:].to_numpy() # last 0.2\n \n \n # Google Trends bitcoin\n # Eastern standard time UTC-05 in minutes -300\n # I want '2018-03-10' to '2019-10-23' to match hw1 predictions (because of differencing)\n # Google Trends weekly data captures every Sunday\n # '2018-03-10' is Saturday, use '2018-03-04' and drop the first 6 obs\n # '2019-10-23' is Wednesday, use '2019-10-27' and drop the last 5 obs\n start_date = '2018-03-04'\n end_date = '2019-10-27'\n pytrends = TrendReq(hl='en-US', tz=-300)\n kw_list=['bitcoin']\n pytrends.build_payload(kw_list, cat=0, timeframe=start_date + ' ' + end_date, geo='', gprop='')\n bitcoin_trends = pytrends.interest_over_time()\n # resampling to daily\n bitcoin_daily = bitcoin_trends.resample('1d').interpolate(method='linear').drop(['isPartial'], axis='columns')\n bitcoin_daily = bitcoin_daily[6:len(bitcoin_daily)-4]\n x2 = bitcoin_daily['bitcoin'].to_numpy()\n x2 = np.diff(x2) / x2[:-1] # differencing\n \n return x_train, x_valid, x2\n # x_train = x1, x_valid = y\n\n\n# In[3]:\n\n\n# input_file\n# x1, y, x2 = preprocess('BTCUSD_1d.csv')\n\n\n\n# In[4]:\n\n\ndef model(x_train, x_valid, order=(2, 0, 2)):\n global y1_hat\n forecasts = np.zeros(x_valid.shape)\n history = list(x_train)\n\n for i in range(x_valid.shape[0]):\n\n model = ARIMA(history, order=order)\n model_fit = model.fit(disp=0)\n y_hat = model_fit.forecast()[0]\n forecasts[i] = y_hat\n y = x_valid[i]\n history.append(y)\n if (i % 20) == 0:\n print('predicted=%f, expected=%f' % (y_hat, y))\n\n y1_hat = forecasts\n return y1_hat\n\n\n# In[5]:\n\n\n# y1_hat = model(x1, y)\n\n\n# In[4]:\n\n\n# hw1_forecast = pd.read_csv('arima_forecasts.csv')\n# y1_hat = hw1_forecast['forecast'].to_numpy()\n\n\n# In[25]:\n\n\ndef combiner(y1_hat, x2, filename_comb=('ai2_reg1.joblib')):\n global y2_valid\n global y2_hat\n reg = load(filename_comb)\n \n y1_hat = y1_hat.reshape(len(y1_hat), 1)\n x2 = x2.reshape(len(x2), 1)\n \n # linear regression independent variable array\n iv = 
np.concatenate((y1_hat, x2), axis=1)\n \n# N = iv.shape[0] # 592\n# split_t = int(N * 0.8) # value = 473, 0.8 to training & 0.2 to validation\n# iv_train = iv[:split_t] # first 0.8, from index 0 to 472\n# iv_valid = iv[split_t:] # last 0.2, from index 473\n# y2_train = y[:split_t] # len=473\n# y2_valid = y[split_t:] # len=119\n# reg = LinearRegression().fit(iv_train, y2_train)\n \n y2_hat = reg.predict(iv)\n \n return y2_hat\n\n\n# In[26]:\n\n\n# y2_hat = combiner(y1_hat, x2)\n# mse_y2 = mean_squared_error(y, y2_hat)\n# print('validation MSE: %.9f' % mse_y2)\n# # validation MSE: 0.001391665\n"
] | [
[
"numpy.concatenate",
"pandas.read_csv",
"numpy.zeros",
"numpy.diff"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
crazzle/production-ready-ds | [
"783b37b12b69591eda32d2d57e08ce9f71119940"
] | [
"01_classification_pipeline.py"
] | [
"from luigi.contrib.spark import PySparkTask\nfrom luigi.parameter import IntParameter, DateParameter\nfrom luigi import LocalTarget, Task, WrapperTask\n\n\nclass Fetch(Task):\n from datetime import date, timedelta\n\n # Ein Datum wird als Parameter uebergeben\n date = DateParameter(default=date.today())\n\n # PRAW arbeitet mit Zeitintervallen\n # Um einen Tag zu importieren wird\n # von Tag N bis Tag N+1 importiert\n delta = timedelta(days=1)\n\n # Das LocalTarget fuer die rohen Daten\n # Die Daten werden unter\n # \"daily/<datum>/roh.csv gespeichert\n def output(self):\n prefix = self.date.strftime(\"%m-%d-%Y\")\n return LocalTarget(\"daily/%s/roh.csv\" % prefix)\n\n # Die Posts fuer einen Tag\n # werden heruntergeladen,\n # in einen Dataframe konvertiert\n # und als CSV in das Target geschrieben\n def run(self):\n start = self.date\n end = start + self.delta\n posts = self.fetch(start, end)\n frame = self.konvertiere(posts)\n self.speichern(frame, self.output())\n\n def fetch(self, start, end):\n import time\n import praw\n subreddits = [\"datascience\", \"gameofthrones\"]\n reddit = praw.Reddit(user_agent=\"test\",\n client_id=\"wpaIV3-b3AYOJQ\", \n client_secret=\"-M_LPtLCpkqlJTCyg--Rg9ePAwg\")\n subreddits = '+'.join(subreddits)\n subreddit = reddit.subreddit(subreddits)\n start = time.mktime(self.date.timetuple())\n end = self.date + self.delta\n end = time.mktime(end.timetuple())\n filtered = list(subreddit.submissions(start=start, end=end))\n return filtered\n \n def konvertiere(self, posts):\n import pandas\n dataframe = pandas.DataFrame([f.__dict__ for f in posts])[['id', 'title', 'selftext', 'subreddit']]\n return dataframe\n\n def speichern(self, dataframe, target):\n with target.open(\"w\") as out:\n dataframe.to_csv(out, encoding='utf-8', index=False, sep=';')\n\n\nclass Clean(Task):\n from datetime import date\n import nltk\n nltk.download('punkt')\n nltk.download('stopwords')\n\n # Ein Datum wird als Parameter uebergeben\n date = DateParameter(default=date.today())\n\n # Die Liste von Stop-Woertern\n # die herausgefiltert werden\n stoppwoerter = nltk.corpus.stopwords.words('english')\n\n # Der verwendete Tokenizer\n tokenizer = nltk.tokenize.RegexpTokenizer(r'\\w+')\n\n # Der Stemmer fuer Englische Woerter\n stemmer = nltk.SnowballStemmer(\"english\")\n\n # Als Abhaengigkeit wird der\n # Task *Fetch* zurueckgegeben\n def requires(self):\n return Fetch(self.date)\n\n # Das LocalTarget fuer die sauberen Daten\n # Die Daten werden unter\n # \"daily/<datum>/cleaned.csv gespeichert\n def output(self):\n prefix = self.date.strftime(\"%m-%d-%Y\")\n return LocalTarget(\"daily/%s/cleaned.csv\" % prefix)\n\n # Die Rohdaten werden zerstueckelt\n # durch die Stopwort-Liste gefiltert\n # und auf ihre Wortstaemme zurueckgefuehrt\n def run(self):\n csv = self.lade()\n tokenized = self.tokenize(csv)\n gefiltert = self.entferne(tokenized)\n wortstamm = self.stemme(gefiltert)\n csv[\"cleaned_words\"] = wortstamm\n self.speichern(csv, self.output())\n\n def lade(self):\n import pandas\n dataset = pandas.read_csv(self.input().path, encoding='utf-8', sep=';').fillna('')\n return dataset\n\n def tokenize(self, csv):\n def tok(post):\n tokenized = self.tokenizer.tokenize(post[\"title\"] + \" \" + post[\"selftext\"])\n return tokenized\n tokenized = csv.apply(tok, axis=1)\n return tokenized\n\n def entferne(self, tokenized):\n lowercase = tokenized.apply(lambda post: [wort.lower() for wort in post])\n filtered = lowercase.apply(lambda post: [wort for wort in post if wort not in 
self.stoppwoerter])\n return filtered\n\n def stemme(self, gefiltert):\n wortstamm = gefiltert.apply(lambda post: [self.stemmer.stem(wort) for wort in post])\n wortstamm = wortstamm.apply(lambda post: \" \".join(post))\n return wortstamm\n \n def speichern(self, dataframe, target):\n with target.open(\"w\") as out:\n dataframe[[\"id\", \"cleaned_words\", \"subreddit\"]].to_csv(out, encoding='utf-8', index=False, sep=';')\n\n\nclass ModelExists(WrapperTask):\n version = IntParameter(default=1)\n\n def output(self):\n return LocalTarget(\"model/%d/model\" % self.version)\n\n\nfrom luigi.contrib.spark import PySparkTask\nfrom luigi.parameter import IntParameter, DateParameter\nfrom luigi import LocalTarget\nclass Classify(PySparkTask):\n from datetime import date\n\n date = DateParameter(default=date.today())\n version = IntParameter(default=1)\n\n # PySpark Parameter\n driver_memory = '1g'\n executor_memory = '2g'\n executor_cores = '2'\n num_executors = '4'\n master = 'local'\n\n # Als Abhaengigkeit werden\n # Task *Clean* und *ModelExists*\n # zurueckgegeben\n def requires(self):\n return [ModelExists(self.version), Clean(self.date)]\n\n # Das LocalTarget fuer die Klassifikation\n # Die Daten werden unter\n # \"daily/<datum>/ergebnis.csv gespeichert\n def output(self):\n prefix = self.date.strftime(\"%m-%d-%Y\")\n return LocalTarget(\"daily/%s/ergebnis.csv\" % prefix)\n\n def main(self, sc, *args):\n from pyspark.sql.session import SparkSession\n from pyspark.ml import PipelineModel\n from pyspark.sql.functions import when\n\n # Initialisiere den SQLContext\n sql = SparkSession.builder\\\n .enableHiveSupport() \\\n .config(\"hive.exec.dynamic.partition\", \"true\") \\\n .config(\"hive.exec.dynamic.partition.mode\", \"nonstrict\") \\\n .config(\"hive.exec.max.dynamic.partitions\", \"4096\") \\\n .getOrCreate()\n\n # Lade die bereinigten Daten\n df = sql.read.format(\"com.databricks.spark.csv\") \\\n .option(\"delimiter\", \";\") \\\n .option(\"header\", \"true\") \\\n .load(self.input()[1].path)\n\n # Lade das Model das zuvor mit SparkML trainiert wurde\n model = PipelineModel.load(self.input()[0].path)\n\n # Klassifiziere die Datensaetze eines Tages mit dem Model\n ergebnis = model.transform(df)[[\"id\",\n \"subreddit\",\n \"probability\",\n \"prediction\"]]\n\n # Eine kleine Aufbereitung der Daten denn\n # die Klasse \"1\" hat den Namen \"datascience\"\n ergebnis = ergebnis.withColumn(\"prediction_label\",\n when(ergebnis.prediction==1,\n \"datascience\") \\\n .otherwise(\"gameofthrones\"))\n\n # Der Einfachheit halber wird der Dataframe\n # in einen Pandas Dataframe konvertiert.\n # Dies sollte bei grossen Datenmengen vermieden.\n with self.output().open(\"w\") as out:\n ergebnis.toPandas().to_csv(out,\n encoding='utf-8',\n index=False,\n sep=';')\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
themayankjha/MedX | [
"cdf295910f6e0a30e6738fcabbf458b8f3e9f9ae"
] | [
"API and AI Model/heartfail.py"
] | [
"\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nimport sklearn.metrics\r\nfrom sklearn.ensemble import ExtraTreesClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nimport os\r\n\r\n\r\ndef heartfail(age, sex, anaemia, bp, diabetes, smoking, time):\r\n os.environ['PATH'] = os.environ['PATH']+';'+os.environ['CONDA_PREFIX']+r\"\\Library\\bin\\graphviz\"\r\n data = pd.read_csv('static/heart_failure.csv')\r\n predictors = data[[\"age\", \"anaemia\", \"diabetes\", \"high_blood_pressure\", \"sex\", \"smoking\", \"time\"]]\r\n targets = data.DEATH_EVENT\r\n pred_train, pred_test, tar_train, tar_test = train_test_split(predictors, targets, test_size = .4, random_state = 2)\r\n classifier = RandomForestClassifier(n_estimators = 400)\r\n classifier = classifier.fit(pred_train, tar_train)\r\n predictions = classifier.predict(pred_test)\r\n accuracy = sklearn.metrics.accuracy_score(tar_test, predictions)\r\n model = ExtraTreesClassifier()\r\n model.fit(pred_train, tar_train)\r\n lst = []\r\n lst.append(classifier.predict([[age, anaemia, diabetes, bp, sex, smoking, time]])[0])\r\n lst.append(accuracy*100)\r\n return lst\r\n\r\n\r\n\r\n"
] | [
[
"sklearn.ensemble.ExtraTreesClassifier",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
mberr/ea-sota-comparison | [
"0f7bb679b61675bfb65a2cd7462854968d876df2"
] | [
"src/kgm/trainables/dgmc_matching.py"
] | [
"import copy\nimport logging\n\nimport torch\n\nfrom os import path as osp\n\nfrom typing import MutableMapping, Any, Mapping, Iterable\n\nfrom ray.tune.result import DONE\nfrom torch_geometric.data import Data\nfrom torch_geometric.datasets import DBP15K\n\nfrom dgmc import DGMC\nfrom dgmc.models import RelCNN\nfrom kgm.data import MatchSideEnum\nfrom kgm.modules.embeddings import get_embedding_pair\nfrom kgm.modules.embeddings.base import NodeEmbeddingInitMethod\nfrom kgm.modules.embeddings.norm import EmbeddingNormalizationMethod\nfrom kgm.trainables.matching import MatchingTrainable\nfrom kgm.utils.common import kwargs_or_empty\nfrom kgm.utils.mlflow_utils import log_metrics_to_mlflow\nfrom kgm.utils.torch_utils import get_device, construct_optimizer_from_config\n\nlogger = logging.getLogger(name=__name__)\n\n\nclass SumEmbedding(object):\n def __call__(self, data):\n data.x1, data.x2 = data.x1.sum(dim=1), data.x2.sum(dim=1)\n return data\n\n\nclass DgmcMatching(MatchingTrainable):\n epoch = 0\n\n def __load_model(self, model_config: MutableMapping[str, Any]) -> DGMC:\n psi_1 = RelCNN(\n in_channels=self.data.x1.size(-1),\n out_channels=model_config.get('dim', 256),\n num_layers=model_config.get('ps1_n_layers', 3),\n batch_norm=model_config.get('psi1_batch_norm', False),\n cat=model_config.get('psi1_cat', True),\n lin=model_config.get('psi1_cat', True),\n dropout=model_config.get('psi1_dropout', 0.5),\n )\n psi_2 = RelCNN(\n in_channels=model_config.get('rnd_dim', 32),\n out_channels=model_config.get('rnd_dim', 32),\n num_layers=model_config.get('ps2_n_layers', 3),\n batch_norm=model_config.get('psi2_batch_norm', False),\n cat=model_config.get('psi2_cat', True),\n lin=model_config.get('psi2_cat', True),\n dropout=model_config.get('psi2_dropout', 0.0),\n )\n\n model = DGMC(psi_1=psi_1, psi_2=psi_2, num_steps=0, k=10).to(self.device)\n return model\n\n def _wrap_data(self, model_config: MutableMapping[str, Any]) -> Data:\n embeddings = get_embedding_pair(\n dataset=self.dataset,\n init=model_config.get('node_embedding_init_method', NodeEmbeddingInitMethod.random),\n init_config=model_config.get('node_embedding_init_config', None),\n trainable=model_config.get('trainable_node_embeddings', True),\n embedding_dim=model_config.get('embedding_dim', 300),\n norm=model_config.get('node_embedding_norm', EmbeddingNormalizationMethod.none),\n )\n\n data = Data(\n x1=embeddings[MatchSideEnum.left].weight.to(self.device),\n edge_index1=torch.unique(self.dataset.left_graph.triples[:, [0, 2]].t(), dim=1).to(self.device),\n x2=embeddings[MatchSideEnum.right].weight.to(self.device),\n edge_index2=torch.unique(self.dataset.right_graph.triples[:, [0, 2]].t(), dim=1).to(self.device),\n train_y=self.dataset.alignment.train.to(self.device),\n test_y=self.dataset.alignment.test.to(self.device),\n val_y=self.dataset.alignment.validation.to(self.device),\n )\n\n return data\n\n def __train(self):\n self.model.train()\n self.optimizer.zero_grad()\n\n _, S_L = self.model(self.data.x1, self.data.edge_index1, None, None, self.data.x2,\n self.data.edge_index2, None, None, self.data.train_y)\n\n loss = self.model.loss(S_L, self.data.train_y)\n loss.backward()\n self.optimizer.step()\n return loss\n\n @torch.no_grad()\n def _train_results(self):\n self.model.eval()\n\n _, S_L = self.model(self.data.x1, self.data.edge_index1, None, None, self.data.x2,\n self.data.edge_index2, None, None)\n\n hits1 = self.model.acc(S_L, self.data.train_y)\n hits10 = self.model.hits_at_k(10, S_L, self.data.train_y)\n\n return {\n 
'hits_at_1': float(hits1),\n 'hits_at_10': float(hits10),\n }\n\n @torch.no_grad()\n def _test(self):\n self.model.eval()\n\n _, S_L = self.model(self.data.x1, self.data.edge_index1, None, None, self.data.x2,\n self.data.edge_index2, None, None)\n\n hits1 = self.model.acc(S_L, self.data.test_y)\n hits10 = self.model.hits_at_k(10, S_L, self.data.test_y)\n\n return {\n 'hits_at_1': float(hits1),\n 'hits_at_10': float(hits10),\n }\n\n @torch.no_grad()\n def _eval(self):\n self.model.eval()\n\n _, S_L = self.model(self.data.x1, self.data.edge_index1, None, None, self.data.x2,\n self.data.edge_index2, None, None)\n\n hits1 = self.model.acc(S_L, self.data.val_y)\n hits10 = self.model.hits_at_k(10, S_L, self.data.val_y)\n\n return {\n 'hits_at_1': float(hits1),\n 'hits_at_10': float(hits10),\n }\n\n def eval(self):\n # don't run self._eval if there is no validation split\n if not hasattr(self, 'dataset') or self.dataset.alignment._validation is None:\n results = {\n 'train': self._train_results(),\n 'test': self._test(),\n }\n else:\n results = {\n 'train': self._train_results(),\n 'test': self._test(),\n 'validation': self._eval()\n }\n\n return results\n\n def _train_iter(\n self,\n num_epochs: int = 1,\n final_eval: bool = True,\n ) -> Iterable[Mapping[str, Any]]:\n \"\"\"\n Train the model, and return intermediate results.\n\n :param num_epochs:\n The number of epochs.\n :param final_eval:\n Whether to perform an evaluation after the last training epoch.\n\n :return:\n One result dictionary per epoch.\n \"\"\"\n epoch_result = dict()\n for _ in range(self.epoch, self.epoch + num_epochs):\n self.model.train()\n\n # training step\n self.epoch += 1\n epoch_result = dict(\n epoch=self.epoch,\n train_loss=float(self.__train()),\n )\n\n # evaluate\n if (final_eval and self.epoch == num_epochs) or (self.eval_frequency is not None and (self.epoch % self.eval_frequency == 0)) or self.epoch > 100:\n self.model.eval()\n with torch.no_grad():\n epoch_result['eval'] = self.eval()\n\n yield epoch_result\n\n return epoch_result\n\n def _setup(self, config):\n local_config = config.copy()\n\n # Logging\n mlflow_config = config.get('mlflow')\n if mlflow_config is None:\n logger.warning('No MLFlow configuration found. 
Thus, no logging to MLFlow.')\n mlflow_config = dict(ignore=True)\n self._load_mlflow(mlflow_config=mlflow_config, config=config)\n\n # random seed\n self.seed = local_config.pop('seed')\n torch.manual_seed(self.seed)\n\n # Device\n device_name = local_config.pop('device', None)\n self.device = get_device(device=device_name)\n logger.info('Using device: %s', self.device)\n\n # Dataset\n data_config = local_config.pop('data')\n dataset_name = data_config['dataset']\n if dataset_name == 'dbp15kjape_torch_geometric':\n path = osp.join('..', 'data', 'DBP15K')\n self.data = DBP15K(path, data_config['subset'], transform=SumEmbedding())[0].to(self.device)\n else:\n self.dataset = self._load_data(data_config)\n self.train_val_split = data_config.get('train_validation_split') is not None\n\n # Model\n model_config = local_config.pop('model')\n if dataset_name == 'dbp15kjape_torch_geometric':\n # consistent condition path with data loading\n pass\n else:\n self.data = self._wrap_data(model_config=model_config)\n self.model = self.__load_model(model_config=model_config)\n self.model_config = model_config\n\n # Optimizer\n optimizer_config = local_config.pop('optimizer')\n optimizer_cls = optimizer_config.pop('cls', 'adam')\n optimizer_kwargs = copy.deepcopy(optimizer_config)\n optimizer_config = dict(cls=optimizer_cls)\n optimizer_config.update(kwargs_or_empty(optimizer_kwargs))\n self.optimizer = construct_optimizer_from_config(model=self.model, optimizer_config=optimizer_config)\n\n # Training\n train_config = local_config.pop('training')\n self._load_training(training_config=train_config)\n\n self.train_iter = iter(self._train_iter(num_epochs=self.max_num_epochs))\n\n def _train(self) -> Mapping[str, Any]:\n if self.epoch == 101:\n self.model.num_steps = self.model_config.get('num_steps', 10)\n self.model.detach = True\n\n result = dict()\n try:\n result = next(self.train_iter)\n if self.log_only_eval:\n while 'eval' not in result:\n result = next(self.train_iter)\n except StopIteration:\n result[DONE] = True\n\n # Log to MLFlow\n if self.use_mlflow and 'epoch' in result:\n log_metrics_to_mlflow(metrics=result, step=result['epoch'])\n\n if 'eval' in result:\n if 'validation' in result['eval']:\n result['checkpoint_score_attr'] = result['eval']['validation']['hits_at_1']\n else:\n result['checkpoint_score_attr'] = result['eval']['test']['hits_at_1']\n\n return result\n"
] | [
[
"torch.manual_seed",
"torch.no_grad"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sabraha2/uncertainty_estimation_deep_learning | [
"12e264f7fb18747352ffed6b4ff9883bc9d83dca"
] | [
"utils.py"
] | [
"'''Some helper functions for PyTorch, including:\n - get_mean_and_std: calculate the mean and std value of dataset.\n - msr_init: net parameter initialization.\n - progress_bar: progress bar mimic xlua.progress.\n'''\nimport os\nimport sys\nimport time\nimport torch\n\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\ndef get_mean_and_std(dataset):\n '''Compute the mean and std value of dataset.'''\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print('==> Computing mean and std..')\n for inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n return mean, std\n\ndef init_params(net):\n '''Init layer parameters.'''\n for m in net.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal(m.weight, mode='fan_out')\n if m.bias:\n init.constant(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant(m.weight, 1)\n init.constant(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal(m.weight, std=1e-3)\n if m.bias:\n init.constant(m.bias, 0)\n\n\n_, term_width = os.popen('stty size', 'r').read().split()\nterm_width = int(term_width)\n\nTOTAL_BAR_LENGTH = 65.\nlast_time = time.time()\nbegin_time = last_time\n\ndef progress_bar(current, total, msg=None):\n global last_time, begin_time\n if current == 0:\n begin_time = time.time() # Reset for new bar.\n\n cur_len = int(TOTAL_BAR_LENGTH*current/total)\n rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1\n\n sys.stdout.write(' [')\n for i in range(cur_len):\n sys.stdout.write('=')\n sys.stdout.write('>')\n for i in range(rest_len):\n sys.stdout.write('.')\n sys.stdout.write(']')\n\n cur_time = time.time()\n step_time = cur_time - last_time\n last_time = cur_time\n tot_time = cur_time - begin_time\n\n L = []\n L.append(' Step: %s' % format_time(step_time))\n L.append(' | Tot: %s' % format_time(tot_time))\n if msg:\n L.append(' | ' + msg)\n\n msg = ''.join(L)\n sys.stdout.write(msg)\n for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):\n sys.stdout.write(' ')\n\n # Go back to the center of the bar.\n for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):\n sys.stdout.write('\\b')\n sys.stdout.write(' %d/%d ' % (current+1, total))\n\n if current < total-1:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\ndef format_time(seconds):\n days = int(seconds / 3600/24)\n seconds = seconds - days*3600*24\n hours = int(seconds / 3600)\n seconds = seconds - hours*3600\n minutes = int(seconds / 60)\n seconds = seconds - minutes*60\n secondsf = int(seconds)\n seconds = seconds - secondsf\n millis = int(seconds*1000)\n\n f = ''\n i = 1\n if days > 0:\n f += str(days) + 'D'\n i += 1\n if hours > 0 and i <= 2:\n f += str(hours) + 'h'\n i += 1\n if minutes > 0 and i <= 2:\n f += str(minutes) + 'm'\n i += 1\n if secondsf > 0 and i <= 2:\n f += str(secondsf) + 's'\n i += 1\n if millis > 0 and i <= 2:\n f += str(millis) + 'ms'\n i += 1\n if f == '':\n f = '0ms'\n return f\n"
] | [
[
"torch.nn.init.kaiming_normal",
"torch.zeros",
"torch.utils.data.DataLoader",
"torch.nn.init.constant",
"torch.nn.init.normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ritwik12/exoplanet-ml | [
"af443d3cd5e51f591527a7df51315b907057018c"
] | [
"exoplanet-ml/beam/astrowavenet/beam_prepare_inputs.py"
] | [
"# Copyright 2018 The Exoplanet ML Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Beam pipeline for processing Kepler light curves into AstroWaveNet inputs.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\n\nimport apache_beam as beam\nimport tensorflow as tf\n\nfrom astrowavenet.beam import process_light_curve\nfrom beam import utils\nfrom tf_util import configdict\n\n# pylint: disable=expression-not-assigned\n\nflags.DEFINE_string(\"input_kepid_file\", None,\n \"File containing Kepler ids to preprocess.\")\n\nflags.DEFINE_string(\"kepler_data_dir\", None,\n \"Base folder containing Kepler data.\")\n\nflags.DEFINE_string(\"injected_group\", None,\n \"Optional. One of 'inj1', 'inj2', 'inj3'.\")\n\nflags.DEFINE_string(\n \"scramble_type\", None,\n \"What scrambling procedure to use. One of 'SCR1', 'SCR2', 'SCR3', or None.\")\n\nflags.DEFINE_boolean(\"invert_light_curves\", False,\n \"Whether to generate inverted light curves.\")\n\nflags.DEFINE_string(\"flux_column\", \"PDCSAP_FLUX\", \"Which flux column to read.\")\n\nflags.DEFINE_string(\"output_dir\", None,\n \"Directory in which to save the output.\")\n\nflags.DEFINE_integer(\"num_shards_train\", 8,\n \"Number of shards for the training set.\")\n\nflags.DEFINE_integer(\"num_shards_val\", 1,\n \"Number of shards for the validation set.\")\n\nflags.DEFINE_integer(\"num_shards_test\", 1, \"Number of shards for the test set.\")\n\nflags.DEFINE_integer(\"upward_outlier_clipping\", 5,\n \"Maximum allowed standard deviations above the median.\")\n\nflags.DEFINE_integer(\"downward_outlier_clipping\", None,\n \"Maximum allowed standard deviations below the median.\")\n\nflags.DEFINE_integer(\"clip_lowest_n_values\", 20,\n \"Number of flux values to clip from the bottom.\")\n\nflags.DEFINE_boolean(\"normalize_stddev\", True,\n \"Whether or not to normalize the standard deviation.\")\n\nFLAGS = flags.FLAGS\n\n\ndef main(argv):\n del argv # Unused.\n logging.set_verbosity(logging.INFO)\n\n config = configdict.ConfigDict({\n \"input_kepid_file\": FLAGS.input_kepid_file,\n \"kepler_data_dir\": FLAGS.kepler_data_dir,\n \"flux_column\": FLAGS.flux_column,\n \"injected_group\": FLAGS.injected_group,\n \"scramble_type\": FLAGS.scramble_type,\n \"invert_light_curves\": FLAGS.invert_light_curves,\n \"upward_outlier_clipping\": FLAGS.upward_outlier_clipping,\n \"downward_outlier_clipping\": FLAGS.downward_outlier_clipping,\n \"clip_lowest_n_values\": FLAGS.clip_lowest_n_values,\n \"normalize_stddev\": FLAGS.normalize_stddev,\n })\n\n def pipeline(root):\n \"\"\"Beam pipeline for preprocessing Kepler events.\"\"\"\n if not FLAGS.input_kepid_file:\n raise ValueError(\"--input_kepid_file is required\")\n if not FLAGS.kepler_data_dir:\n raise ValueError(\"--kepler_data_dir is required\")\n if not FLAGS.output_dir:\n raise ValueError(\"--output_dir is required\")\n\n # Write 
the config.\n config_json = json.dumps(config, indent=2)\n root | beam.Create([config_json]) | \"write_config\" >> beam.io.WriteToText(\n os.path.join(FLAGS.output_dir, \"config.json\"),\n num_shards=1,\n shard_name_template=\"\")\n\n # Read input Kepler ids.\n with tf.gfile.Open(config.input_kepid_file) as f:\n kep_ids = [int(line.strip()) for line in f]\n logging.info(\"Read %d Kepler ids from %s\", len(kep_ids),\n config.input_kepid_file)\n\n # Initialize DoFns.\n process_fn = process_light_curve.ProcessLightCurveDoFn(\n config.kepler_data_dir,\n flux_column=config.flux_column,\n injected_group=config.injected_group,\n scramble_type=config.scramble_type,\n invert_light_curves=config.invert_light_curves,\n upward_outlier_clipping=config.upward_outlier_clipping,\n downward_outlier_clipping=config.downward_outlier_clipping,\n clip_lowest_n_values=config.clip_lowest_n_values,\n normalize_stddev=config.normalize_stddev)\n partition_fn = utils.TrainValTestPartitionFn(\n key_name=\"kepler_id\",\n partitions={\n \"train\": 0.8,\n \"val\": 0.1,\n \"test\": 0.1,\n },\n keys=kep_ids)\n\n # Create pipeline.\n inputs = [{\"kepler_id\": kep_id} for kep_id in kep_ids]\n results = (\n root\n | \"create_pcollection\" >> beam.Create(inputs)\n | \"process_light_curves\" >> beam.ParDo(process_fn)\n | \"reshuffle\" >> beam.Reshuffle()\n | \"partition_results\" >> beam.Partition(partition_fn,\n partition_fn.num_partitions))\n\n # Write the outputs in TFRecord format.\n for name, subset in zip(partition_fn.partition_names, results):\n if name == \"train\":\n num_shards = FLAGS.num_shards_train\n elif name == \"val\":\n num_shards = FLAGS.num_shards_val\n elif name == \"test\":\n num_shards = FLAGS.num_shards_test\n else:\n raise ValueError(\"Unrecognized subset name: {}\".format(name))\n\n utils.write_to_tfrecord(\n subset,\n key=\"example\",\n output_dir=FLAGS.output_dir,\n output_name=name,\n coder=beam.coders.ProtoCoder(tf.train.Example),\n num_shards=num_shards)\n\n pipeline.run()\n logging.info(\"Preprocessing complete.\")\n\n\nif __name__ == \"__main__\":\n app.run(main)\n"
] | [
[
"tensorflow.gfile.Open"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
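The only API the row above catalogues for this file is tensorflow.gfile.Open, but the pipeline itself is assembled from standard Apache Beam primitives. A minimal sketch of its "write the config" step, assuming a local DirectRunner and a hypothetical /tmp output path rather than the repository's --output_dir flag:

import json
import apache_beam as beam

config_json = json.dumps({"flux_column": "PDCSAP_FLUX"}, indent=2)
with beam.Pipeline() as root:
    # Wrap the JSON string in a one-element PCollection and write it to a
    # single unsharded text file, as the pipeline above does for config.json.
    (root
     | beam.Create([config_json])
     | "write_config" >> beam.io.WriteToText(
         "/tmp/astrowavenet_config.json",
         num_shards=1,
         shard_name_template=""))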
Samyak2/practical-nlp | [
"15b9b278e958f1b98f21702b6889861747d41eeb"
] | [
"Ch6/Data/utils.py"
] | [
"import pandas as pd\nimport numpy as np\n\ndef get_data(filename):\n df = pd.read_csv(filename,delim_whitespace=True,names=['word','label'])\n beg_indices = list(df[df['word'] == 'BOS'].index)+[df.shape[0]]\n sents,labels,intents = [],[],[]\n for i in range(len(beg_indices[:-1])):\n sents.append(df[beg_indices[i]+1:beg_indices[i+1]-1]['word'].values)\n labels.append(df[beg_indices[i]+1:beg_indices[i+1]-1]['label'].values)\n intents.append(df.loc[beg_indices[i+1]-1]['label']) \n return np.array(sents, dtype=object), np.array(labels, dtype=object), np.array(intents, dtype=object)\n\ndef get_data2(filename):\n with open(filename) as f:\n contents = f.read()\n sents,labels,intents = [],[],[]\n for line in contents.strip().split('\\n'):\n words,labs = [i.split(' ') for i in line.split('\\t')]\n sents.append(words[1:-1])\n labels.append(labs[1:-1])\n intents.append(labs[-1])\n return np.array(sents, dtype=object), np.array(labels, dtype=object), np.array(intents, dtype=object)\n\nread_method = {'Data/data2/atis-2.dev.w-intent.iob':get_data,\n 'Data/data2/atis.train.w-intent.iob':get_data2,\n 'Data/data2/atis.test.w-intent.iob':get_data,\n 'Data/data2/atis-2.train.w-intent.iob':get_data2}\n\ndef fetch_data(fname):\n func = read_method[fname]\n return func(fname)\n"
] | [
[
"numpy.array",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
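A minimal sketch of the pandas.read_csv / numpy.array parsing pattern used by get_data above, assuming a hypothetical whitespace-delimited word/label file named atis_sample.iob (sep=r"\s+" plays the role of delim_whitespace=True in the original):

import numpy as np
import pandas as pd

df = pd.read_csv("atis_sample.iob", sep=r"\s+", names=["word", "label"])
# Each sentence starts at a BOS row and ends just before an EOS row;
# slice between consecutive BOS positions, dropping BOS and EOS themselves.
bos_rows = list(df[df["word"] == "BOS"].index) + [df.shape[0]]
sentences = [
    np.array(df["word"].iloc[start + 1 : end - 1], dtype=object)
    for start, end in zip(bos_rows[:-1], bos_rows[1:])
]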
stzwooju/rlcard | [
"52b51c5451fdec3cff053246c84c0c3641d38076"
] | [
"badugi_dqn.py"
] | [
"''' An example of learning a Deep-Q Agent on Texas Limit Holdem\n'''\n\nimport tensorflow as tf\n\nimport rlcard\nfrom rlcard.agents.badugi_dqn_agent import BadugiDQNAgent\nfrom rlcard.agents.random_agent import RandomAgent\nfrom rlcard.models.badugi_rule_models import BadugiRuleAgentV1\nfrom rlcard.utils.utils import set_global_seed, send_slack\nfrom rlcard.utils.logger import Logger\n\n# Make environment\nenv = rlcard.make('badugi')\neval_env = rlcard.make('badugi')\n\n# Set the iterations numbers and how frequently we evaluate/save plot\nevaluate_every = 1000\nsend_slack_every = 10000\nsave_plot_every = 10000\ncheckpoint_every = 10000\nevaluate_num = 1000\nepisode_num = 1000000\n\n# Set the the number of steps for collecting normalization statistics\n# and intial memory size\nmemory_init_size = 1000\nnorm_step = 100\n\n# The paths for saving the logs and learning curves\nroot_path = './experiments/badugi_dqn_result_with_rule/'\nlog_path = root_path + 'log.txt'\ncsv_path = root_path + 'performance.csv'\nfigure_path = root_path + 'figures/'\ncheckpoint_path = root_path + 'ckpt/'\n\n# Set a global seed\n# set_global_seed(0)\n\nwith tf.Session() as sess:\n # Set agents\n global_step = tf.Variable(0, name='global_step', trainable=False)\n agent = BadugiDQNAgent(sess,\n scope='dqn',\n action_num=env.action_num,\n replay_memory_size=int(1e5),\n replay_memory_init_size=memory_init_size,\n norm_step=norm_step,\n state_shape=env.state_shape,\n mlp_layers=[512, 512],\n ckpt_path=checkpoint_path)\n\n random_agent = RandomAgent(action_num=eval_env.action_num)\n rule_agent = BadugiRuleAgentV1()\n\n sess.run(tf.global_variables_initializer())\n\n env.set_agents([agent, rule_agent, rule_agent, random_agent, random_agent])\n eval_env.set_agents([agent, rule_agent, rule_agent, random_agent, random_agent])\n\n # Count the number of steps\n step_counter = 0\n\n # Init a Logger to plot the learning curve\n bet_logger = Logger(xlabel='timestep', ylabel='bet reward', legend='DQN on Badugi', log_path=log_path, csv_path=csv_path)\n change_logger = Logger(xlabel='timestep', ylabel='change reward', legend='DQN on Badugi', log_path=log_path, csv_path=csv_path)\n\n for episode in range(episode_num):\n\n # Generate data from the environment\n trajectories, _, _ = env.run(is_training=True)\n\n # Feed transitions into agent memory, and train the agent\n for ts in trajectories[0]:\n # print('State: {}\\nAction: {}\\nReward: {}\\nNext State: {}\\nDone: {}\\n'.format(ts[0], ts[1], ts[2], ts[3], ts[4]))\n agent.feed(ts)\n step_counter += 1\n\n # Train the agent\n train_count = step_counter - (memory_init_size + norm_step)\n if train_count > 0:\n bet_loss, change_loss = agent.train()\n print('\\rINFO - Step {}, bet loss: {}, change loss : {}'.format(step_counter, bet_loss, change_loss), end='')\n\n # Evaluate the performance. Play with random agents.\n if episode % evaluate_every == 0:\n print('\\n\\nEpisode {}'.format(episode))\n \n bet_reward = 0\n change_reward = 0\n for eval_episode in range(evaluate_num):\n _, bet_reward_sum, change_reward_sum = eval_env.run(is_training=False)\n\n bet_reward += bet_reward_sum\n change_reward += change_reward_sum\n\n bet_logger.log('\\n########## Evaluation ##########')\n bet_logger.log('Timestep: {} Average bet reward is {}. Average change reward is {}'.format(env.timestep, float(bet_reward)/evaluate_num, float(change_reward)/evaluate_num))\n\n # send_slack('Episode: {} Average bet reward is {}. 
Average change reward is {}'.format(episode, float(bet_reward)/evaluate_num, float(change_reward)/evaluate_num))\n\n # Add point to logger\n bet_logger.add_point(x=env.timestep, y=float(bet_reward)/evaluate_num)\n change_logger.add_point(x=env.timestep, y=float(change_reward)/evaluate_num)\n\n # Make plot\n if episode % save_plot_every == 0 and episode > 0:\n bet_logger.make_plot(save_path=figure_path+'bet/'+str(episode)+'.png')\n change_logger.make_plot(save_path=figure_path+'change/'+str(episode)+'.png')\n \n if episode % checkpoint_every == 0 and episode > 0:\n bet_path, change_path = agent.save(checkpoint_path, episode)\n print('Saved to {}, {}'.format(bet_path, change_path))\n\n # Make the final plot\n bet_logger.make_plot(save_path=figure_path+'bet/'+str(episode)+'.png')\n change_logger.make_plot(save_path=figure_path+'change/'+str(episode)+'.png')\n"
] | [
[
"tensorflow.Variable",
"tensorflow.global_variables_initializer",
"tensorflow.Session"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
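A minimal TensorFlow 1.x sketch of the session bootstrap pattern the row above catalogues (tf.Variable, tf.global_variables_initializer, tf.Session); it assumes one of the 1.x releases listed in the version field, since these symbols are not in the default namespace in TF 2.x:

import tensorflow as tf

with tf.Session() as sess:
    global_step = tf.Variable(0, name="global_step", trainable=False)
    # Variables must be explicitly initialized before they can be read.
    sess.run(tf.global_variables_initializer())
    print(sess.run(global_step))  # -> 0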
febriy/plant_segmentation | [
"8c59d8d61f6868f957319190b40071ff544a1ce9"
] | [
"segmentation_deep/convert_bg.py"
] | [
"import os\nfrom pathlib import Path\nimport pandas as pd\nimport cv2\nfrom torch.utils.data import DataLoader, Dataset, sampler\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nbase_path = Path(__file__).parent.parent\ndata_path = Path(base_path / \"data/\").resolve()\n\nimg_name_path = str(data_path / \"train-256/ara2013_plant001_rgb.png\")\nmask_name_path = str(data_path / \"train_masks-256/ara2013_plant001_label.png\")\nbg_name_path = str(data_path / \"mydata-256/picam.png\")\n\nimg = cv2.imread(img_name_path, cv2.COLOR_BGR2RGB)\nmask = cv2.imread(mask_name_path, cv2.COLOR_BGR2RGB) # , cv2.COLOR_BGR2GRAY)\nbg = cv2.imread(bg_name_path, cv2.COLOR_BGR2RGB)\n\n## Change to white\n# ret, thresh = cv2.threshold(mask, 5, 255, cv2.THRESH_BINARY)\n# img[thresh == 0] = 255\n\n\n# change to another background\nimg[mask == 0] = bg[mask == 0]\n\n\nfig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 15))\nfig.suptitle(\"predicted_mask//original_mask\")\n\n\nax1.imshow(mask)\nax2.imshow(img)\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
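A minimal sketch of the mask-based background replacement and side-by-side matplotlib display from convert_bg.py, using small synthetic arrays in place of the repository's image files:

import numpy as np
from matplotlib import pyplot as plt

img = np.full((64, 64, 3), 200, dtype=np.uint8)  # stand-in foreground photo
bg = np.zeros((64, 64, 3), dtype=np.uint8)       # stand-in background
mask = np.zeros((64, 64, 3), dtype=np.uint8)
mask[16:48, 16:48] = 255                         # "plant" region of the mask

img[mask == 0] = bg[mask == 0]  # copy background pixels wherever mask is zero

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(mask)
ax2.imshow(img)
plt.show()

Note that cv2.imread's second argument is an imread flag (cv2.IMREAD_*), not a colour-conversion code, so passing cv2.COLOR_BGR2RGB, as convert_bg.py does, does not swap channel order; that requires a separate cv2.cvtColor call.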
HimanshuRanka/ImprovedRD-550-2021 | [
"dbbad05babdb3e40c7c3745818bd557d91a4105a"
] | [
"data.py"
] | [
"import os, gc\nimport torch\nimport torch.utils.data\nimport numpy as np\nimport json\ndata_path = os.path.join('..', 'data')\ndevice = torch.device('cuda')\n\nclass MyDataset(torch.utils.data.Dataset): \n def __init__(self, instances):\n self.instances = instances\n \n def __len__(self):\n return len(self.instances)\n \n def __getitem__(self, index):\n return self.instances[index]\n \ndef data2index(data_x, word2index, sememe2index, lexname2index, rootaffix2index, rootaffix_freq, frequency):\n data_x_idx = list()\n for instance in data_x:\n sememe_idx = [sememe2index[se] for se in instance['sememes']]\n lexname_idx = [lexname2index[ln] for ln in instance['lexnames']]\n rootaffix_idx = [rootaffix2index[ra] for ra in instance['root_affix'] if rootaffix_freq[ra]>=frequency]\n def_word_idx = list()\n def_words = instance['definitions'].strip().split()\n if len(def_words) > 0:\n for def_word in def_words:\n if def_word in word2index and def_word!=instance['word']:\n def_word_idx.append(word2index[def_word])\n else:\n def_word_idx.append(word2index['<OOV>'])\n data_x_idx.append({'word': word2index[instance['word']], 'lexnames': lexname_idx, 'root_affix':rootaffix_idx, 'sememes': sememe_idx, 'definition_words': def_word_idx})\n else:\n pass #print(instance['word'], instance['definitions']) # some is null\n return data_x_idx\n \ndef load_data(frequency):\n print('Loading dataset...')\n data_train = json.load(open(os.path.join(data_path, 'data_train.json')))\n data_dev = json.load(open(os.path.join(data_path, 'data_dev.json')))\n data_test_500_rand1_seen = json.load(open(os.path.join(data_path, 'data_test_500_rand1_seen.json')))\n data_test_500_rand1_unseen = json.load(open(os.path.join(data_path, 'data_test_500_rand1_unseen.json'))) #data_test_500_others\n data_defi_c = json.load(open(os.path.join(data_path, 'data_defi_c.json')))\n data_desc_c = json.load(open(os.path.join(data_path, 'data_desc_c.json')))\n lines = open(os.path.join(data_path, 'target_words.txt')).readlines()\n target_words = [line.strip() for line in lines]\n label_size = len(target_words)+2\n print('target_words (include <PAD><OOV>): ', label_size)\n lines = open(os.path.join(data_path, 'lexname_all.txt')).readlines()\n lexname_all = [line.strip() for line in lines]\n label_lexname_size = len(lexname_all)\n print('label_lexname_size: ', label_lexname_size)\n lines = open(os.path.join(data_path, 'root_affix_freq.txt')).readlines()\n rootaffix_freq = {}\n for line in lines:\n rootaffix_freq[line.strip().split()[0]] = int(line.strip().split()[1])\n lines = open(os.path.join(data_path, 'rootaffix_all.txt')).readlines()\n rootaffix_all = [line.strip() for line in lines]\n label_rootaffix_size = len(rootaffix_all)\n print('label_rootaffix_size: ', label_rootaffix_size)\n lines = open(os.path.join(data_path, 'sememes_all.txt')).readlines()\n sememes_all = [line.strip() for line in lines]\n label_sememe_size = len(sememes_all)+1\n print('label_sememe_size: ', label_sememe_size)\n vec_inuse = json.load(open(os.path.join(data_path, 'vec_inuse.json')))\n vocab = list(vec_inuse)\n vocab_size = len(vocab)+2\n print('vocab (embeddings in use)(include <PAD><OOV>): ', vocab_size)\n word2index = dict()\n index2word = list()\n word2index['<PAD>'] = 0\n word2index['<OOV>'] = 1\n index2word.extend(['<PAD>', '<OOV>'])\n index2word.extend(vocab)\n word2vec = np.zeros((vocab_size, len(list(vec_inuse.values())[0])), dtype=np.float32)\n for wd in vocab:\n index = len(word2index)\n word2index[wd] = index\n word2vec[index, :] = vec_inuse[wd]\n 
sememe2index = dict()\n index2sememe = list()\n for sememe in sememes_all:\n sememe2index[sememe] = len(sememe2index)\n index2sememe.append(sememe)\n lexname2index = dict()\n index2lexname = list()\n for ln in lexname_all:\n lexname2index[ln] = len(lexname2index)\n index2lexname.append(ln)\n rootaffix2index = dict()\n index2rootaffix = list()\n for ra in rootaffix_all:\n if rootaffix_freq[ra] >= frequency:\n rootaffix2index[ra] = len(rootaffix2index)\n index2rootaffix.append(ra)\n data_train_idx = data2index(data_train, word2index, sememe2index, lexname2index, rootaffix2index, rootaffix_freq, frequency)\n print('data_train size: %d'%len(data_train_idx))\n data_dev_idx = data2index(data_dev, word2index, sememe2index, lexname2index, rootaffix2index, rootaffix_freq, frequency)\n print('data_dev size: %d'%len(data_dev_idx))\n data_test_500_seen_idx = data2index(data_test_500_rand1_seen, word2index, sememe2index, lexname2index, rootaffix2index, rootaffix_freq, frequency) \n print('data_test_seen size: %d'%len(data_test_500_seen_idx))\n data_test_500_unseen_idx = data2index(data_test_500_rand1_unseen, word2index, sememe2index, lexname2index, rootaffix2index, rootaffix_freq, frequency) \n print('data_test_unseen size: %d'%len(data_test_500_unseen_idx))\n data_defi_c_idx = data2index(data_defi_c, word2index, sememe2index, lexname2index, rootaffix2index, rootaffix_freq, frequency)\n data_desc_c_idx = data2index(data_desc_c, word2index, sememe2index, lexname2index, rootaffix2index, rootaffix_freq, frequency) \n print('data_desc size: %d'%len(data_desc_c_idx))\n return word2index, index2word, word2vec, (index2sememe, index2lexname, index2rootaffix), (label_size, label_lexname_size, label_rootaffix_size, label_sememe_size), (data_train_idx, data_dev_idx, data_test_500_seen_idx, data_test_500_unseen_idx, data_defi_c_idx, data_desc_c_idx)\n\n'''\n{\n \"word\": \"restlessly\",\n \"lexnames\": [\n \"adv.all\"\n ],\n \"root_affix\": [\n \"ly\"\n ],\n \"sememes\": [\n \"rash\"\n ],\n \"definitions\": \"in a restless manner unquietly\"\n}\n'''\n \ndef build_sentence_numpy(sentences):\n max_length = max([len(sentence) for sentence in sentences])\n sentence_numpy = np.zeros((len(sentences), max_length), dtype=np.int64)\n for i in range(len(sentences)):\n sentence_numpy[i, 0:len(sentences[i])] = np.array(sentences[i])\n return sentence_numpy\n \n\ndef label_multihot(labels, num):\n sm = np.zeros((len(labels), num), dtype=np.float32)\n for i in range(len(labels)):\n for s in labels[i]:\n if s >= num:\n break\n sm[i, s] = 1\n return sm\n \ndef my_collate_fn(batch):\n words = [instance['word'] for instance in batch]\n definition_words = [instance['definition_words'] for instance in batch]\n words_t = torch.tensor(np.array(words), dtype=torch.int64, device=device)\n definition_words_t = torch.tensor(build_sentence_numpy(definition_words), dtype=torch.int64, device=device)\n return words_t, definition_words_t\n \ndef word2feature(dataset, word_num, feature_num, feature_name):\n max_feature_num = max([len(instance[feature_name]) for instance in dataset])\n ret = np.zeros((word_num, max_feature_num), dtype=np.int64)\n ret.fill(feature_num)\n for instance in dataset:\n if ret[instance['word'], 0] != feature_num: \n continue # this target_words has been given a feature mapping, because same word with different definition in dataset\n feature = instance[feature_name]\n ret[instance['word'], :len(feature)] = np.array(feature)\n return torch.tensor(ret, dtype=torch.int64, device=device)\n \ndef mask_noFeature(label_size, 
wd2fea, feature_num):\n mask_nofea = torch.zeros(label_size, dtype=torch.float32, device=device)\n for i in range(label_size):\n feas = set(wd2fea[i].detach().cpu().numpy().tolist())-set([feature_num])\n if len(feas)==0:\n mask_nofea[i] = 1\n return mask_nofea"
] | [
[
"torch.zeros",
"torch.tensor",
"torch.device",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
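A minimal sketch of the zero-padding pattern behind build_sentence_numpy in data.py above, with toy index lists in place of dictionary-definition data:

import numpy as np

sentences = [[3, 7, 2], [5, 1], [9]]
max_length = max(len(s) for s in sentences)
batch = np.zeros((len(sentences), max_length), dtype=np.int64)
for i, s in enumerate(sentences):
    batch[i, :len(s)] = np.array(s)  # left-align each sentence, pad with 0
# batch -> [[3 7 2]
#           [5 1 0]
#           [9 0 0]]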
kazakh-shai/SlowFast | [
"279350f7ad2355015f2d8c8c02160f04c89c460e"
] | [
"slowfast/models/ptv_model_builder.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\n\n\"\"\"Video models using PyTorchVideo model builder.\"\"\"\n\nfrom functools import partial\nimport torch.nn as nn\n\nfrom slowfast.models.batchnorm_helper import get_norm\nfrom slowfast.models.video_model_builder import _POOL1, _TEMPORAL_KERNEL_BASIS\n\nfrom pytorchvideo.models.csn import create_csn\nfrom pytorchvideo.models.r2plus1d import (\n create_2plus1d_bottleneck_block,\n create_r2plus1d,\n)\nfrom pytorchvideo.models.resnet import create_bottleneck_block, create_resnet\nfrom pytorchvideo.models.slowfast import create_slowfast\nfrom pytorchvideo.models.x3d import (\n Swish,\n create_x3d,\n create_x3d_bottleneck_block,\n)\nfrom pytorchvideo.models.head import create_res_basic_head, create_res_roi_pooling_head\nfrom detectron2.layers import ROIAlign\nfrom .build import MODEL_REGISTRY\n\n\ndef get_head_act(act_func):\n \"\"\"\n Return the actual head activation function given the activation fucntion name.\n\n Args:\n act_func (string): activation function to use. 'softmax': applies\n softmax on the output. 'sigmoid': applies sigmoid on the output.\n Returns:\n nn.Module: the activation layer.\n \"\"\"\n if act_func == \"softmax\":\n return nn.Softmax(dim=1)\n elif act_func == \"sigmoid\":\n return nn.Sigmoid()\n else:\n raise NotImplementedError(\n \"{} is not supported as a head activation \"\n \"function.\".format(act_func)\n )\n\n\n@MODEL_REGISTRY.register()\nclass PTVResNet(nn.Module):\n \"\"\"\n ResNet models using PyTorchVideo model builder.\n \"\"\"\n\n def __init__(self, cfg):\n \"\"\"\n The `__init__` method of any subclass should also contain these\n arguments.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n super(PTVResNet, self).__init__()\n\n assert (\n cfg.RESNET.STRIDE_1X1 is False\n ), \"STRIDE_1x1 must be True for PTVResNet\"\n assert (\n cfg.RESNET.TRANS_FUNC == \"bottleneck_transform\"\n ), f\"Unsupported TRANS_FUNC type {cfg.RESNET.TRANS_FUNC} for PTVResNet\"\n assert cfg.MODEL.ARCH in [\n \"c2d\",\n \"slow\",\n \"i3d\",\n ], f\"Unsupported MODEL.ARCH type {cfg.MODEL.ARCH} for PTVResNet\"\n\n self.detection_mode = cfg.DETECTION.ENABLE\n self._construct_network(cfg)\n\n def _construct_network(self, cfg):\n \"\"\"\n Builds a single pathway ResNet model.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n\n # Params from configs.\n norm_module = get_norm(cfg)\n head_act = get_head_act(cfg.MODEL.HEAD_ACT)\n pool_size = _POOL1[cfg.MODEL.ARCH]\n num_groups = cfg.RESNET.NUM_GROUPS\n spatial_dilations = cfg.RESNET.SPATIAL_DILATIONS\n spatial_strides = cfg.RESNET.SPATIAL_STRIDES\n temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]\n stage1_pool = pool_size[0][0] != 1 or len(set(pool_size[0])) > 1\n stage_spatial_stride = (\n spatial_strides[0][0],\n spatial_strides[1][0],\n spatial_strides[2][0],\n spatial_strides[3][0],\n )\n if cfg.MODEL.ARCH == \"i3d\":\n stage_conv_a_kernel_size = (\n (3, 1, 1),\n [(3, 1, 1), (1, 1, 1)],\n [(3, 1, 1), (1, 1, 1)],\n [(1, 1, 1), (3, 1, 1)],\n )\n else:\n stage_conv_a_kernel_size = (\n (temp_kernel[1][0][0], 1, 1),\n (temp_kernel[2][0][0], 1, 1),\n (temp_kernel[3][0][0], 1, 1),\n (temp_kernel[4][0][0], 1, 1),\n )\n\n # Head from config\n if cfg.DETECTION.ENABLE:\n self.detection_head = create_res_roi_pooling_head(\n in_features=cfg.RESNET.WIDTH_PER_GROUP * 2**(4+1),\n out_features=cfg.MODEL.NUM_CLASSES,\n 
pool=nn.AvgPool3d,\n output_size=(1,1,1),\n pool_kernel_size= (\n cfg.DATA.NUM_FRAMES // pool_size[0][0], 1, 1,\n ),\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n activation=None,\n output_with_global_average=False,\n pool_spatial=nn.MaxPool2d,\n resolution=[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2,\n spatial_scale=1.0/float(cfg.DETECTION.SPATIAL_SCALE_FACTOR),\n sampling_ratio=0,\n roi=ROIAlign,\n )\n\n self.model = create_resnet(\n # Input clip configs.\n input_channel=cfg.DATA.INPUT_CHANNEL_NUM[0],\n # Model configs.\n model_depth=cfg.RESNET.DEPTH,\n model_num_class=cfg.MODEL.NUM_CLASSES,\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n # Normalization configs.\n norm=norm_module,\n # Activation configs.\n activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),\n # Stem configs.\n stem_dim_out=cfg.RESNET.WIDTH_PER_GROUP,\n stem_conv_kernel_size=(temp_kernel[0][0][0], 7, 7),\n stem_conv_stride=(1, 2, 2),\n stem_pool=nn.MaxPool3d,\n stem_pool_kernel_size=(1, 3, 3),\n stem_pool_stride=(1, 2, 2),\n # Stage configs.\n stage1_pool=nn.MaxPool3d if stage1_pool else None,\n stage1_pool_kernel_size=pool_size[0],\n stage_conv_a_kernel_size=stage_conv_a_kernel_size,\n stage_conv_b_kernel_size=(\n (1, 3, 3),\n (1, 3, 3),\n (1, 3, 3),\n (1, 3, 3),\n ),\n stage_conv_b_num_groups=(\n num_groups,\n num_groups,\n num_groups,\n num_groups,\n ),\n stage_conv_b_dilation=(\n (1, spatial_dilations[0][0], spatial_dilations[0][0]),\n (1, spatial_dilations[1][0], spatial_dilations[1][0]),\n (1, spatial_dilations[2][0], spatial_dilations[2][0]),\n (1, spatial_dilations[3][0], spatial_dilations[3][0]),\n ),\n stage_spatial_h_stride=stage_spatial_stride,\n stage_spatial_w_stride=stage_spatial_stride,\n stage_temporal_stride=(1, 1, 1, 1),\n bottleneck=create_bottleneck_block,\n # Head configs.\n head=create_res_basic_head if not self.detection_mode else None,\n head_pool=nn.AvgPool3d,\n head_pool_kernel_size=(\n cfg.DATA.NUM_FRAMES // pool_size[0][0],\n cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1],\n cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2],\n ),\n head_activation=None,\n head_output_with_global_average=False,\n )\n\n self.post_act = head_act\n\n def forward(self, x, bboxes=None):\n x = x[0]\n x = self.model(x)\n if self.detection_mode:\n x = self.detection_head(x, bboxes)\n x = self.post_act(x)\n else:\n # Performs fully convlutional inference.\n if not self.training:\n x = self.post_act(x)\n x = x.mean([2, 3, 4])\n x = x.view(x.shape[0], -1)\n return x\n\n\n@MODEL_REGISTRY.register()\nclass PTVSlowFast(nn.Module):\n def __init__(self, cfg):\n \"\"\"\n The `__init__` method of any subclass should also contain these\n arguments.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n super(PTVSlowFast, self).__init__()\n\n assert (\n cfg.RESNET.STRIDE_1X1 is False\n ), \"STRIDE_1x1 must be True for PTVSlowFast\"\n assert (\n cfg.RESNET.TRANS_FUNC == \"bottleneck_transform\"\n ), f\"Unsupported TRANS_FUNC type {cfg.RESNET.TRANS_FUNC} for PTVSlowFast\"\n\n self.detection_mode = cfg.DETECTION.ENABLE\n self._construct_network(cfg)\n\n def _construct_network(self, cfg):\n \"\"\"\n Builds a SlowFast model.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n _MODEL_STAGE_DEPTH = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}\n\n # Params from configs.\n norm_module = get_norm(cfg)\n pool_size = _POOL1[cfg.MODEL.ARCH]\n num_groups = cfg.RESNET.NUM_GROUPS\n width_per_group = cfg.RESNET.WIDTH_PER_GROUP\n 
spatial_dilations = cfg.RESNET.SPATIAL_DILATIONS\n spatial_strides = cfg.RESNET.SPATIAL_STRIDES\n temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]\n num_block_temp_kernel = cfg.RESNET.NUM_BLOCK_TEMP_KERNEL\n stage_depth = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]\n\n stage_conv_a_kernel_sizes = [[], []]\n for pathway in range(2):\n for stage in range(4):\n stage_conv_a_kernel_sizes[pathway].append(\n ((temp_kernel[stage + 1][pathway][0], 1, 1),)\n * num_block_temp_kernel[stage][pathway]\n + ((1, 1, 1),)\n * (\n stage_depth[stage]\n - num_block_temp_kernel[stage][pathway]\n )\n )\n\n # Head from config\n # Number of stages = 4\n stage_dim_in = cfg.RESNET.WIDTH_PER_GROUP * 2**(4+1)\n head_in_features = stage_dim_in\n for reduction_ratio in cfg.SLOWFAST.BETA_INV:\n head_in_features = head_in_features + stage_dim_in // reduction_ratio\n\n if cfg.DETECTION.ENABLE:\n self.detection_head = create_res_roi_pooling_head(\n in_features=head_in_features,\n out_features=cfg.MODEL.NUM_CLASSES,\n pool=None,\n output_size=(1,1,1),\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n activation=None,\n output_with_global_average=False,\n pool_spatial=nn.MaxPool2d,\n resolution=[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2,\n spatial_scale=1.0/float(cfg.DETECTION.SPATIAL_SCALE_FACTOR),\n sampling_ratio=0,\n roi=ROIAlign,\n )\n head_pool_kernel_sizes = (\n (\n cfg.DATA.NUM_FRAMES\n // cfg.SLOWFAST.ALPHA\n // pool_size[0][0],\n 1,\n 1,\n ),\n (cfg.DATA.NUM_FRAMES // pool_size[1][0], 1, 1),\n )\n else:\n head_pool_kernel_sizes = (\n (\n cfg.DATA.NUM_FRAMES\n // cfg.SLOWFAST.ALPHA\n // pool_size[0][0],\n cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][1],\n cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[0][2],\n ),\n (\n cfg.DATA.NUM_FRAMES // pool_size[1][0],\n cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][1],\n cfg.DATA.TRAIN_CROP_SIZE // 32 // pool_size[1][2],\n ),\n )\n\n self.model = create_slowfast(\n # SlowFast configs.\n slowfast_channel_reduction_ratio=cfg.SLOWFAST.BETA_INV,\n slowfast_conv_channel_fusion_ratio=cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n slowfast_fusion_conv_kernel_size=(\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n 1,\n 1,\n ),\n slowfast_fusion_conv_stride=(cfg.SLOWFAST.ALPHA, 1, 1),\n # Input clip configs.\n input_channels=cfg.DATA.INPUT_CHANNEL_NUM,\n # Model configs.\n model_depth=cfg.RESNET.DEPTH,\n model_num_class=cfg.MODEL.NUM_CLASSES,\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n # Normalization configs.\n norm=norm_module,\n # Activation configs.\n activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),\n # Stem configs.\n stem_dim_outs=(\n width_per_group,\n width_per_group // cfg.SLOWFAST.BETA_INV,\n ),\n stem_conv_kernel_sizes=(\n (temp_kernel[0][0][0], 7, 7),\n (temp_kernel[0][1][0], 7, 7),\n ),\n stem_conv_strides=((1, 2, 2), (1, 2, 2)),\n stem_pool=nn.MaxPool3d,\n stem_pool_kernel_sizes=((1, 3, 3), (1, 3, 3)),\n stem_pool_strides=((1, 2, 2), (1, 2, 2)),\n # Stage configs.\n stage_conv_a_kernel_sizes=stage_conv_a_kernel_sizes,\n stage_conv_b_kernel_sizes=(\n ((1, 3, 3), (1, 3, 3), (1, 3, 3), (1, 3, 3)),\n ((1, 3, 3), (1, 3, 3), (1, 3, 3), (1, 3, 3)),\n ),\n stage_conv_b_num_groups=(\n (num_groups, num_groups, num_groups, num_groups),\n (num_groups, num_groups, num_groups, num_groups),\n ),\n stage_conv_b_dilations=(\n (\n (1, spatial_dilations[0][0], spatial_dilations[0][0]),\n (1, spatial_dilations[1][0], spatial_dilations[1][0]),\n (1, spatial_dilations[2][0], spatial_dilations[2][0]),\n (1, spatial_dilations[3][0], spatial_dilations[3][0]),\n ),\n (\n (1, spatial_dilations[0][1], 
spatial_dilations[0][1]),\n (1, spatial_dilations[1][1], spatial_dilations[1][1]),\n (1, spatial_dilations[1][1], spatial_dilations[1][1]),\n (1, spatial_dilations[1][1], spatial_dilations[1][1]),\n ),\n ),\n stage_spatial_strides=(\n (\n spatial_strides[0][0],\n spatial_strides[1][0],\n spatial_strides[2][0],\n spatial_strides[3][0],\n ),\n (\n spatial_strides[0][1],\n spatial_strides[1][1],\n spatial_strides[2][1],\n spatial_strides[3][1],\n ),\n ),\n stage_temporal_strides=((1, 1, 1, 1), (1, 1, 1, 1)),\n bottleneck=create_bottleneck_block,\n # Head configs.\n head=create_res_basic_head if not self.detection_mode else None,\n head_pool=nn.AvgPool3d,\n head_pool_kernel_sizes=head_pool_kernel_sizes,\n head_activation=None,\n head_output_with_global_average=False,\n )\n\n self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)\n\n def forward(self, x, bboxes=None):\n x = self.model(x)\n if self.detection_mode:\n x = self.detection_head(x, bboxes)\n x = self.post_act(x)\n else:\n # Performs fully convlutional inference.\n if not self.training:\n x = self.post_act(x)\n x = x.mean([2, 3, 4])\n x = x.view(x.shape[0], -1)\n return x\n\n\n@MODEL_REGISTRY.register()\nclass PTVX3D(nn.Module):\n def __init__(self, cfg):\n \"\"\"\n The `__init__` method of any subclass should also contain these\n arguments.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n super(PTVX3D, self).__init__()\n\n assert (\n cfg.RESNET.STRIDE_1X1 is False\n ), \"STRIDE_1x1 must be True for PTVX3D\"\n assert (\n cfg.RESNET.TRANS_FUNC == \"x3d_transform\"\n ), f\"Unsupported TRANS_FUNC type {cfg.RESNET.TRANS_FUNC} for PTVX3D\"\n assert (\n cfg.DETECTION.ENABLE is False\n ), \"Detection model is not supported for PTVX3D yet.\"\n\n self._construct_network(cfg)\n\n def _construct_network(self, cfg):\n \"\"\"\n Builds a X3D model.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n\n # Params from configs.\n norm_module = get_norm(cfg)\n temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]\n\n self.model = create_x3d(\n # Input clip configs.\n input_channel=cfg.DATA.INPUT_CHANNEL_NUM[0],\n input_clip_length=cfg.DATA.NUM_FRAMES,\n input_crop_size=cfg.DATA.TRAIN_CROP_SIZE,\n # Model configs.\n model_num_class=cfg.MODEL.NUM_CLASSES,\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n width_factor=cfg.X3D.WIDTH_FACTOR,\n depth_factor=cfg.X3D.DEPTH_FACTOR,\n # Normalization configs.\n norm=norm_module,\n norm_eps=1e-5,\n norm_momentum=0.1,\n # Activation configs.\n activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),\n # Stem configs.\n stem_dim_in=cfg.X3D.DIM_C1,\n stem_conv_kernel_size=(temp_kernel[0][0][0], 3, 3),\n stem_conv_stride=(1, 2, 2),\n # Stage configs.\n stage_conv_kernel_size=(\n (temp_kernel[1][0][0], 3, 3),\n (temp_kernel[2][0][0], 3, 3),\n (temp_kernel[3][0][0], 3, 3),\n (temp_kernel[4][0][0], 3, 3),\n ),\n stage_spatial_stride=(2, 2, 2, 2),\n stage_temporal_stride=(1, 1, 1, 1),\n bottleneck=create_x3d_bottleneck_block,\n bottleneck_factor=cfg.X3D.BOTTLENECK_FACTOR,\n se_ratio=0.0625,\n inner_act=Swish,\n # Head configs.\n head_dim_out=cfg.X3D.DIM_C5,\n head_pool_act=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),\n head_bn_lin5_on=cfg.X3D.BN_LIN5,\n head_activation=None,\n head_output_with_global_average=False,\n )\n\n self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)\n\n def forward(self, x, bboxes=None):\n x = x[0]\n x = self.model(x)\n # Performs fully convlutional inference.\n if not self.training:\n 
x = self.post_act(x)\n x = x.mean([2, 3, 4])\n\n x = x.reshape(x.shape[0], -1)\n return x\n\n\n@MODEL_REGISTRY.register()\nclass PTVCSN(nn.Module):\n \"\"\"\n CSN models using PyTorchVideo model builder.\n \"\"\"\n\n def __init__(self, cfg):\n \"\"\"\n The `__init__` method of any subclass should also contain these\n arguments.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n super(PTVCSN, self).__init__()\n\n assert (\n cfg.DETECTION.ENABLE is False\n ), \"Detection model is not supported for PTVCSN yet.\"\n\n self._construct_network(cfg)\n\n def _construct_network(self, cfg):\n \"\"\"\n Builds a single pathway ResNet model.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n\n # Params from configs.\n norm_module = get_norm(cfg)\n\n self.model = create_csn(\n # Input clip configs.\n input_channel=cfg.DATA.INPUT_CHANNEL_NUM[0],\n # Model configs.\n model_depth=cfg.RESNET.DEPTH,\n model_num_class=cfg.MODEL.NUM_CLASSES,\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n # Normalization configs.\n norm=norm_module,\n # Activation configs.\n activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),\n # Stem configs.\n stem_dim_out=cfg.RESNET.WIDTH_PER_GROUP,\n stem_conv_kernel_size=(3, 7, 7),\n stem_conv_stride=(1, 2, 2),\n stem_pool=nn.MaxPool3d,\n stem_pool_kernel_size=(1, 3, 3),\n stem_pool_stride=(1, 2, 2),\n # Stage configs.\n stage_conv_a_kernel_size=(1, 1, 1),\n stage_conv_b_kernel_size=(3, 3, 3),\n stage_conv_b_width_per_group=1,\n stage_spatial_stride=(1, 2, 2, 2),\n stage_temporal_stride=(1, 2, 2, 2),\n bottleneck=create_bottleneck_block,\n # Head configs.\n head_pool=nn.AvgPool3d,\n head_pool_kernel_size=(\n cfg.DATA.NUM_FRAMES // 8,\n cfg.DATA.TRAIN_CROP_SIZE // 32,\n cfg.DATA.TRAIN_CROP_SIZE // 32,\n ),\n head_activation=None,\n head_output_with_global_average=False,\n )\n\n self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)\n\n def forward(self, x, bboxes=None):\n x = x[0]\n x = self.model(x)\n # Performs fully convlutional inference.\n if not self.training:\n x = self.post_act(x)\n x = x.mean([2, 3, 4])\n\n x = x.reshape(x.shape[0], -1)\n return x\n\n\n@MODEL_REGISTRY.register()\nclass PTVR2plus1D(nn.Module):\n \"\"\"\n R(2+1)D models using PyTorchVideo model builder.\n \"\"\"\n\n def __init__(self, cfg):\n \"\"\"\n The `__init__` method of any subclass should also contain these\n arguments.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n super(PTVR2plus1D, self).__init__()\n\n assert (\n cfg.DETECTION.ENABLE is False\n ), \"Detection model is not supported for PTVR2plus1D yet.\"\n\n self._construct_network(cfg)\n\n def _construct_network(self, cfg):\n \"\"\"\n Builds a single pathway R(2+1)D model.\n\n Args:\n cfg (CfgNode): model building configs, details are in the\n comments of the config file.\n \"\"\"\n self.model = create_r2plus1d(\n # Input clip configs.\n input_channel=cfg.DATA.INPUT_CHANNEL_NUM[0],\n # Model configs.\n model_depth=cfg.RESNET.DEPTH,\n model_num_class=cfg.MODEL.NUM_CLASSES,\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n # Normalization configs.\n norm=get_norm(cfg),\n norm_eps=1e-5,\n norm_momentum=0.1,\n # Activation configs.\n activation=partial(nn.ReLU, inplace=cfg.RESNET.INPLACE_RELU),\n # Stem configs.\n stem_dim_out=cfg.RESNET.WIDTH_PER_GROUP,\n stem_conv_kernel_size=(1, 7, 7),\n stem_conv_stride=(1, 2, 2),\n # Stage configs.\n stage_conv_a_kernel_size=(\n (1, 1, 1),\n (1, 1, 
1),\n (1, 1, 1),\n (1, 1, 1),\n ),\n stage_conv_b_kernel_size=(\n (3, 3, 3),\n (3, 3, 3),\n (3, 3, 3),\n (3, 3, 3),\n ),\n stage_conv_b_num_groups=(1, 1, 1, 1),\n stage_conv_b_dilation=(\n (1, 1, 1),\n (1, 1, 1),\n (1, 1, 1),\n (1, 1, 1),\n ),\n stage_spatial_stride=(2, 2, 2, 2),\n stage_temporal_stride=(1, 1, 2, 2),\n stage_bottleneck=(\n create_2plus1d_bottleneck_block,\n create_2plus1d_bottleneck_block,\n create_2plus1d_bottleneck_block,\n create_2plus1d_bottleneck_block,\n ),\n # Head configs.\n head_pool=nn.AvgPool3d,\n head_pool_kernel_size=(\n cfg.DATA.NUM_FRAMES // 4,\n cfg.DATA.TRAIN_CROP_SIZE // 32,\n cfg.DATA.TRAIN_CROP_SIZE // 32,\n ),\n head_activation=None,\n head_output_with_global_average=False,\n )\n\n self.post_act = get_head_act(cfg.MODEL.HEAD_ACT)\n\n def forward(self, x, bboxes=None):\n x = x[0]\n x = self.model(x)\n # Performs fully convlutional inference.\n if not self.training:\n x = self.post_act(x)\n x = x.mean([2, 3, 4])\n\n x = x.view(x.shape[0], -1)\n return x\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Sigmoid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
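The only torch calls the row above catalogues are torch.nn.Softmax and torch.nn.Sigmoid, used in get_head_act. A minimal sketch of that selection logic applied to a toy batch of logits:

import torch
import torch.nn as nn

def get_head_act(act_func):
    # Mirrors the head-activation selection in ptv_model_builder.py above.
    if act_func == "softmax":
        return nn.Softmax(dim=1)
    if act_func == "sigmoid":
        return nn.Sigmoid()
    raise NotImplementedError(f"{act_func} is not supported as a head activation function.")

logits = torch.randn(2, 5)
probs = get_head_act("softmax")(logits)  # each row of probs sums to 1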
q-hwang/tacotron2 | [
"002e12fe968e1f3757de658de8e2975b8f7d43e3"
] | [
"train.py"
] | [
"import os\nimport time\nimport argparse\nimport math\nfrom numpy import finfo\n\nimport torch\nfrom distributed import DistributedDataParallel\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.nn import DataParallel\nfrom torch.utils.data import DataLoader\n\nfrom fp16_optimizer import FP16_Optimizer\n\nfrom model import Tacotron2\nfrom data_utils import TextMelLoader, TextMelCollate\nfrom loss_function import Tacotron2Loss\nfrom logger import Tacotron2Logger\nfrom hparams import create_hparams\n\n\ndef batchnorm_to_float(module):\n \"\"\"Converts batch norm modules to FP32\"\"\"\n if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):\n module.float()\n for child in module.children():\n batchnorm_to_float(child)\n return module\n\n\ndef reduce_tensor(tensor, num_gpus):\n rt = tensor.clone()\n torch.distributed.all_reduce(rt, op=torch.distributed.reduce_op.SUM)\n rt /= num_gpus\n return rt\n\n\ndef init_distributed(hparams, n_gpus, rank, group_name):\n assert torch.cuda.is_available(), \"Distributed mode requires CUDA.\"\n print(\"Initializing distributed\")\n # Set cuda device so everything is done on the right GPU.\n torch.cuda.set_device(rank % torch.cuda.device_count())\n\n # Initialize distributed communication\n torch.distributed.init_process_group(\n backend=hparams.dist_backend, init_method=hparams.dist_url,\n world_size=n_gpus, rank=rank, group_name=group_name)\n\n print(\"Done initializing distributed\")\n\n\ndef prepare_dataloaders(hparams):\n # Get data, data loaders and collate function ready\n trainset = TextMelLoader(hparams.training_files, hparams)\n valset = TextMelLoader(hparams.validation_files, hparams)\n collate_fn = TextMelCollate(hparams.n_frames_per_step)\n\n train_sampler = DistributedSampler(trainset) \\\n if hparams.distributed_run else None\n\n train_loader = DataLoader(trainset, num_workers=0, shuffle=True,\n sampler=train_sampler,\n batch_size=hparams.batch_size, pin_memory=False,\n drop_last=True, collate_fn=collate_fn)\n return train_loader, valset, collate_fn\n\n\ndef prepare_directories_and_logger(output_directory, log_directory, rank):\n if rank == 0:\n if not os.path.isdir(output_directory):\n os.makedirs(output_directory)\n os.chmod(output_directory, 0o775)\n logger = Tacotron2Logger(os.path.join(output_directory, log_directory))\n else:\n logger = None\n return logger\n\n\ndef load_model(hparams):\n \n model = Tacotron2(hparams).to(torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"))\n if hparams.fp16_run:\n model = batchnorm_to_float(model.half())\n model.decoder.attention_layer.score_mask_value = float(finfo('float16').min)\n\n if hparams.distributed_run:\n model = DistributedDataParallel(model)\n elif torch.cuda.device_count() > 1:\n model = DataParallel(model)\n\n return model\n\n\ndef warm_start_model(checkpoint_path, model):\n assert os.path.isfile(checkpoint_path)\n print(\"Warm starting model from checkpoint '{}'\".format(checkpoint_path))\n checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(checkpoint_dict['state_dict'])\n return model\n\n\ndef load_checkpoint(checkpoint_path, model, optimizer):\n assert os.path.isfile(checkpoint_path)\n print(\"Loading checkpoint '{}'\".format(checkpoint_path))\n checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n model.load_state_dict(checkpoint_dict['state_dict'])\n optimizer.load_state_dict(checkpoint_dict['optimizer'])\n learning_rate = checkpoint_dict['learning_rate']\n iteration = 
checkpoint_dict['iteration']\n print(\"Loaded checkpoint '{}' from iteration {}\" .format(\n checkpoint_path, iteration))\n return model, optimizer, learning_rate, iteration\n\n\ndef save_checkpoint(model, optimizer, learning_rate, iteration, filepath):\n print(\"Saving model and optimizer state at iteration {} to {}\".format(\n iteration, filepath))\n torch.save({'iteration': iteration,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'learning_rate': learning_rate}, filepath)\n\n\ndef validate(model, criterion, valset, iteration, batch_size, n_gpus,\n collate_fn, logger, distributed_run, rank):\n \"\"\"Handles all the validation scoring and printing\"\"\"\n model.eval()\n with torch.no_grad():\n val_sampler = DistributedSampler(valset) if distributed_run else None\n val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,\n shuffle=False, batch_size=batch_size,\n pin_memory=False, collate_fn=collate_fn)\n\n val_loss = 0.0\n if distributed_run or torch.cuda.device_count() > 1:\n batch_parser = model.module.parse_batch\n else:\n batch_parser = model.parse_batch\n\n for i, batch in enumerate(val_loader):\n x, y = batch_parser(batch)\n y_pred = model(x)\n loss = criterion(y_pred, y)\n reduced_val_loss = reduce_tensor(loss.data, n_gpus)[0] \\\n if distributed_run else loss.data[0]\n val_loss += reduced_val_loss\n val_loss = val_loss / (i + 1)\n\n model.train()\n return val_loss\n\n\ndef train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,\n rank, group_name, hparams):\n \"\"\"Training and validation logging results to tensorboard and stdout\n\n Params\n ------\n output_directory (string): directory to save checkpoints\n log_directory (string) directory to save tensorboard logs\n checkpoint_path(string): checkpoint path\n n_gpus (int): number of gpus\n rank (int): rank of current gpu\n hparams (object): comma separated list of \"name=value\" pairs.\n \"\"\"\n if hparams.distributed_run:\n init_distributed(hparams, n_gpus, rank, group_name)\n\n torch.manual_seed(hparams.seed)\n torch.cuda.manual_seed(hparams.seed)\n print(\"Loading models...\")\n model = load_model(hparams)\n\n print(\"Initializing optimizer...\")\n learning_rate = hparams.learning_rate\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=hparams.weight_decay)\n if hparams.fp16_run:\n optimizer = FP16_Optimizer(\n optimizer, dynamic_loss_scale=hparams.dynamic_loss_scaling)\n\n criterion = Tacotron2Loss()\n\n print(\"Initializing logger...\")\n logger = prepare_directories_and_logger(\n output_directory, log_directory, rank)\n\n print(\"Initializing dataloader...\")\n train_loader, valset, collate_fn = prepare_dataloaders(hparams)\n\n print(\"Loading checkpoints...\")\n # Load checkpoint if one exists\n iteration = 0\n epoch_offset = 0\n if checkpoint_path is not None:\n if warm_start:\n model = warm_start_model(checkpoint_path, model)\n else:\n model, optimizer, _learning_rate, iteration = load_checkpoint(\n checkpoint_path, model, optimizer)\n if hparams.use_saved_learning_rate:\n learning_rate = _learning_rate\n\n iteration += 1 # next iteration is iteration + 1\n epoch_offset = max(0, int(iteration / len(train_loader)))\n\n model.train()\n if hparams.distributed_run or torch.cuda.device_count() > 1:\n batch_parser = model.module.parse_batch\n else:\n batch_parser = model.parse_batch\n # ================ MAIN TRAINNIG LOOP! 
===================\n for epoch in range(epoch_offset, hparams.epochs):\n print(\"Epoch: {}\".format(epoch))\n for i, batch in enumerate(train_loader):\n start = time.perf_counter()\n for param_group in optimizer.param_groups:\n param_group['lr'] = learning_rate\n\n model.zero_grad()\n x, y = batch_parser(batch)\n y_pred = model(x)\n loss = criterion(y_pred, y)\n reduced_loss = reduce_tensor(loss.data, n_gpus)[0] \\\n if hparams.distributed_run else loss.data[0]\n\n if hparams.fp16_run:\n optimizer.backward(loss)\n grad_norm = optimizer.clip_fp32_grads(hparams.grad_clip_thresh)\n else:\n loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm(\n model.parameters(), hparams.grad_clip_thresh)\n\n optimizer.step()\n\n overflow = optimizer.overflow if hparams.fp16_run else False\n\n if not overflow and not math.isnan(reduced_loss) and rank == 0:\n duration = time.perf_counter() - start\n print(\"Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it\".format(\n iteration, reduced_loss, grad_norm, duration))\n\n logger.log_training(\n reduced_loss, grad_norm, learning_rate, duration, iteration)\n\n if not overflow and (iteration % hparams.iters_per_checkpoint == 0):\n reduced_val_loss = validate(\n model, criterion, valset, iteration, hparams.batch_size,\n n_gpus, collate_fn, logger, hparams.distributed_run, rank)\n\n if rank == 0:\n print(\"Validation loss {}: {:9f} \".format(\n iteration, reduced_val_loss))\n\n checkpoint_path = os.path.join(\n output_directory, \"checkpoint_{}\".format(iteration))\n save_checkpoint(model, optimizer, learning_rate, iteration,\n checkpoint_path)\n logger.log_validation(\n reduced_val_loss, model, x, y, y_pred, iteration,hparams)\n\n iteration += 1\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-o', '--output_directory', type=str,\n help='directory to save checkpoints')\n parser.add_argument('-l', '--log_directory', type=str,\n help='directory to save tensorboard logs')\n parser.add_argument('-c', '--checkpoint_path', type=str, default=None,\n required=False, help='checkpoint path')\n parser.add_argument('--warm_start', action='store_true',\n help='load the model only (warm start)')\n parser.add_argument('--n_gpus', type=int, default=1,\n required=False, help='number of gpus')\n parser.add_argument('--rank', type=int, default=0,\n required=False, help='rank of current gpu')\n parser.add_argument('--group_name', type=str, default='group_name',\n required=False, help='Distributed group name')\n parser.add_argument('--hparams', type=str,\n required=False, help='comma separated name=value pairs')\n\n args = parser.parse_args()\n hparams = create_hparams(args.hparams)\n\n torch.backends.cudnn.enabled = hparams.cudnn_enabled\n torch.backends.cudnn.benchmark = hparams.cudnn_benchmark\n\n print(\"FP16 Run:\", hparams.fp16_run)\n print(\"Dynamic Loss Scaling:\", hparams.dynamic_loss_scaling)\n print(\"Distributed Run:\", hparams.distributed_run)\n print(\"cuDNN Enabled:\", hparams.cudnn_enabled)\n print(\"cuDNN Benchmark:\", hparams.cudnn_benchmark)\n\n train(args.output_directory, args.log_directory, args.checkpoint_path,\n args.warm_start, args.n_gpus, args.rank, args.group_name, hparams)\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.manual_seed",
"torch.load",
"torch.utils.data.distributed.DistributedSampler",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"numpy.finfo",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.device_count",
"torch.distributed.all_reduce"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
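A minimal sketch of the torch.save / torch.load checkpoint round trip used by save_checkpoint and load_checkpoint in train.py above, with a toy linear model standing in for Tacotron2:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# Save iteration counter, model weights, optimizer state and learning rate.
torch.save({"iteration": 0,
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "learning_rate": 1e-3}, "checkpoint_0")

# Restore everything on CPU, as the loaders above do with map_location='cpu'.
ckpt = torch.load("checkpoint_0", map_location="cpu")
model.load_state_dict(ckpt["state_dict"])
optimizer.load_state_dict(ckpt["optimizer"])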
tretherington/scipy | [
"6314c1c7d789d3a8323178cd91f581b589b2f11c"
] | [
"scipy/signal/wavelets.py"
] | [
"import numpy as np\nfrom numpy.dual import eig\nfrom scipy.special import comb\nfrom scipy.signal import convolve\n\n__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'morlet2', 'cwt']\n\n\ndef daub(p):\n \"\"\"\n The coefficients for the FIR low-pass filter producing Daubechies wavelets.\n\n p>=1 gives the order of the zero at f=1/2.\n There are 2p filter coefficients.\n\n Parameters\n ----------\n p : int\n Order of the zero at f=1/2, can have values from 1 to 34.\n\n Returns\n -------\n daub : ndarray\n Return\n\n \"\"\"\n sqrt = np.sqrt\n if p < 1:\n raise ValueError(\"p must be at least 1.\")\n if p == 1:\n c = 1 / sqrt(2)\n return np.array([c, c])\n elif p == 2:\n f = sqrt(2) / 8\n c = sqrt(3)\n return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])\n elif p == 3:\n tmp = 12 * sqrt(10)\n z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6\n z1c = np.conj(z1)\n f = sqrt(2) / 8\n d0 = np.real((1 - z1) * (1 - z1c))\n a0 = np.real(z1 * z1c)\n a1 = 2 * np.real(z1)\n return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,\n a0 - 3 * a1 + 3, 3 - a1, 1])\n elif p < 35:\n # construct polynomial and factor it\n if p < 35:\n P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]\n yj = np.roots(P)\n else: # try different polynomial --- needs work\n P = [comb(p - 1 + k, k, exact=1) / 4.0**k\n for k in range(p)][::-1]\n yj = np.roots(P) / 4\n # for each root, compute two z roots, select the one with |z|>1\n # Build up final polynomial\n c = np.poly1d([1, 1])**p\n q = np.poly1d([1])\n for k in range(p - 1):\n yval = yj[k]\n part = 2 * sqrt(yval * (yval - 1))\n const = 1 - 2 * yval\n z1 = const + part\n if (abs(z1)) < 1:\n z1 = const - part\n q = q * [1, -z1]\n\n q = c * np.real(q)\n # Normalize result\n q = q / np.sum(q) * sqrt(2)\n return q.c[::-1]\n else:\n raise ValueError(\"Polynomial factorization does not work \"\n \"well for p too large.\")\n\n\ndef qmf(hk):\n \"\"\"\n Return high-pass qmf filter from low-pass\n\n Parameters\n ----------\n hk : array_like\n Coefficients of high-pass filter.\n\n \"\"\"\n N = len(hk) - 1\n asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]\n return hk[::-1] * np.array(asgn)\n\n\ndef cascade(hk, J=7):\n \"\"\"\n Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.\n\n Parameters\n ----------\n hk : array_like\n Coefficients of low-pass filter.\n J : int, optional\n Values will be computed at grid points ``K/2**J``. Default is 7.\n\n Returns\n -------\n x : ndarray\n The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where\n ``len(hk) = len(gk) = N+1``.\n phi : ndarray\n The scaling function ``phi(x)`` at `x`:\n ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.\n psi : ndarray, optional\n The wavelet function ``psi(x)`` at `x`:\n ``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.\n `psi` is only returned if `gk` is not None.\n\n Notes\n -----\n The algorithm uses the vector cascade algorithm described by Strang and\n Nguyen in \"Wavelets and Filter Banks\". It builds a dictionary of values\n and slices for quick reuse. 
Then inserts vectors into final vector at the\n end.\n\n \"\"\"\n N = len(hk) - 1\n\n if (J > 30 - np.log2(N + 1)):\n raise ValueError(\"Too many levels.\")\n if (J < 1):\n raise ValueError(\"Too few levels.\")\n\n # construct matrices needed\n nn, kk = np.ogrid[:N, :N]\n s2 = np.sqrt(2)\n # append a zero so that take works\n thk = np.r_[hk, 0]\n gk = qmf(hk)\n tgk = np.r_[gk, 0]\n\n indx1 = np.clip(2 * nn - kk, -1, N + 1)\n indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)\n m = np.zeros((2, 2, N, N), 'd')\n m[0, 0] = np.take(thk, indx1, 0)\n m[0, 1] = np.take(thk, indx2, 0)\n m[1, 0] = np.take(tgk, indx1, 0)\n m[1, 1] = np.take(tgk, indx2, 0)\n m *= s2\n\n # construct the grid of points\n x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)\n phi = 0 * x\n\n psi = 0 * x\n\n # find phi0, and phi1\n lam, v = eig(m[0, 0])\n ind = np.argmin(np.absolute(lam - 1))\n # a dictionary with a binary representation of the\n # evaluation points x < 1 -- i.e. position is 0.xxxx\n v = np.real(v[:, ind])\n # need scaling function to integrate to 1 so find\n # eigenvector normalized to sum(v,axis=0)=1\n sm = np.sum(v)\n if sm < 0: # need scaling function to integrate to 1\n v = -v\n sm = -sm\n bitdic = {'0': v / sm}\n bitdic['1'] = np.dot(m[0, 1], bitdic['0'])\n step = 1 << J\n phi[::step] = bitdic['0']\n phi[(1 << (J - 1))::step] = bitdic['1']\n psi[::step] = np.dot(m[1, 0], bitdic['0'])\n psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])\n # descend down the levels inserting more and more values\n # into bitdic -- store the values in the correct location once we\n # have computed them -- stored in the dictionary\n # for quicker use later.\n prevkeys = ['1']\n for level in range(2, J + 1):\n newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]\n fac = 1 << (J - level)\n for key in newkeys:\n # convert key to number\n num = 0\n for pos in range(level):\n if key[pos] == '1':\n num += (1 << (level - 1 - pos))\n pastphi = bitdic[key[1:]]\n ii = int(key[0])\n temp = np.dot(m[0, ii], pastphi)\n bitdic[key] = temp\n phi[num * fac::step] = temp\n psi[num * fac::step] = np.dot(m[1, ii], pastphi)\n prevkeys = newkeys\n\n return x, phi, psi\n\n\ndef morlet(M, w=5.0, s=1.0, complete=True):\n \"\"\"\n Complex Morlet wavelet.\n\n Parameters\n ----------\n M : int\n Length of the wavelet.\n w : float, optional\n Omega0. Default is 5\n s : float, optional\n Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.\n complete : bool, optional\n Whether to use the complete or the standard version.\n\n Returns\n -------\n morlet : (M,) ndarray\n\n See Also\n --------\n morlet2 : Implementation of Morlet wavelet, compatible with `cwt`.\n scipy.signal.gausspulse\n\n Notes\n -----\n The standard version::\n\n pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))\n\n This commonly used wavelet is often referred to simply as the\n Morlet wavelet. Note that this simplified version can cause\n admissibility problems at low values of `w`.\n\n The complete version::\n\n pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))\n\n This version has a correction\n term to improve admissibility. 
For `w` greater than 5, the\n correction term is negligible.\n\n Note that the energy of the return wavelet is not normalised\n according to `s`.\n\n The fundamental frequency of this wavelet in Hz is given\n by ``f = 2*s*w*r / M`` where `r` is the sampling rate.\n\n Note: This function was created before `cwt` and is not compatible\n with it.\n\n \"\"\"\n x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)\n output = np.exp(1j * w * x)\n\n if complete:\n output -= np.exp(-0.5 * (w**2))\n\n output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)\n\n return output\n\n\ndef ricker(points, a):\n \"\"\"\n Return a Ricker wavelet, also known as the \"Mexican hat wavelet\".\n\n It models the function:\n\n ``A * (1 - (x/a)**2) * exp(-0.5*(x/a)**2)``,\n\n where ``A = 2/(sqrt(3*a)*(pi**0.25))``.\n\n Parameters\n ----------\n points : int\n Number of points in `vector`.\n Will be centered around 0.\n a : scalar\n Width parameter of the wavelet.\n\n Returns\n -------\n vector : (N,) ndarray\n Array of length `points` in shape of ricker curve.\n\n Examples\n --------\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n\n >>> points = 100\n >>> a = 4.0\n >>> vec2 = signal.ricker(points, a)\n >>> print(len(vec2))\n 100\n >>> plt.plot(vec2)\n >>> plt.show()\n\n \"\"\"\n A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))\n wsq = a**2\n vec = np.arange(0, points) - (points - 1.0) / 2\n xsq = vec**2\n mod = (1 - xsq / wsq)\n gauss = np.exp(-xsq / (2 * wsq))\n total = A * mod * gauss\n return total\n\n\ndef morlet2(M, s, w=5):\n \"\"\"\n Complex Morlet wavelet, designed to work with `cwt`.\n\n Returns the complete version of morlet wavelet, normalised\n according to `s`::\n\n exp(1j*w*x/s) * exp(-0.5*(x/s)**2) * pi**(-0.25) * sqrt(1/s)\n\n Parameters\n ----------\n M : int\n Length of the wavelet.\n s : float\n Width parameter of the wavelet.\n w : float, optional\n Omega0. Default is 5\n\n Returns\n -------\n morlet : (M,) ndarray\n\n See Also\n --------\n morlet : Implementation of Morlet wavelet, incompatible with `cwt`\n\n Notes\n -----\n\n .. versionadded:: 1.4.0\n\n This function was designed to work with `cwt`. 
Because `morlet2`\n returns an array of complex numbers, the `dtype` argument of `cwt`\n should be set to `complex128` for best results.\n\n Note the difference in implementation with `morlet`.\n The fundamental frequency of this wavelet in Hz is given by::\n\n f = w*fs / (2*s*np.pi)\n\n where ``fs`` is the sampling rate and `s` is the wavelet width parameter.\n Similarly we can get the wavelet width parameter at ``f``::\n\n s = w*fs / (2*f*np.pi)\n\n Examples\n --------\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n\n >>> M = 100\n >>> s = 4.0\n >>> w = 2.0\n >>> wavelet = signal.morlet2(M, s, w)\n >>> plt.plot(abs(wavelet))\n >>> plt.show()\n\n This example shows basic use of `morlet2` with `cwt` in time-frequency\n analysis:\n\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n >>> t, dt = np.linspace(0, 1, 200, retstep=True)\n >>> fs = 1/dt\n >>> w = 6.\n >>> sig = np.cos(2*np.pi*(50 + 10*t)*t) + np.sin(40*np.pi*t)\n >>> freq = np.linspace(1, fs/2, 100)\n >>> widths = w*fs / (2*freq*np.pi)\n >>> cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)\n >>> plt.pcolormesh(t, freq, np.abs(cwtm), cmap='viridis')\n >>> plt.show()\n\n \"\"\"\n x = np.arange(0, M) - (M - 1.0) / 2\n x = x / s\n wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x**2) * np.pi**(-0.25)\n output = np.sqrt(1/s) * wavelet\n return output\n\n\ndef cwt(data, wavelet, widths, dtype=None, **kwargs):\n \"\"\"\n Continuous wavelet transform.\n\n Performs a continuous wavelet transform on `data`,\n using the `wavelet` function. A CWT performs a convolution\n with `data` using the `wavelet` function, which is characterized\n by a width parameter and length parameter. The `wavelet` function\n is allowed to be complex.\n\n Parameters\n ----------\n data : (N,) ndarray\n data on which to perform the transform.\n wavelet : function\n Wavelet function, which should take 2 arguments.\n The first argument is the number of points that the returned vector\n will have (len(wavelet(length,width)) == length).\n The second is a width parameter, defining the size of the wavelet\n (e.g. standard deviation of a gaussian). See `ricker`, which\n satisfies these requirements.\n widths : (M,) sequence\n Widths to use for transform.\n dtype : data-type, optional\n The desired data type of output. Defaults to ``float64`` if the\n output of `wavelet` is real and ``complex128`` if it is complex.\n \n .. versionadded:: 1.4.0\n \n kwargs\n Keyword arguments passed to wavelet function.\n \n .. versionadded:: 1.4.0\n\n Returns\n -------\n cwt: (M, N) ndarray\n Will have shape of (len(widths), len(data)).\n\n Notes\n -----\n\n .. versionadded:: 1.4.0\n\n For non-symmetric, complex-valued wavelets, the input signal is convolved\n with the time-reversed complex-conjugate of the wavelet data [1]. \n\n ::\n\n length = min(10 * width[ii], len(data))\n cwt[ii,:] = signal.convolve(data, np.conj(wavelet(length, width[ii],\n **kwargs))[::-1], mode='same')\n\n References\n ----------\n .. [1] S. Mallat, \"A Wavelet Tour of Signal Processing (3rd Edition)\",\n Academic Press, 2009.\n\n Examples\n --------\n >>> from scipy import signal\n >>> import matplotlib.pyplot as plt\n >>> t = np.linspace(-1, 1, 200, endpoint=False)\n >>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)\n >>> widths = np.arange(1, 31)\n >>> cwtmatr = signal.cwt(sig, signal.ricker, widths)\n >>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',\n ... 
vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())\n >>> plt.show()\n \"\"\"\n # Determine output type\n if dtype is None:\n if np.asarray(wavelet(1, widths[0], **kwargs)).dtype.char in 'FDG':\n dtype = np.complex128\n else:\n dtype = np.float64\n\n output = np.zeros((len(widths), len(data)), dtype=dtype)\n for ind, width in enumerate(widths):\n N = np.min([10 * width, len(data)])\n wavelet_data = np.conj(wavelet(N, width, **kwargs)[::-1])\n output[ind] = convolve(data, wavelet_data, mode='same')\n return output\n"
] | [
[
"numpy.dot",
"numpy.poly1d",
"numpy.take",
"numpy.sqrt",
"numpy.linspace",
"numpy.exp",
"numpy.dual.eig",
"numpy.clip",
"numpy.arange",
"numpy.roots",
"numpy.real",
"numpy.zeros",
"numpy.array",
"scipy.signal.convolve",
"numpy.sum",
"numpy.absolute",
"numpy.log2",
"numpy.conj",
"scipy.special.comb"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.14",
"1.6",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
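The wavelet entry above documents the width-to-frequency relation for `morlet2` (``s = w*fs / (2*f*np.pi)``). As a minimal sketch of that relation in use (illustrative signal and sampling rate, not taken from the repository; note that `scipy.signal.cwt`, `ricker` and `morlet2` are deprecated in recent SciPy releases, so this assumes an older SciPy):

    import numpy as np
    from scipy import signal

    fs = 200.0                          # sampling rate in Hz (assumed value)
    t = np.arange(0, 1, 1 / fs)
    sig = np.sin(2 * np.pi * 25 * t)    # a 25 Hz test tone

    # Target frequencies from 1 Hz up to Nyquist, converted to widths
    # with s = w*fs / (2*f*pi) as given in the morlet2 docstring.
    w = 6.0
    freq = np.linspace(1, fs / 2, 50)
    widths = w * fs / (2 * freq * np.pi)

    cwtm = signal.cwt(sig, signal.morlet2, widths, w=w)
    print(cwtm.shape)                   # (len(widths), len(sig)) == (50, 200)

The row of ``cwtm`` whose target frequency is closest to 25 Hz should carry the largest magnitude.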
andrewsanchez/GenBankQC-Workflow | [
"8e630ca89c3f1a3cd9d6b2c4987100e3552d831e"
] | [
"scripts/dmx.py"
] | [
"import pandas as pd\nfrom pathlib import Path\n\n\ndmx = pd.read_csv(snakemake.input.dmx, index_col=0, sep=\"\\t\")\nnames = [Path(i).name for i in dmx.index]\ndmx.index = names\ndmx.columns = names\ndmx.to_csv(snakemake.input.dmx, sep=\"\\t\")\ndmx.mean().to_csv(snakemake.output.mean_dist, sep=\"\\t\")\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
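The `dmx.py` script above rewrites a distance matrix so that rows and columns are keyed by file name rather than full path, then writes the per-genome mean distances. A small self-contained version of the same relabelling step (toy matrix and hypothetical paths, no snakemake object):

    import pandas as pd
    from pathlib import Path

    paths = ["genomes/a.fasta", "genomes/b.fasta", "genomes/c.fasta"]   # hypothetical
    dmx = pd.DataFrame(
        [[0.0, 0.1, 0.2], [0.1, 0.0, 0.3], [0.2, 0.3, 0.0]],
        index=paths, columns=paths,
    )

    # Strip directories so the matrix is keyed by file name only.
    names = [Path(p).name for p in paths]
    dmx.index = names
    dmx.columns = names

    print(dmx.mean())    # per-genome mean distance, analogous to mean_dist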
jcrowley3/bots-for-atari-games | [
"c601ba60445b57835e76c4a0c90a7d5232bd69c2"
] | [
"src/bot_5_ls.py"
] | [
"\"\"\"\nBot 5 -- Build least squares q-learning agent for FrozenLake\n\"\"\"\n\nfrom typing import Tuple\nfrom typing import Callable\nfrom typing import List\nimport gym\nimport numpy as np\nimport random\nrandom.seed(0) # make results reproducible\nnp.random.seed(0) # make results reproducible\n\nnum_episodes = 5000\ndiscount_factor = 0.85\nlearning_rate = 0.9\nw_lr = 0.5\nreport_interval = 500\nreport = '100-ep Average: %.2f . Best 100-ep Average: %.2f . Average: %.2f ' \\\n '(Episode %d)'\n\n\ndef makeQ(model: np.array) -> Callable[[np.array], np.array]:\n \"\"\"Returns a Q-function, which takes state -> distribution over actions\"\"\"\n return lambda X: X.dot(model)\n\n\ndef initialize(shape: Tuple):\n \"\"\"Initialize model\"\"\"\n W = np.random.normal(0.0, 0.1, shape)\n Q = makeQ(W)\n return W, Q\n\n\ndef train(X: np.array, y: np.array, W: np.array) -> Tuple[np.array, Callable]:\n \"\"\"Train the model, using solution to ridge regression\"\"\"\n I = np.eye(X.shape[1])\n newW = np.linalg.inv(X.T.dot(X) + 10e-4 * I).dot(X.T.dot(y))\n W = w_lr * newW + (1 - w_lr) * W\n Q = makeQ(W)\n return W, Q\n\n\ndef one_hot(i: int, n: int) -> np.array:\n \"\"\"Implements one-hot encoding by selecting the ith standard basis vector\"\"\"\n return np.identity(n)[i]\n\n\ndef print_report(rewards: List, episode: int):\n \"\"\"Print rewards report for current episode\n - Average for last 100 episodes\n - Best 100-episode average across all time\n - Average for all episodes across time\n \"\"\"\n print(report % (\n np.mean(rewards[-100:]),\n max([np.mean(rewards[i:i+100]) for i in range(len(rewards) - 100)]),\n np.mean(rewards),\n episode))\n\n\ndef main():\n env = gym.make('FrozenLake-v0') # create the game\n env.seed(0) # make results reproducible\n rewards = []\n\n n_obs, n_actions = env.observation_space.n, env.action_space.n\n W, Q = initialize((n_obs, n_actions))\n states, labels = [], []\n for episode in range(1, num_episodes + 1):\n if len(states) >= 10000:\n states, labels = [], []\n state = one_hot(env.reset(), n_obs)\n episode_reward = 0\n while True:\n states.append(state)\n noise = np.random.random((1, n_actions)) / episode\n action = np.argmax(Q(state) + noise)\n state2, reward, done, _ = env.step(action)\n\n state2 = one_hot(state2, n_obs)\n Qtarget = reward + discount_factor * np.max(Q(state2))\n label = Q(state)\n label[action] = (1 - learning_rate) * label[action] + learning_rate * Qtarget\n labels.append(label)\n\n episode_reward += reward\n state = state2\n if len(states) % 10 == 0:\n W, Q = train(np.array(states), np.array(labels), W)\n if done:\n rewards.append(episode_reward)\n if episode % report_interval == 0:\n print_report(rewards, episode)\n break\n print_report(rewards, -1)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.random.random",
"numpy.random.seed",
"numpy.eye",
"numpy.random.normal",
"numpy.identity",
"numpy.mean",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
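`bot_5_ls.py` above fits the Q-function by ridge regression, ``W = inv(X.T @ X + lam*I) @ (X.T @ y)``, blended with the previous weights. The closed-form update on its own, with random stand-in data (only the shapes are meaningful here):

    import numpy as np

    np.random.seed(0)
    n_states, n_actions = 16, 4

    X = np.identity(n_states)[np.random.randint(0, n_states, size=64)]  # one-hot states
    y = np.random.random((64, n_actions))                               # Q-value targets

    lam = 1e-3
    I = np.eye(X.shape[1])
    W = np.linalg.inv(X.T @ X + lam * I) @ (X.T @ y)    # ridge solution, as in train()

    Q = lambda s: s @ W            # Q-function: one-hot state -> action values
    print(Q(np.identity(n_states)[3]))

The script uses ``10e-4`` as its regulariser; any small positive value keeps ``X.T @ X + lam*I`` invertible even when some states were never visited.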
ajthor/socks | [
"77063064ceb5a5da3f01733bef0885b00d4b2bed"
] | [
"gym_socks/algorithms/control/common.py"
] | [
"\"\"\"Common functions for kernel control algorithms.\n\nThis file contains common functions used by the kernel optimal control algorithms, and\nimplements an LP solver to compute the probability vector :math:`\\gamma`. This\nfunctionality is accessed via the :py:func:``compute_solution`` function, which serves\nas a single entrypoint, and the unconstrained version is chosen if the constraint matrix\n``D`` is None.\n\n\"\"\"\n\nimport numpy as np\n\nfrom scipy.optimize import linprog\n\nimport logging\nfrom gym_socks.utils.logging import ms_tqdm, _progress_fmt\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_constraint_matrix(D):\n return np.any(np.negative(D))\n\n\ndef compute_solution(\n C: np.ndarray, D: np.ndarray = None, heuristic: bool = False\n) -> np.ndarray:\n \"\"\"Compute the solution to the LP.\n\n Computes a solution to the linear program, choosing either to delegate to the unconstrained or constrained solver depending on whether D is `None`.\n\n Args:\n C: Array holding values of the cost function evaluated at sample points.\n D: Array holding values of the constraint function evaluated at sample points.\n heuristic: Whether to compute the heuristic solution.\n\n Returns:\n gamma: Probability vector.\n \"\"\"\n if D is None:\n return _compute_unconstrained_solution(C, heuristic)\n else:\n return _compute_constrained_solution(C, D, heuristic)\n\n\ndef _compute_unconstrained_solution(C: np.ndarray, heuristic=False) -> np.ndarray:\n \"\"\"Compute the unconstrained solution to the LP.\n\n NOTE: A heuristic solution is available, but due to the speed of the LP solver for\n feasible problems, there is usually no need to invoke the heuristic solution, and\n the computation times are roughly equal. The main reason to use the heuristic\n solution is when scipy is unavailable.\n\n Note that in the unconstrained case, a closed-form solution is available by the\n Lagrangian dual. Thus, using the heuristic solution can be much faster in the\n unconstrained case.\n\n Args:\n C: Array holding values of the cost function evaluated at sample points.\n heuristic: Whether to compute the heuristic solution.\n\n Returns:\n gamma: Probability vector.\n\n \"\"\"\n # C = (Cx @ K + Cu)\n\n if heuristic is False:\n\n obj = C.T\n A_eq = np.ones((1, len(C)))\n b_eq = 1\n # Bounds are automatically set so that decision variables are nonnegative.\n # bounds = [(0, None)] * len(C)\n\n logger.debug(\"Computing solution via scipy LP solver.\")\n sol = linprog(\n obj,\n A_eq=A_eq,\n b_eq=b_eq,\n )\n\n logger.debug(f\"Solver completed with status code: {sol.status}\")\n # 0 : Optimization terminated successfully.\n # 1 : Iteration limit reached.\n # 2 : Problem appears to be infeasible.\n # 3 : Problem appears to be unbounded.\n # 4 : Numerical difficulties encountered.\n\n if sol.success is True:\n return sol.x\n else:\n logger.warn(\"No solution found via scipy.optimize.linprog.\")\n logger.warn(\"Returning heuristic solution.\")\n\n logger.debug(\"Computing heuristic solution.\")\n heuristic_sol = np.zeros((len(C),))\n idx = np.argmin(C)\n heuristic_sol[idx] = 1\n return heuristic_sol\n\n\ndef _compute_constrained_solution(\n C: np.ndarray, D: np.ndarray, heuristic=False\n) -> np.ndarray:\n \"\"\"Compute the constrained solution to the LP.\n\n NOTE: A heuristic solution is available, but due to the speed of the LP solver for\n feasible problems, there is usually no need to invoke the heuristic solution, and\n the computation times are roughly equal. 
The main reason to use the heuristic\n solution is when scipy is unavailable.\n\n For the constrained problem, there is no closed-form solution via the Lagrangian\n dual. Thus, the heuristic solution computes the probability vector by masking the\n entries of D which are positive (solutions which do not satisfy the constraints),\n and then finding the minimum value of the masked cost vector.\n\n Args:\n C: Array holding values of the cost function evaluated at sample points.\n D: Array holding values of the constraint function evaluated at sample points.\n heuristic: Whether to compute the heuristic solution.\n\n Returns:\n gamma: Probability vector.\n\n \"\"\"\n # C = (Cx @ K + Cu)\n # D = (Dx @ K + Du)\n _check_constraint_matrix(D)\n\n if heuristic is False:\n\n if len(D.shape) == 1:\n D = D.reshape(-1, 1)\n\n obj = C.T\n A_ub = D.T\n b_ub = 0\n A_eq = np.ones((1, len(C)))\n b_eq = 1\n # Bounds are automatically set so that decision variables are nonnegative.\n # bounds = [(0, None)] * len(C)\n\n logger.debug(\"Computing solution via scipy LP solver.\")\n sol = linprog(\n obj,\n A_ub=A_ub,\n b_ub=b_ub,\n A_eq=A_eq,\n b_eq=b_eq,\n )\n\n logger.debug(f\"Solver completed with status code: {sol.status}\")\n # 0 : Optimization terminated successfully.\n # 1 : Iteration limit reached.\n # 2 : Problem appears to be infeasible.\n # 3 : Problem appears to be unbounded.\n # 4 : Numerical difficulties encountered.\n\n if sol.success is True:\n return sol.x\n else:\n logger.debug(\"No solution found via scipy.optimize.linprog.\")\n logger.debug(\"Returning heuristic solution.\")\n\n heuristic_sol = np.zeros((len(C),))\n satisfies_constraints = np.where(D <= 0)\n if len(satisfies_constraints[0]) == 0:\n logger.warn(\"No feasible solution found!\")\n logger.debug(\"Returning minimal unconstrained solution.\")\n idx = C.argmin()\n else:\n idx = satisfies_constraints[0][C[satisfies_constraints].argmin()]\n heuristic_sol[idx] = 1\n\n return heuristic_sol\n"
] | [
[
"numpy.negative",
"numpy.argmin",
"numpy.where",
"scipy.optimize.linprog"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
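The `compute_solution` helpers above reduce the control problem to a small linear program over a probability vector gamma. The unconstrained case, stripped to its essentials with a made-up cost vector:

    import numpy as np
    from scipy.optimize import linprog

    C = np.array([0.8, 0.3, 0.5, 0.2, 0.9])      # illustrative costs at 5 sample points

    # minimise C @ gamma  subject to  sum(gamma) == 1, gamma >= 0
    # (non-negativity is linprog's default bound, as the comment above notes).
    res = linprog(C, A_eq=np.ones((1, len(C))), b_eq=1)
    print(res.x)                                  # all mass lands on the cheapest point

As the module's heuristic branch suggests, the optimum of this particular LP is simply a one-hot vector at ``np.argmin(C)``.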
jishnupdas/SOM-Classifier-for-lightcurves | [
"150072921400efb15f435e38823f59dbd53ef82a"
] | [
"SOM_classifier.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 24 10:51:23 2019\n\n@author: jishnu\n\"\"\"\n\n#%%\nimport os\nimport glob\nimport pickle\nimport numpy as np\nimport seaborn as sns\nfrom minisom import MiniSom \nimport matplotlib.pyplot as plt\nfrom scipy import signal\n#%%\ndpath = '/home/jishnu/Documents/TESS/tess_data/1D_lc/'\n\nfiles = glob.glob(dpath+'*')\n\n#%%\nget_array = lambda file: np.loadtxt(file)\n\n\ndef get_arr(file):\n \n data = np.loadtxt(file)\n if np.isnan(data).any() == True:\n# files.remove(file)\n return np.nan\n else:\n return data\n\n#%%\ndef OneDplot(xlen,y):\n xarr = np.linspace(0,1,xlen)\n plt.plot(xarr,y)\n plt.show()\n plt.close()\n\n#%%\n\nname = [f for f in files if get_arr(f) is not np.nan]\n\ndata = [get_arr(f) for f in name]\n\n\n#%%\n#for f in data[:99]:\n# OneDplot(32,f)\n\n#%%\nsom = MiniSom(50,50, 32, sigma=0.1,learning_rate=1.5) \n# initialization of 40x40 SOM\n#som.pca_weights_init(data)\n#%%\nsom.random_weights_init(data)\nsom.train_random(data, 10000) # trains the SOM with 100 iterations\n\n\n#with open('som.p', 'wb') as outfile:\n# pickle.dump(som, outfile)\n \n#%% \nwith open('som.p', 'rb') as infile:\n som = pickle.load(infile)\n \n\n#%%\ncoords = []\nerr = []\nfor d in data:\n try:\n coords.append(np.array(som.winner(d)))\n except:\n print(\"err with \",str(d))\n err.append(d)\n\n#%%\nx,y = [i[0] for i in coords],[i[1] for i in coords]\nplt.style.use('seaborn')\nplt.plot(x,y,'.',alpha=0.15)\nsns.kdeplot(x,y,cmap='Blues',shade=True,bw=2,shade_lowest=False,alpha=0.8)\nplt.show()\nplt.close()\n\n\n#%%\n\"\"\"REF: https://towardsdatascience.com/an-introduction-to-clustering-\n algorithms-in-python-123438574097\"\"\"\n\nfrom sklearn.cluster import KMeans\n\npoints = coords\n\nkmeans = KMeans(n_clusters=15)\n\nkmeans.fit(points)\n\ny_km = kmeans.fit_predict(points)\n\n#%%\ndef cluster_plot(l,mask1):\n for i in range(l):\n plt.plot(np.ma.masked_array(data=x,mask = np.invert((mask1 ==i,0)[0])), \n np.ma.masked_array(data=y,mask = np.invert((mask1 ==i,1)[0])),\n '.')\n\n#%%\n\ncluster_x = [i[0] for i in kmeans.cluster_centers_]\ncluster_y = [i[1] for i in kmeans.cluster_centers_]\n\n#plt.plot(x,y,'.',alpha=0.15)\n\n#plt.plot(cluster_x,cluster_y,'o')\ncluster_plot(15,y_km)\nsns.kdeplot(x,y,cmap='Blues',shade=True,shade_lowest=False,bw=2,alpha=0.6)\nplt.show()\nplt.close()\n\n#%%\n# import hierarchical clustering libraries\nimport scipy.cluster.hierarchy as sch\nfrom sklearn.cluster import AgglomerativeClustering\n\n# create dendrogram\ndendrogram = sch.dendrogram(sch.linkage(points, method='ward'))\n\n# create clusters\nhc = AgglomerativeClustering(n_clusters=15, affinity = 'euclidean',\n linkage = 'ward')\n\n# save clusters for chart\ny_hc = hc.fit_predict(points)\n\n#%%\ncluster_plot(6,y_hc)\nsns.kdeplot(x,y,cmap='Blues',shade=False,bw=2,alpha=0.5)\nplt.show()\nplt.close()\n"
] | [
[
"sklearn.cluster.KMeans",
"numpy.linspace",
"numpy.isnan",
"numpy.invert",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"scipy.cluster.hierarchy.linkage",
"sklearn.cluster.AgglomerativeClustering",
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
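`SOM_classifier.py` trains a MiniSom on 32-point light-curve vectors and then clusters the winning-node coordinates. A compact, hedged version of that pipeline on random data (assumes the `minisom` package is installed; the grid size, sigma and iteration count here are arbitrary, not the script's values):

    import numpy as np
    from minisom import MiniSom
    from sklearn.cluster import KMeans

    rng = np.random.default_rng(0)
    data = rng.random((200, 32))          # stand-in for the 1-D light curves

    som = MiniSom(10, 10, 32, sigma=0.5, learning_rate=0.5, random_seed=0)
    som.random_weights_init(data)
    som.train_random(data, 1000)

    # Best-matching unit per curve, then k-means over the 2-D map coordinates.
    coords = np.array([som.winner(d) for d in data])
    labels = KMeans(n_clusters=5, n_init=10, random_state=0).fit_predict(coords)
    print(np.bincount(labels))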
vijindal/cluspand | [
"a3676594354ab59991fe75fccecdc3a400c7b153"
] | [
"code/convex_hull.py"
] | [
"\"\"\"\nThis module provides the ConvexHull class.\n\"\"\"\n\nimport itertools\nimport numpy as np\nfrom typing import List, Sized, Union\nfrom scipy.interpolate import griddata\nfrom scipy.spatial import ConvexHull as ConvexHullSciPy\nfrom scipy.spatial.qhull import QhullError\n\n\nclass ConvexHull:\n \"\"\"This class provides functionality for extracting the convex hull\n of the (free) energy of mixing. It is based on the `convex hull\n calculator in SciPy\n <http://docs.scipy.org/doc/scipy-dev/reference/\\\ngenerated/scipy.spatial.ConvexHull.html>`_.\n\n Parameters\n ----------\n concentrations : list(float) or list(list(float))\n concentrations for each structure listed as ``[[c1, c2], [c1, c2],\n ...]``; for binaries, in which case there is only one independent\n concentration, the format ``[c1, c2, c3, ...]`` works as well.\n energies : list(float)\n energy (or energy of mixing) for each structure\n\n Attributes\n ----------\n concentrations : np.ndarray\n concentrations of the `N` structures on the convex hull\n energies : np.ndarray\n energies of the `N` structures on the convex hull\n dimensions : int\n number of independent concentrations needed to specify a point in\n concentration space (1 for binaries, 2 for ternaries etc.)\n structures : list(int)\n indices of structures that constitute the convex hull (indices are\n defined by the order of their concentrations and energies are fed when\n initializing the ConvexHull object)\n\n Examples\n --------\n A `ConvexHull` object is easily initialized by providing lists of\n concentrations and energies::\n\n >>> data = {'concentration': [0, 0.2, 0.2, 0.3, 0.4, 0.5, 0.8, 1.0],\n ... 'mixing_energy': [0.1, -0.2, -0.1, -0.2, 0.2, -0.4, -0.2, -0.1]}\n >>> hull = ConvexHull(data['concentration'], data['mixing_energy'])\n\n Now one can for example access the points along the convex hull directly::\n\n >>> for c, e in zip(hull.concentrations, hull.energies):\n ... print(c, e)\n 0.0 0.1\n 0.2 -0.2\n 0.5 -0.4\n 1.0 -0.1\n\n or plot the convex hull along with the original data using e.g., matplotlib::\n\n >>> import matplotlib.pyplot as plt\n >>> plt.scatter(data['concentration'], data['mixing_energy'], color='darkred')\n >>> plt.plot(hull.concentrations, hull.energies)\n >>> plt.show()\n\n It is also possible to extract structures at or close to the convex hull::\n\n >>> low_energy_structures = hull.extract_low_energy_structures(\n ... data['concentration'], data['mixing_energy'],\n ... 
energy_tolerance=0.005)\n\n A complete example can be found in the :ref:`basic tutorial\n <tutorial_enumerate_structures>`.\n \"\"\"\n\n def __init__(self,\n concentrations: Union[List[float], List[List[float]]],\n energies: List[float]) -> None:\n assert len(concentrations) == len(energies)\n # Prepare data in format suitable for SciPy-ConvexHull\n concentrations = np.array(concentrations)\n energies = np.array(energies)\n points = np.column_stack((concentrations, energies))\n self.dimensions = len(points[0]) - 1\n\n # Construct convex hull\n hull = ConvexHullSciPy(points)\n\n # Collect convex hull points in handy arrays\n concentrations = [] # type: ignore\n energies = [] # type: ignore\n for vertex in hull.vertices:\n if self.dimensions == 1:\n concentrations.append(points[vertex][0])\n else:\n concentrations.append(points[vertex][0:-1])\n energies.append(points[vertex][-1])\n concentrations = np.array(concentrations)\n energies = np.array(energies)\n\n structures = hull.vertices\n # If there is just one independent concentration, we'd better sort\n # according to it\n if self.dimensions == 1:\n ces = list(zip(*sorted(zip(concentrations, energies, structures))))\n self.concentrations = np.array(ces[0])\n self.energies = np.array(ces[1])\n self.structures = np.array(ces[2])\n else:\n self.concentrations = concentrations\n self.energies = energies\n self.structures = structures\n\n # Remove points that are above the \"pure components plane\"\n self._remove_points_above_tie_plane()\n\n def _remove_points_above_tie_plane(self, tol: float = 1e-6) -> None:\n \"\"\"\n Remove all points on the convex hull that correspond to maximum rather\n than minimum energy.\n\n Parameters\n ----------\n tol\n Tolerance for what energy constitutes a lower one.\n \"\"\"\n\n # Identify the \"complex concentration hull\", i.e. the extremal\n # concentrations. In the simplest case, these should simply be the\n # pure components.\n if self.dimensions == 1:\n # Then the ConvexHullScipy function doesn't work, so we just pick\n # the indices of the lowest and highest concentrations.\n vertices = []\n vertices.append(np.argmin(self.concentrations))\n vertices.append(np.argmax(self.concentrations))\n vertices = np.array(vertices)\n else:\n concentration_hull = ConvexHullSciPy(self.concentrations)\n vertices = concentration_hull.vertices\n\n # Remove all points of the convex energy hull that have an energy that\n # is higher than what would be gotten with pure components at the same\n # concentration. These points are mathematically on the convex hull,\n # but in the physically uninteresting upper part, i.e. they maximize\n # rather than minimize energy.\n to_delete = []\n for i, concentration in enumerate(self.concentrations):\n # The points on the convex concentration hull should always be\n # included, so skip them.\n if i in vertices:\n continue\n\n # The energy obtained as a linear combination of concentrations on\n # the convex hull is the \"z coordinate\" of the position on a\n # (hyper)plane in the (number of independent concentrations +\n # 1)-dimensional (N-D) space. 
This plane is spanned by N points.\n # If there are more vertices on the convex hull, we need to loop\n # over all combinations of N vertices.\n for plane in itertools.combinations(vertices,\n min(len(vertices),\n self.dimensions + 1)):\n # Calculate energy that would be gotten with pure components\n # with ascribed concentration.\n energy_pure = griddata(self.concentrations[np.array(plane)],\n self.energies[np.array(plane)],\n concentration,\n method='linear')\n\n # Prepare to delete if the energy was lowered. `griddata` gives\n # NaN if the concentration is outside the triangle formed by\n # the three vertices. The result of the below comparison is\n # then False, which is what we want.\n if energy_pure < self.energies[i] - tol:\n to_delete.append(i)\n break\n\n # Finally remove all points\n self.concentrations = np.delete(self.concentrations, to_delete, 0)\n self.energies = np.delete(self.energies, to_delete, 0)\n self.structures = list(np.delete(self.structures, to_delete, 0))\n\n def get_energy_at_convex_hull(self, target_concentrations:\n Union[List[float],\n List[List[float]]]) -> np.ndarray:\n \"\"\"Returns the energy of the convex hull at specified concentrations.\n If any concentration is outside the allowed range, NaN is\n returned.\n\n Parameters\n ----------\n target_concentrations\n concentrations at target points\n\n If there is one independent concentration, a list of\n floats is sufficient. Otherwise, the concentrations ought\n to be provided as a list of lists, such as ``[[0.1, 0.2],\n [0.3, 0.1], ...]``.\n \"\"\"\n if self.dimensions > 1 and isinstance(target_concentrations[0], Sized):\n assert len(target_concentrations[0]) == self.dimensions\n\n # Loop over all complexes of N+1 points to make sure that the lowest\n # energy plane is used in the end. This is needed in two dimensions\n # but in higher.\n hull_candidate_energies = []\n for plane in itertools.combinations(range(len(self.energies)),\n min(len(self.energies),\n self.dimensions + 1)):\n try:\n plane_energies = griddata(self.concentrations[list(plane)],\n self.energies[list(plane)],\n np.array(target_concentrations),\n method='linear')\n except QhullError:\n # If the points lie on a line, the convex hull will fail, but\n # we do not need to care about these \"planes\" anyway\n continue\n hull_candidate_energies.append(plane_energies)\n\n # Pick out the lowest energies found\n hull_energies = np.nanmin(hull_candidate_energies, axis=0)\n return hull_energies\n\n def extract_low_energy_structures(self, concentrations:\n Union[List[float],\n List[List[float]]],\n energies: List[float],\n energy_tolerance: float) -> List[int]:\n \"\"\"Returns the indices of energies that lie within a certain\n tolerance of the convex hull.\n\n Parameters\n ----------\n concentrations\n concentrations of candidate structures\n\n If there is one independent concentration, a list of\n floats is sufficient. 
Otherwise, the concentrations must\n be provided as a list of lists, such as ``[[0.1, 0.2],\n [0.3, 0.1], ...]``.\n energies\n energies of candidate structures\n energy_tolerance\n include structures with an energy that is at most this far\n from the convex hull\n \"\"\"\n # Convert to numpy arrays, can be necessary if, for example,\n # they are Pandas Series with \"gaps\"\n concentrations = np.array(concentrations)\n energies = np.array(energies)\n\n n_points = len(concentrations)\n if len(energies) != n_points:\n raise ValueError('concentrations and energies must have '\n 'the same length')\n\n # Calculate energy at convex hull for specified concentrations\n hull_energies = self.get_energy_at_convex_hull(concentrations)\n\n # Extract those that are close enough\n close_to_hull = [i for i in range(n_points)\n if energies[i] <= hull_energies[i] + energy_tolerance]\n\n return close_to_hull\n"
] | [
[
"numpy.nanmin",
"numpy.delete",
"numpy.argmax",
"numpy.argmin",
"scipy.spatial.ConvexHull",
"numpy.array",
"numpy.column_stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
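The `ConvexHull` docstring above already walks through a binary example; running raw `scipy.spatial.ConvexHull` on the same points shows why the extra `_remove_points_above_tie_plane` step exists:

    import numpy as np
    from scipy.spatial import ConvexHull

    # Same data as the class docstring above.
    c = np.array([0, 0.2, 0.2, 0.3, 0.4, 0.5, 0.8, 1.0])
    e = np.array([0.1, -0.2, -0.1, -0.2, 0.2, -0.4, -0.2, -0.1])

    hull = ConvexHull(np.column_stack((c, e)))
    for i in sorted(hull.vertices):
        print(c[i], e[i])

SciPy returns the full hull, including the maximum-energy vertex at (0.4, 0.2); the class discards such upper-hull points so that only the energy-minimising envelope at concentrations 0.0, 0.2, 0.5 and 1.0 remains.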
ShaneTsui/practical-pytorch | [
"05766973290ef637517f1a62b2bc8be129727ed8"
] | [
"char-rnn-generation/train.py"
] | [
"# https://github.com/spro/practical-pytorch\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport argparse\nimport os\n\nfrom helpers import *\nfrom model import *\nfrom generate import *\n\n# Parse command line arguments\nargparser = argparse.ArgumentParser()\nargparser.add_argument('filename', type=str)\nargparser.add_argument('--n_epochs', type=int, default=2000)\nargparser.add_argument('--print_every', type=int, default=100)\nargparser.add_argument('--hidden_size', type=int, default=50)\nargparser.add_argument('--n_layers', type=int, default=2)\nargparser.add_argument('--learning_rate', type=float, default=0.01)\nargparser.add_argument('--chunk_len', type=int, default=200)\nargs = argparser.parse_args()\n\nfile, file_len = read_file(args.filename)\n\ndef random_training_set(chunk_len):\n start_index = random.randint(0, file_len - chunk_len - 1)\n end_index = start_index + chunk_len + 1\n chunk = file[start_index:end_index]\n inp = char_tensor(chunk[:-1])\n target = char_tensor(chunk[1:])\n return inp, target\n\ndecoder = RNN(n_characters, args.hidden_size, n_characters, args.n_layers)\ndecoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=args.learning_rate)\ncriterion = nn.CrossEntropyLoss()\n\nstart = time.time()\nall_losses = []\nloss_avg = 0\n\ndef train(inp, target):\n hidden = decoder.init_hidden()\n decoder.zero_grad()\n loss = 0\n\n for c in range(args.chunk_len):\n output, hidden = decoder(inp[c], hidden)\n loss += criterion(output, target[c])\n\n loss.backward()\n decoder_optimizer.step()\n\n return loss.data[0] / args.chunk_len\n\ndef save():\n save_filename = os.path.splitext(os.path.basename(args.filename))[0] + '.pt'\n torch.save(decoder, save_filename)\n print('Saved as %s' % save_filename)\n\ntry:\n print(\"Training for %d epochs...\" % args.n_epochs)\n for epoch in range(1, args.n_epochs + 1):\n loss = train(*random_training_set(args.chunk_len))\n loss_avg += loss\n\n if epoch % args.print_every == 0:\n print('[%s (%d %d%%) %.4f]' % (time_since(start), epoch, epoch / args.n_epochs * 100, loss))\n print(generate(decoder, 'Wh', 100), '\\n')\n\n print(\"Saving...\")\n save()\n\nexcept KeyboardInterrupt:\n print(\"Saving before quit...\")\n save()\n\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
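The training loop above builds each example by shifting a text chunk one character: the input is ``chunk[:-1]`` and the target is ``chunk[1:]``. That shift, isolated with dummy logits standing in for the RNN decoder (toy text, no model weights):

    import torch
    import torch.nn as nn

    text = "hello world"
    vocab = sorted(set(text))
    char2idx = {ch: i for i, ch in enumerate(vocab)}

    inp = torch.tensor([char2idx[c] for c in text[:-1]])     # all but the last char
    target = torch.tensor([char2idx[c] for c in text[1:]])   # shifted left by one

    criterion = nn.CrossEntropyLoss()
    logits = torch.randn(len(inp), len(vocab), requires_grad=True)
    loss = criterion(logits, target)
    loss.backward()
    print(loss.item())

Note that ``loss.data[0]`` in the script predates PyTorch 0.4; on current releases ``loss.item()`` is the equivalent scalar accessor.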
noasimon/RockHyraxDetection | [
"19cb20fbb43ed0c29c82a76d68388401ff10b416"
] | [
"Classification/using_vgg16_keras.py"
] | [
"from os import listdir\nfrom os.path import isfile, join, isdir\nimport pandas as pd\nfrom PIL import Image\nfrom keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions\nfrom keras.preprocessing.image import img_to_array, load_img\nimport tensorflow as tf\n\n#wierd configs\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\ntf.keras.backend.set_session(tf.Session(config=config))\n\n\nPATH = \"/home/ok/OAI/Bunnys\"\n#get folders\nfolders = [f for f in listdir(PATH) if isdir(join(PATH, f))]\nmodel = VGG16()\nbunny_pictures = []\nnon_bunny = []\nfor folder in folders:\n folder_path = join(PATH, folder)\n #non bunny data in the folder\n non_bunny_pics = [join(folder_path, f) for f in listdir(folder_path) if isfile(join(folder_path, f))]\n # for pic in non_bunny_pics:\n\n # image = Image.open(pic)\n # image = image.resize((224,224))\n # print(image.format)\n # print(image.mode)\n # print(image.size)\n\n\n # print(folder_path)\n # print (onlyfiles)\n non_bunny += non_bunny_pics\n # print(len(non_bunny))\n #bunny data inside folder/bunnys\n bunny_path = join(folder_path, \"bunnys\")\n # print(bunny_path)\n bunnyfiles = [join(bunny_path, f) for f in listdir(bunny_path) if isfile(join(bunny_path, f))]\n\n\n\n # print(bunnyfiles)\n bunny_pictures += bunnyfiles\n for pic in bunny_pictures:\n image = load_img(pic, target_size=(224, 224))\n # image.show()\n image = img_to_array(image)\n image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\n image = preprocess_input(image)\n yhat = model.predict(image)\n label = decode_predictions(yhat)\n label = label[0][0]\n print('%s (%.2f%%)' % (label[1], label[2] * 100))\n break\n # print(len(bunny_pictures))\nverdict = [1 for i in bunny_pictures]\n# print(len(verdict))\nverdict += [0 for i in non_bunny]\n# print(len(verdict))\npictures = bunny_pictures + non_bunny\n# print(len(pic))\n\ndata = pd.DataFrame()\ndata['pictures'] = pictures\ndata['has_bunny'] = verdict\ndata.to_csv(join(PATH,'bunny_data.csv'))"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.Session",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
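`using_vgg16_keras.py` above mixes TF1-era session configuration with Keras' bundled VGG16. A current-API sketch of just the classification step (the image path is hypothetical, the first call downloads ImageNet weights, and ``tf.config.experimental.set_memory_growth`` is the TF2 counterpart of the ``ConfigProto``/``Session`` lines):

    import numpy as np
    from tensorflow.keras.applications.vgg16 import (
        VGG16, preprocess_input, decode_predictions)
    from tensorflow.keras.preprocessing.image import load_img, img_to_array

    model = VGG16()

    image = load_img("some_photo.jpg", target_size=(224, 224))   # hypothetical file
    x = preprocess_input(img_to_array(image)[np.newaxis, ...])   # shape (1, 224, 224, 3)

    label = decode_predictions(model.predict(x), top=1)[0][0]
    print("%s (%.2f%%)" % (label[1], label[2] * 100))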
rishawsingh/scikit-uplift | [
"c9dd56aa0277e81ef7c4be62bf2fd33432e46f36"
] | [
"sklift/datasets/datasets.py"
] | [
"import os\nimport shutil\n\nimport pandas as pd\nimport requests\nfrom sklearn.utils import Bunch\nfrom tqdm.auto import tqdm\n\n\ndef get_data_dir():\n \"\"\"Return the path of the scikit-uplift data dir.\n\n This folder is used by some large dataset loaders to avoid downloading the data several times.\n\n By default the data dir is set to a folder named ``scikit-uplift-data`` in the user home folder.\n\n Returns:\n string: The path to scikit-uplift data dir.\n\n \"\"\"\n return os.path.join(os.path.expanduser(\"~\"), \"scikit-uplift-data\")\n\n\ndef _create_data_dir(path):\n \"\"\"Creates a directory, which stores the datasets.\n\n Args:\n path (str): The path to scikit-uplift data dir.\n\n \"\"\"\n if not os.path.isdir(path):\n os.makedirs(path)\n\n\ndef _download(url, dest_path, content_length_header_key='Content-Length'):\n \"\"\"Download the file from url and save it locally.\n\n Args:\n url (str): URL address, must be a string.\n dest_path (str): Destination of the file.\n content_length_header_key (str): The key in the HTTP response headers that lists the response size in bytes.\n Used for progress bar.\n \"\"\"\n if isinstance(url, str):\n req = requests.get(url, stream=True)\n req.raise_for_status()\n\n with open(dest_path, \"wb\") as fd:\n total_size_in_bytes = int(req.headers.get(content_length_header_key, 0))\n progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)\n for chunk in req.iter_content(chunk_size=2 ** 20):\n progress_bar.update(len(chunk))\n fd.write(chunk)\n else:\n raise TypeError(\"URL must be a string\")\n\n\ndef _get_data(data_home, url, dest_subdir, dest_filename, download_if_missing,\n content_length_header_key='Content-Length'):\n \"\"\"Return the path to the dataset.\n\n Args:\n data_home (str): The path to scikit-uplift data dir.\n url (str): The URL to the dataset.\n dest_subdir (str): The name of the folder in which the dataset is stored.\n dest_filename (str): The name of the dataset.\n download_if_missing (bool): If False, raise a IOError if the data is not locally available instead of\n trying to download the data from the source site.\n content_length_header_key (str): The key in the HTTP response headers that lists the response size in bytes.\n Used for progress bar.\n\n Returns:\n string: The path to the dataset.\n\n \"\"\"\n if data_home is None:\n if dest_subdir is None:\n data_dir = get_data_dir()\n else:\n data_dir = os.path.join(get_data_dir(), dest_subdir)\n else:\n if dest_subdir is None:\n data_dir = os.path.abspath(data_home)\n else:\n data_dir = os.path.join(os.path.abspath(data_home), dest_subdir)\n\n _create_data_dir(data_dir)\n\n dest_path = os.path.join(data_dir, dest_filename)\n\n if not os.path.isfile(dest_path):\n if download_if_missing:\n _download(url, dest_path, content_length_header_key)\n else:\n raise IOError(\"Dataset missing\")\n return dest_path\n\n\ndef clear_data_dir(path=None):\n \"\"\"Delete all the content of the data home cache.\n\n Args:\n path (str): The path to scikit-uplift data dir\n\n \"\"\"\n if path is None:\n path = get_data_dir()\n if os.path.isdir(path):\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef fetch_lenta(data_home=None, dest_subdir=None, download_if_missing=True, return_X_y_t=False):\n \"\"\"Load and return the Lenta dataset (classification).\n\n An uplift modeling dataset containing data about Lenta's customers grociery shopping and\n related marketing campaigns.\n\n Major columns:\n\n - ``group`` (str): treatment/control group flag\n - ``response_att`` (binary): target\n 
- ``gender`` (str): customer gender\n - ``age`` (float): customer age\n - ``main_format`` (int): store type (1 - grociery store, 0 - superstore)\n\n Read more in the :ref:`docs <Lenta>`.\n\n Args:\n data_home (str): The path to the folder where datasets are stored.\n dest_subdir (str): The name of the folder in which the dataset is stored.\n download_if_missing (bool): Download the data if not present. Raises an IOError if False and data is missing.\n return_X_y_t (bool): If True, returns (data, target, treatment) instead of a Bunch object.\n\n Returns:\n Bunch or tuple: dataset.\n\n Bunch:\n By default dictionary-like object, with the following attributes:\n\n * ``data`` (DataFrame object): Dataset without target and treatment.\n * ``target`` (Series object): Column target by values.\n * ``treatment`` (Series object): Column treatment by values.\n * ``DESCR`` (str): Description of the Lenta dataset.\n * ``feature_names`` (list): Names of the features.\n * ``target_name`` (str): Name of the target.\n * ``treatment_name`` (str): Name of the treatment.\n\n Tuple:\n tuple (data, target, treatment) if `return_X_y_t` is True\n\n Example::\n\n from sklift.datasets import fetch_lenta\n\n\n dataset = fetch_lenta()\n data, target, treatment = dataset.data, dataset.target, dataset.treatment\n\n # alternative option\n data, target, treatment = fetch_lenta(return_X_y_t=True)\n\n See Also:\n\n :func:`.fetch_x5`: Load and return the X5 RetailHero dataset (classification).\n\n :func:`.fetch_criteo`: Load and return the Criteo Uplift Prediction Dataset (classification).\n\n :func:`.fetch_hillstrom`: Load and return Kevin Hillstrom Dataset MineThatData (classification or regression).\n\n :func:`.fetch_megafon`: Load and return the MegaFon Uplift Competition dataset (classification).\n \"\"\"\n\n url = 'https://sklift.s3.eu-west-2.amazonaws.com/lenta_dataset.csv.gz'\n filename = url.split('/')[-1]\n csv_path = _get_data(data_home=data_home, url=url, dest_subdir=dest_subdir,\n dest_filename=filename,\n download_if_missing=download_if_missing)\n\n target_col = 'response_att'\n treatment_col = 'group'\n\n data = pd.read_csv(csv_path)\n treatment, target = data[treatment_col], data[target_col]\n\n data = data.drop([target_col, treatment_col], axis=1)\n feature_names = list(data.columns)\n\n if return_X_y_t:\n return data, target, treatment\n\n module_path = os.path.dirname(__file__)\n with open(os.path.join(module_path, 'descr', 'lenta.rst')) as rst_file:\n fdescr = rst_file.read()\n\n return Bunch(data=data, target=target, treatment=treatment, DESCR=fdescr,\n feature_names=feature_names, target_name=target_col, treatment_name=treatment_col)\n\n\ndef fetch_x5(data_home=None, dest_subdir=None, download_if_missing=True):\n \"\"\"Load and return the X5 RetailHero dataset (classification).\n\n The dataset contains raw retail customer purchases, raw information about products and general info about customers.\n\n Major columns:\n\n - ``treatment_flg`` (binary): treatment/control group flag\n - ``target`` (binary): target\n - ``customer_id`` (str): customer id - primary key for joining\n\n Read more in the :ref:`docs <X5>`.\n\n Args:\n data_home (str, unicode): The path to the folder where datasets are stored.\n dest_subdir (str, unicode): The name of the folder in which the dataset is stored.\n download_if_missing (bool): Download the data if not present. 
Raises an IOError if False and data is missing\n\n Returns:\n Bunch: dataset.\n\n Dictionary-like object, with the following attributes.\n\n * ``data`` (Bunch object): dictionary-like object without target and treatment:\n\n * ``clients`` (ndarray or DataFrame object): General info about clients.\n * ``train`` (ndarray or DataFrame object): A subset of clients for training.\n * ``purchases`` (ndarray or DataFrame object): clients’ purchase history prior to communication.\n * ``target`` (Series object): Column target by values.\n * ``treatment`` (Series object): Column treatment by values.\n * ``DESCR`` (str): Description of the X5 dataset.\n * ``feature_names`` (Bunch object): Names of the features.\n * ``target_name`` (str): Name of the target.\n * ``treatment_name`` (str): Name of the treatment.\n\n References:\n https://ods.ai/competitions/x5-retailhero-uplift-modeling/data\n\n Example::\n\n from sklift.datasets import fetch_x5\n\n\n dataset = fetch_x5()\n data, target, treatment = dataset.data, dataset.target, dataset.treatment\n\n # data - dictionary-like object\n # data contains general info about clients:\n clients = data.clients\n\n # data contains a subset of clients for training:\n train = data.train\n\n # data contains a clients’ purchase history prior to communication.\n purchases = data.purchases\n\n See Also:\n\n :func:`.fetch_lenta`: Load and return the Lenta dataset (classification).\n\n :func:`.fetch_criteo`: Load and return the Criteo Uplift Prediction Dataset (classification).\n\n :func:`.fetch_hillstrom`: Load and return Kevin Hillstrom Dataset MineThatData (classification or regression).\n\n :func:`.fetch_megafon`: Load and return the MegaFon Uplift Competition dataset (classification).\n \"\"\"\n url_train = 'https://timds.s3.eu-central-1.amazonaws.com/uplift_train.csv.gz'\n file_train = url_train.split('/')[-1]\n csv_train_path = _get_data(data_home=data_home, url=url_train, dest_subdir=dest_subdir,\n dest_filename=file_train,\n download_if_missing=download_if_missing)\n train = pd.read_csv(csv_train_path)\n train_features = list(train.columns)\n\n target_col = 'target'\n treatment_col = 'treatment_flg'\n\n treatment, target = train[treatment_col], train[target_col]\n\n train = train.drop([target_col, treatment_col], axis=1)\n\n url_clients = 'https://timds.s3.eu-central-1.amazonaws.com/clients.csv.gz'\n file_clients = url_clients.split('/')[-1]\n csv_clients_path = _get_data(data_home=data_home, url=url_clients, dest_subdir=dest_subdir,\n dest_filename=file_clients,\n download_if_missing=download_if_missing)\n clients = pd.read_csv(csv_clients_path)\n clients_features = list(clients.columns)\n\n url_purchases = 'https://timds.s3.eu-central-1.amazonaws.com/purchases.csv.gz'\n file_purchases = url_purchases.split('/')[-1]\n csv_purchases_path = _get_data(data_home=data_home, url=url_purchases, dest_subdir=dest_subdir,\n dest_filename=file_purchases,\n download_if_missing=download_if_missing)\n purchases = pd.read_csv(csv_purchases_path)\n purchases_features = list(purchases.columns)\n\n data = Bunch(clients=clients, train=train, purchases=purchases)\n feature_names = Bunch(train_features=train_features, clients_features=clients_features,\n purchases_features=purchases_features)\n\n module_path = os.path.dirname(__file__)\n with open(os.path.join(module_path, 'descr', 'x5.rst')) as rst_file:\n fdescr = rst_file.read()\n\n return Bunch(data=data, target=target, treatment=treatment, DESCR=fdescr,\n feature_names=feature_names, target_name='target', 
treatment_name='treatment_flg')\n\n\ndef fetch_criteo(target_col='visit', treatment_col='treatment', data_home=None, dest_subdir=None,\n download_if_missing=True, percent10=False, return_X_y_t=False):\n \"\"\"Load and return the Criteo Uplift Prediction Dataset (classification).\n\n This dataset is constructed by assembling data resulting from several incrementality tests, a particular randomized\n trial procedure where a random part of the population is prevented from being targeted by advertising.\n\n Major columns:\n\n * ``treatment`` (binary): treatment\n * ``exposure`` (binary): treatment\n * ``visit`` (binary): target\n * ``conversion`` (binary): target\n * ``f0, ... , f11`` (float): feature values\n\n Read more in the :ref:`docs <Criteo>`.\n\n Args:\n target_col (string, 'visit', 'conversion' or 'all', default='visit'): Selects which column from dataset\n will be target. If 'all', return a DataFrame with all targets cols.\n treatment_col (string,'treatment', 'exposure' or 'all', default='treatment'): Selects which column from dataset\n will be treatment. If 'all', return a DataFrame with all treatment cols.\n data_home (string): Specify a download and cache folder for the datasets.\n dest_subdir (string): The name of the folder in which the dataset is stored.\n download_if_missing (bool, default=True): If False, raise an IOError if the data is not locally available\n instead of trying to download the data from the source site.\n percent10 (bool, default=False): Whether to load only 10 percent of the data.\n return_X_y_t (bool, default=False): If True, returns (data, target, treatment) instead of a Bunch object.\n\n Returns:\n Bunch or tuple: dataset.\n\n Bunch:\n By default dictionary-like object, with the following attributes:\n\n * ``data`` (DataFrame object): Dataset without target and treatment.\n * ``target`` (Series or DataFrame object): Column target by values.\n * ``treatment`` (Series or DataFrame object): Column treatment by values.\n * ``DESCR`` (str): Description of the Criteo dataset.\n * ``feature_names`` (list): Names of the features.\n * ``target_name`` (str list): Name of the target.\n * ``treatment_name`` (str or list): Name of the treatment.\n\n Tuple:\n tuple (data, target, treatment) if `return_X_y` is True\n\n Example::\n\n from sklift.datasets import fetch_criteo\n\n\n dataset = fetch_criteo(target_col='conversion', treatment_col='exposure')\n data, target, treatment = dataset.data, dataset.target, dataset.treatment\n\n # alternative option\n data, target, treatment = fetch_criteo(target_col='conversion', treatment_col='exposure', return_X_y_t=True)\n\n References:\n :cite:t:`Diemert2018`\n\n .. bibliography::\n\n See Also:\n\n :func:`.fetch_lenta`: Load and return the Lenta dataset (classification).\n\n :func:`.fetch_x5`: Load and return the X5 RetailHero dataset (classification).\n\n :func:`.fetch_hillstrom`: Load and return Kevin Hillstrom Dataset MineThatData (classification or regression).\n\n :func:`.fetch_megafon`: Load and return the MegaFon Uplift Competition dataset (classification).\n \"\"\"\n treatment_cols = ['exposure', 'treatment']\n if treatment_col == 'all':\n treatment_col = treatment_cols\n elif treatment_col not in treatment_cols:\n raise ValueError(f\"The treatment_col must be an element of {treatment_cols + ['all']}. 
\"\n f\"Got value target_col={treatment_col}.\")\n\n target_cols = ['visit', 'conversion']\n if target_col == 'all':\n target_col = target_cols\n elif target_col not in target_cols:\n raise ValueError(f\"The target_col must be an element of {target_cols + ['all']}. \"\n f\"Got value target_col={target_col}.\")\n\n if percent10:\n url = 'https://criteo-bucket.s3.eu-central-1.amazonaws.com/criteo10.csv.gz'\n else:\n url = \"https://criteo-bucket.s3.eu-central-1.amazonaws.com/criteo.csv.gz\"\n\n filename = url.split('/')[-1]\n csv_path = _get_data(data_home=data_home, url=url, dest_subdir=dest_subdir,\n dest_filename=filename,\n download_if_missing=download_if_missing)\n\n dtypes = {\n 'exposure': 'Int8',\n 'treatment': 'Int8',\n 'conversion': 'Int8',\n 'visit': 'Int8'\n }\n data = pd.read_csv(csv_path, dtype=dtypes)\n treatment, target = data[treatment_col], data[target_col]\n\n data = data.drop(target_cols + treatment_cols, axis=1)\n\n if return_X_y_t:\n return data, target, treatment\n\n feature_names = list(data.columns)\n\n module_path = os.path.dirname(__file__)\n with open(os.path.join(module_path, 'descr', 'criteo.rst')) as rst_file:\n fdescr = rst_file.read()\n\n return Bunch(data=data, target=target, treatment=treatment, DESCR=fdescr, feature_names=feature_names,\n target_name=target_col, treatment_name=treatment_col)\n\n\ndef fetch_hillstrom(target_col='visit', data_home=None, dest_subdir=None, download_if_missing=True,\n return_X_y_t=False):\n \"\"\"Load and return Kevin Hillstrom Dataset MineThatData (classification or regression).\n\n This dataset contains 64,000 customers who last purchased within twelve months.\n The customers were involved in an e-mail test.\n\n Major columns:\n\n * ``visit`` (binary): target. 1/0 indicator, 1 = Customer visited website in the following two weeks.\n * ``conversion`` (binary): target. 1/0 indicator, 1 = Customer purchased merchandise in the following two weeks.\n * ``spend`` (float): target. Actual dollars spent in the following two weeks.\n * ``segment`` (str): treatment. The e-mail campaign the customer received\n\n Read more in the :ref:`docs <Hillstrom>`.\n\n Args:\n target_col (string, 'visit' or 'conversion', 'spend' or 'all', default='visit'): Selects which column from dataset\n will be target\n data_home (str): The path to the folder where datasets are stored.\n dest_subdir (str): The name of the folder in which the dataset is stored.\n download_if_missing (bool): Download the data if not present. 
Raises an IOError if False and data is missing.\n return_X_y_t (bool, default=False): If True, returns (data, target, treatment) instead of a Bunch object.\n\n Returns:\n Bunch or tuple: dataset.\n\n Bunch:\n By default dictionary-like object, with the following attributes:\n\n * ``data`` (DataFrame object): Dataset without target and treatment.\n * ``target`` (Series or DataFrame object): Column target by values.\n * ``treatment`` (Series object): Column treatment by values.\n * ``DESCR`` (str): Description of the Hillstrom dataset.\n * ``feature_names`` (list): Names of the features.\n * ``target_name`` (str or list): Name of the target.\n * ``treatment_name`` (str): Name of the treatment.\n\n Tuple:\n tuple (data, target, treatment) if `return_X_y` is True\n\n References:\n https://blog.minethatdata.com/2008/03/minethatdata-e-mail-analytics-and-data.html\n\n Example::\n\n from sklift.datasets import fetch_hillstrom\n\n\n dataset = fetch_hillstrom(target_col='visit')\n data, target, treatment = dataset.data, dataset.target, dataset.treatment\n\n # alternative option\n data, target, treatment = fetch_hillstrom(target_col='visit', return_X_y_t=True)\n\n See Also:\n\n :func:`.fetch_lenta`: Load and return the Lenta dataset (classification).\n\n :func:`.fetch_x5`: Load and return the X5 RetailHero dataset (classification).\n\n :func:`.fetch_criteo`: Load and return the Criteo Uplift Prediction Dataset (classification).\n\n :func:`.fetch_megafon`: Load and return the MegaFon Uplift Competition dataset (classification)\n \"\"\"\n target_cols = ['visit', 'conversion', 'spend']\n if target_col == 'all':\n target_col = target_cols\n elif target_col not in target_cols:\n raise ValueError(f\"The target_col must be an element of {target_cols + ['all']}. \"\n f\"Got value target_col={target_col}.\")\n\n url = 'https://hillstorm1.s3.us-east-2.amazonaws.com/hillstorm_no_indices.csv.gz'\n filename = url.split('/')[-1]\n csv_path = _get_data(data_home=data_home, url=url, dest_subdir=dest_subdir,\n dest_filename=filename,\n download_if_missing=download_if_missing)\n\n treatment_col = 'segment'\n\n data = pd.read_csv(csv_path)\n treatment, target = data[treatment_col], data[target_col]\n\n data = data.drop(target_cols + [treatment_col], axis=1)\n\n if return_X_y_t:\n return data, target, treatment\n\n feature_names = list(data.columns)\n\n module_path = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(module_path, 'descr', 'hillstrom.rst')) as rst_file:\n fdescr = rst_file.read()\n\n return Bunch(data=data, target=target, treatment=treatment, DESCR=fdescr,\n feature_names=feature_names, target_name=target_col, treatment_name=treatment_col)\n\n\ndef fetch_megafon(data_home=None, dest_subdir=None, download_if_missing=True,\n return_X_y_t=False):\n \"\"\"Load and return the MegaFon Uplift Competition dataset (classification).\n\n An uplift modeling dataset containing synthetic data generated by telecom companies, trying to bring them closer to the real case that they encountered.\n\n Major columns:\n\n - ``X_1...X_50`` : anonymized feature set\n - ``conversion`` (binary): target\n - ``treatment_group`` (str): customer purchasing\n\n Read more in the :ref:`docs <MegaFon>`.\n\n Args:\n data_home (str): The path to the folder where datasets are stored.\n dest_subdir (str): The name of the folder in which the dataset is stored.\n download_if_missing (bool): Download the data if not present. 
Raises an IOError if False and data is missing.\n return_X_y_t (bool): If True, returns (data, target, treatment) instead of a Bunch object.\n\n Returns:\n Bunch or tuple: dataset.\n\n Bunch:\n By default dictionary-like object, with the following attributes:\n\n * ``data`` (DataFrame object): Dataset without target and treatment.\n * ``target`` (Series object): Column target by values.\n * ``treatment`` (Series object): Column treatment by values.\n * ``DESCR`` (str): Description of the Megafon dataset.\n * ``feature_names`` (list): Names of the features.\n * ``target_name`` (str): Name of the target.\n * ``treatment_name`` (str): Name of the treatment.\n\n Tuple:\n tuple (data, target, treatment) if `return_X_y` is True\n\n Example::\n\n from sklift.datasets import fetch_megafon\n\n\n dataset = fetch_megafon()\n data, target, treatment = dataset.data, dataset.target, dataset.treatment\n\n # alternative option\n data, target, treatment = fetch_megafon(return_X_y_t=True)\n\n See Also:\n\n :func:`.fetch_lenta`: Load and return the Lenta dataset (classification).\n\n :func:`.fetch_x5`: Load and return the X5 RetailHero dataset (classification).\n\n :func:`.fetch_criteo`: Load and return the Criteo Uplift Prediction Dataset (classification).\n\n :func:`.fetch_hillstrom`: Load and return Kevin Hillstrom Dataset MineThatData (classification or regression).\n\n \"\"\"\n url_train = 'https://sklift.s3.eu-west-2.amazonaws.com/megafon_dataset.csv.gz'\n file_train = url_train.split('/')[-1]\n csv_train_path = _get_data(data_home=data_home, url=url_train, dest_subdir=dest_subdir,\n dest_filename=file_train,\n download_if_missing=download_if_missing)\n train = pd.read_csv(csv_train_path)\n\n target_col = 'conversion'\n treatment_col = 'treatment_group'\n\n treatment, target = train[treatment_col], train[target_col]\n\n train = train.drop([target_col, treatment_col], axis=1)\n\n if return_X_y_t:\n return train, target, treatment\n\n feature_names = list(train.columns)\n\n module_path = os.path.dirname(os.path.abspath(__file__))\n with open(os.path.join(module_path, 'descr', 'megafon.rst')) as rst_file:\n fdescr = rst_file.read()\n\n return Bunch(data=train, target=target, treatment=treatment, DESCR=fdescr,\n feature_names=feature_names, target_name=target_col, treatment_name=treatment_col)\n"
] | [
[
"pandas.read_csv",
"sklearn.utils.Bunch"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
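Every loader in `datasets.py` above returns a scikit-learn `Bunch`, so fields can be read either as attributes or as dictionary keys. A tiny illustration with made-up values (no download involved):

    from sklearn.utils import Bunch

    dataset = Bunch(data=[[1, 2], [3, 4]], target=[0, 1], treatment=["a", "b"],
                    target_name="conversion", treatment_name="group")

    # Attribute access and key access are interchangeable on a Bunch.
    print(dataset.target_name, dataset["treatment_name"])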
eranhirs/qasrl-gs | [
"5ba6a188c32d5d506677df9eadbf35235dfca03e"
] | [
"scripts/evaluate_inter_annotator.py"
] | [
"from argparse import ArgumentParser\nfrom typing import List, Dict\n\nimport pandas as pd\nimport numpy as np\nimport os\nfrom glob import glob\nfrom itertools import combinations, product\n\nfrom common import Role, Argument\nfrom evaluate import Metrics, joint_len, iou\nfrom evaluate_dataset import eval_datasets, yield_paired_predicates\nfrom decode_encode_answers import decode_qasrl\n\n\ndef is_argument_match(arguments1: List[Argument], arguments2: List[Argument]):\n for arg1, arg2 in product(arguments1, arguments2):\n if iou(arg1, arg2) >= 0.3:\n return True\n return False\n\n\ndef evaluate_agreement(roles1: List[Role], roles2: List[Role]) -> int:\n used_roles1 = set()\n used_roles2 = set()\n n_matches = 0\n for role1, role2 in product(roles1, roles2):\n # if role1 in used_roles1 or role2 in used_roles2:\n # continue\n\n q1 = role1.question.wh.lower()\n q2 = role2.question.wh.lower()\n q1 = 'whowhat' if q1 in ('who', 'what') else q1\n q2 = 'whowhat' if q2 in ('who', 'what') else q2\n is_wh_match = q1 == q2\n if is_argument_match(role1.arguments, role2.arguments):\n if not is_wh_match:\n print(role1.question.text, role2.question.text)\n if is_wh_match and is_argument_match(role1.arguments, role2.arguments):\n n_matches += 1\n used_roles1.add(role1)\n used_roles2.add(role2)\n return n_matches\n\n\ndef eval_datasets_for_agreement(df1, df2):\n n_matches = 0\n n_total_roles = 0\n for key, roles1, roles2 in yield_paired_predicates(df1, df2):\n local_n_matches = evaluate_agreement(roles1, roles2)\n n_matches += local_n_matches\n n_total_roles += len(roles1) + len(roles2) - local_n_matches\n return n_matches, n_total_roles\n\n\ndef evaluate_generator_agreement(annot_df: pd.DataFrame, sent_map: Dict[str, List[str]]):\n cols = ['qasrl_id', 'verb_idx']\n n_gen = annot_df.groupby(cols).worker_id.transform(pd.Series.nunique)\n workers = annot_df.worker_id.unique().tolist()\n n_workers = len(workers)\n annot_df = annot_df[n_gen == n_workers].copy()\n n_predicates = annot_df[cols].drop_duplicates().shape[0]\n print(\"n_workers: \", n_workers)\n print(\"n_predicates: \", n_predicates)\n print(f\"worker_1\\tworker_2\\tprec\\trecall\\tf1\")\n\n f1s, label_f1s = [], []\n uniq_roles_per_predicate = []\n agreed_roles_per_predicate = []\n for w1, w2 in combinations(workers, r=2):\n w1_df = annot_df[annot_df.worker_id == w1].copy()\n w2_df = annot_df[annot_df.worker_id == w2].copy()\n # n_matches, n_total = eval_datasets_for_agreement(w1_df, w2_df)\n # uniq_roles_per_predicate.append(float(n_total)/n_predicates)\n # agreed_roles_per_predicate.append(float(n_matches)/n_predicates)\n #\n #\n arg_metrics, label_arg_metrics, _ = eval_datasets(w1_df, w2_df)\n print(f\"{w1}\\t{w2}\\t{arg_metrics.prec()}\\t{arg_metrics.recall()}\\t{arg_metrics.f1()}\")\n print(f\"{w1}\\t{w2}\\t{label_arg_metrics.prec()}\\t{label_arg_metrics.recall()}\\t{label_arg_metrics.f1()}\")\n\n f1s.append(arg_metrics.f1())\n label_f1s.append(label_arg_metrics.f1())\n f1s = np.array(f1s)\n label_f1s = np.array(label_f1s)\n print(f1s.mean(), f1s.std())\n print(label_f1s.mean(), label_f1s.std())\n\n # agreed_roles_per_predicate = np.array(agreed_roles_per_predicate)\n # print(agreed_roles_per_predicate.mean(), agreed_roles_per_predicate.std())\n #\n # uniq_roles_per_predicate = np.array(uniq_roles_per_predicate)\n # print(uniq_roles_per_predicate.mean(), uniq_roles_per_predicate.std())\n\n\ndef read_csv(file_path: str):\n try:\n return pd.read_csv(file_path)\n except UnicodeDecodeError:\n return pd.read_csv(file_path, 
encoding=\"Latin-1\")\n\n\ndef dataset_path(root_dir: str, dataset_name: str,\n gen1: str, gen2: str, arb: str):\n slice_path = \"_\".join([gen1, gen2, arb])\n slice_path = f\"{dataset_name}.inter.{slice_path}.csv\"\n slice_path = os.path.join(root_dir, slice_path)\n return slice_path\n\n\ndef main(root_dir: str, dataset_name: str):\n readme = pd.read_csv(os.path.join(root_dir, 'readme.csv'))\n sent_path = os.path.join(root_dir, f'{dataset_name}.csv')\n sent_df = read_csv(sent_path)\n sent_map = dict(zip(sent_df.qasrl_id, sent_df.tokens.apply(str.split)))\n # original annotations, multiple generation tasks per predicate\n annot_df = read_csv(os.path.join(root_dir, f'{dataset_name}.annot.csv'))\n annot_df = decode_qasrl(annot_df)\n print(annot_df.worker_id.value_counts())\n evaluate_generator_agreement(annot_df, sent_map)\n\n slice_pairs = []\n for arbitrators_, generators_ in zip(readme.arbitrators, readme.generators):\n arb1, arb2 = arbitrators_.split()\n gen1, gen2, gen3, gen4 = generators_.split()\n slice1_path = dataset_path(root_dir, dataset_name, gen1, gen2, arb1)\n slice2_path = dataset_path(root_dir, dataset_name, gen3, gen4, arb2)\n slice1 = decode_qasrl(pd.read_csv(slice1_path))\n slice2 = decode_qasrl(pd.read_csv(slice2_path))\n # make sure they have the same predicates...\n s1 = set(zip(slice1.qasrl_id, slice1.verb_idx))\n s2 = set(zip(slice2.qasrl_id, slice2.verb_idx))\n print(len(s1), len(s2))\n unlabelled_arg, labeled_arg, unlabelled_role = eval_datasets(slice1, slice2)\n print(unlabelled_arg)\n print(labeled_arg)\n print(unlabelled_role)\n\n\nif __name__ == \"__main__\":\n ap = ArgumentParser()\n ap.add_argument(\"inter_annotator_dir\")\n ap.add_argument(\"dataset_name\")\n args = ap.parse_args()\n main(args.inter_annotator_dir, args.dataset_name)\n"
] | [
[
"numpy.array",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
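`evaluate_inter_annotator.py` counts two argument sets as matching when some pair of spans reaches an IoU of at least 0.3. The repository imports its own `iou` from `evaluate`, so the helper below is only an assumed token-span variant for illustration:

    def span_iou(a, b):
        """Intersection-over-union of two half-open token spans (start, end)."""
        inter = max(0, min(a[1], b[1]) - max(a[0], b[0]))
        union = (a[1] - a[0]) + (b[1] - b[0]) - inter
        return inter / union if union else 0.0

    # Two annotators marked overlapping argument spans for the same predicate;
    # the 0.3 threshold mirrors is_argument_match() above.
    print(span_iou((3, 7), (4, 8)))             # 0.6
    print(span_iou((3, 7), (4, 8)) >= 0.3)      # True -> counted as a match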
changminL/MT-VAE | [
"0500df9e24700157a0ef81c5d627d544c06e580f"
] | [
"fairseq/models/masked_lm.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom fairseq import utils\nfrom fairseq.models import (\n FairseqEncoderModel,\n FairseqEncoder,\n register_model,\n register_model_architecture,\n)\nfrom fairseq.modules import (\n LayerNorm,\n SinusoidalPositionalEmbedding,\n TransformerSentenceEncoder,\n)\nfrom fairseq.modules.transformer_sentence_encoder import init_bert_params\n\n\nlogger = logging.getLogger(__name__)\n\n\n@register_model('masked_lm')\nclass MaskedLMModel(FairseqEncoderModel):\n \"\"\"\n Class for training a Masked Language Model. It also supports an\n additional sentence level prediction if the sent-loss argument is set.\n \"\"\"\n def __init__(self, args, encoder):\n super().__init__(encoder)\n self.args = args\n\n # if specified then apply bert initialization on the model. We need\n # to explictly call this to make sure that the output embeddings\n # and projection layers are also correctly initialized\n if getattr(args, 'apply_bert_init', False):\n self.apply(init_bert_params)\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # Arguments related to dropout\n parser.add_argument('--dropout', type=float, metavar='D',\n help='dropout probability')\n parser.add_argument('--attention-dropout', type=float,\n metavar='D', help='dropout probability for'\n ' attention weights')\n parser.add_argument('--act-dropout', type=float,\n metavar='D', help='dropout probability after'\n ' activation in FFN')\n\n # Arguments related to hidden states and self-attention\n parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension for FFN')\n parser.add_argument('--encoder-layers', type=int, metavar='N',\n help='num encoder layers')\n parser.add_argument('--encoder-attention-heads', type=int, metavar='N',\n help='num encoder attention heads')\n\n # Arguments related to input and output embeddings\n parser.add_argument('--encoder-embed-dim', type=int, metavar='N',\n help='encoder embedding dimension')\n parser.add_argument('--share-encoder-input-output-embed',\n action='store_true', help='share encoder input'\n ' and output embeddings')\n parser.add_argument('--encoder-learned-pos', action='store_true',\n help='use learned positional embeddings in the encoder')\n parser.add_argument('--no-token-positional-embeddings',\n action='store_true',\n help='if set, disables positional embeddings'\n ' (outside self attention)')\n parser.add_argument('--num-segment', type=int, metavar='N',\n help='num segment in the input')\n\n # Arguments related to sentence level prediction\n parser.add_argument('--sentence-class-num', type=int, metavar='N',\n help='number of classes for sentence task')\n parser.add_argument('--sent-loss', action='store_true', help='if set,'\n ' calculate sentence level predictions')\n\n # Arguments related to parameter initialization\n parser.add_argument('--apply-bert-init', action='store_true',\n help='use custom param initialization for BERT')\n\n # misc params\n parser.add_argument('--activation-fn',\n choices=utils.get_available_activation_fns(),\n help='activation function to use')\n parser.add_argument('--pooler-activation-fn',\n choices=utils.get_available_activation_fns(),\n help='Which activation function to use for pooler layer.')\n 
parser.add_argument('--encoder-normalize-before', action='store_true',\n help='apply layernorm before each encoder block')\n\n def forward(self, src_tokens, segment_labels=None, **kwargs):\n return self.encoder(src_tokens, segment_labels=segment_labels, **kwargs)\n\n def max_positions(self):\n return self.encoder.max_positions\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\"Build a new model instance.\"\"\"\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if not hasattr(args, 'max_positions'):\n args.max_positions = args.tokens_per_sample\n\n logger.info(args)\n\n encoder = MaskedLMEncoder(args, task.dictionary)\n return cls(args, encoder)\n\n\nclass MaskedLMEncoder(FairseqEncoder):\n \"\"\"\n Encoder for Masked Language Modelling.\n \"\"\"\n\n def __init__(self, args, dictionary):\n super().__init__(dictionary)\n\n self.padding_idx = dictionary.pad()\n self.vocab_size = dictionary.__len__()\n self.max_positions = args.max_positions\n\n self.sentence_encoder = TransformerSentenceEncoder(\n padding_idx=self.padding_idx,\n vocab_size=self.vocab_size,\n num_encoder_layers=args.encoder_layers,\n embedding_dim=args.encoder_embed_dim,\n ffn_embedding_dim=args.encoder_ffn_embed_dim,\n num_attention_heads=args.encoder_attention_heads,\n dropout=args.dropout,\n attention_dropout=args.attention_dropout,\n activation_dropout=args.act_dropout,\n max_seq_len=self.max_positions,\n num_segments=args.num_segment,\n use_position_embeddings=not args.no_token_positional_embeddings,\n encoder_normalize_before=args.encoder_normalize_before,\n apply_bert_init=args.apply_bert_init,\n activation_fn=args.activation_fn,\n learned_pos_embedding=args.encoder_learned_pos,\n )\n\n self.share_input_output_embed = args.share_encoder_input_output_embed\n self.embed_out = None\n self.sentence_projection_layer = None\n self.sentence_out_dim = args.sentence_class_num\n self.lm_output_learned_bias = None\n\n # Remove head is set to true during fine-tuning\n self.load_softmax = not getattr(args, 'remove_head', False)\n\n self.masked_lm_pooler = nn.Linear(\n args.encoder_embed_dim, args.encoder_embed_dim\n )\n self.pooler_activation = utils.get_activation_fn(args.pooler_activation_fn)\n\n self.lm_head_transform_weight = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim)\n self.activation_fn = utils.get_activation_fn(args.activation_fn)\n self.layer_norm = LayerNorm(args.encoder_embed_dim)\n\n self.lm_output_learned_bias = None\n if self.load_softmax:\n self.lm_output_learned_bias = nn.Parameter(torch.zeros(self.vocab_size))\n\n if not self.share_input_output_embed:\n self.embed_out = nn.Linear(\n args.encoder_embed_dim,\n self.vocab_size,\n bias=False\n )\n\n if args.sent_loss:\n self.sentence_projection_layer = nn.Linear(\n args.encoder_embed_dim,\n self.sentence_out_dim,\n bias=False\n )\n\n def forward(self, src_tokens, segment_labels=None, masked_tokens=None, **unused):\n \"\"\"\n Forward pass for Masked LM encoder. 
This first computes the token\n embedding using the token embedding matrix, position embeddings (if\n specified) and segment embeddings (if specified).\n\n Here we assume that the sentence representation corresponds to the\n output of the classification_token (see bert_task or cross_lingual_lm\n task for more details).\n Args:\n - src_tokens: B x T matrix representing sentences\n - segment_labels: B x T matrix representing segment label for tokens\n Returns:\n - a tuple of the following:\n - logits for predictions in format B x T x C to be used in\n softmax afterwards\n - a dictionary of additional data, where 'pooled_output' contains\n the representation for classification_token and 'inner_states'\n is a list of internal model states used to compute the\n predictions (similar in ELMO). 'sentence_logits'\n is the prediction logit for NSP task and is only computed if\n this is specified in the input arguments.\n \"\"\"\n\n inner_states, sentence_rep = self.sentence_encoder(\n src_tokens,\n segment_labels=segment_labels,\n )\n\n x = inner_states[-1].transpose(0, 1)\n # project masked tokens only\n if masked_tokens is not None:\n x = x[masked_tokens, :]\n x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x)))\n\n pooled_output = self.pooler_activation(self.masked_lm_pooler(sentence_rep))\n\n # project back to size of vocabulary\n if self.share_input_output_embed \\\n and hasattr(self.sentence_encoder.embed_tokens, 'weight'):\n x = F.linear(x, self.sentence_encoder.embed_tokens.weight)\n elif self.embed_out is not None:\n x = self.embed_out(x)\n if self.lm_output_learned_bias is not None:\n x = x + self.lm_output_learned_bias\n sentence_logits = None\n if self.sentence_projection_layer:\n sentence_logits = self.sentence_projection_layer(pooled_output)\n\n return x, {\n 'inner_states': inner_states,\n 'pooled_output': pooled_output,\n 'sentence_logits': sentence_logits\n }\n\n def max_positions(self):\n \"\"\"Maximum output length supported by the encoder.\"\"\"\n return self.max_positions\n\n def upgrade_state_dict_named(self, state_dict, name):\n if isinstance(\n self.sentence_encoder.embed_positions,\n SinusoidalPositionalEmbedding\n ):\n state_dict[\n name + '.sentence_encoder.embed_positions._float_tensor'\n ] = torch.FloatTensor(1)\n if not self.load_softmax:\n for k in list(state_dict.keys()):\n if (\n \"embed_out.weight\" in k or\n \"sentence_projection_layer.weight\" in k or\n \"lm_output_learned_bias\" in k\n ):\n del state_dict[k]\n return state_dict\n\n\n@register_model_architecture('masked_lm', 'masked_lm')\ndef base_architecture(args):\n args.dropout = getattr(args, 'dropout', 0.1)\n args.attention_dropout = getattr(args, 'attention_dropout', 0.1)\n args.act_dropout = getattr(args, 'act_dropout', 0.0)\n\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)\n args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)\n args.num_segment = getattr(args, 'num_segment', 2)\n\n args.sentence_class_num = getattr(args, 'sentence_class_num', 2)\n args.sent_loss = getattr(args, 'sent_loss', False)\n\n args.apply_bert_init = getattr(args, 
'apply_bert_init', False)\n\n args.activation_fn = getattr(args, 'activation_fn', 'relu')\n args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\n\n\n@register_model_architecture('masked_lm', 'bert_base')\ndef bert_base_architecture(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)\n args.share_encoder_input_output_embed = getattr(\n args, 'share_encoder_input_output_embed', True)\n args.no_token_positional_embeddings = getattr(\n args, 'no_token_positional_embeddings', False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)\n args.num_segment = getattr(args, 'num_segment', 2)\n\n args.encoder_layers = getattr(args, 'encoder_layers', 12)\n\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 3072)\n\n args.sentence_class_num = getattr(args, 'sentence_class_num', 2)\n args.sent_loss = getattr(args, 'sent_loss', True)\n\n args.apply_bert_init = getattr(args, 'apply_bert_init', True)\n\n args.activation_fn = getattr(args, 'activation_fn', 'gelu')\n args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)\n base_architecture(args)\n\n\n@register_model_architecture('masked_lm', 'bert_large')\ndef bert_large_architecture(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.encoder_layers = getattr(args, 'encoder_layers', 24)\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n bert_base_architecture(args)\n\n\n@register_model_architecture('masked_lm', 'xlm_base')\ndef xlm_architecture(args):\n args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)\n args.share_encoder_input_output_embed = getattr(\n args, 'share_encoder_input_output_embed', True)\n args.no_token_positional_embeddings = getattr(\n args, 'no_token_positional_embeddings', False)\n args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)\n args.num_segment = getattr(args, 'num_segment', 1)\n\n args.encoder_layers = getattr(args, 'encoder_layers', 6)\n\n args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)\n args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)\n\n args.sent_loss = getattr(args, 'sent_loss', False)\n\n args.activation_fn = getattr(args, 'activation_fn', 'gelu')\n args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)\n args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')\n args.apply_bert_init = getattr(args, 'apply_bert_init', True)\n base_architecture(args)\n"
] | [
[
"torch.FloatTensor",
"torch.nn.Linear",
"torch.nn.functional.linear",
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
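Note on the masked_lm.py row above: when share_encoder_input_output_embed is set, MaskedLMEncoder.forward projects hidden states back to the vocabulary with F.linear against the token-embedding matrix instead of a separate embed_out layer. A minimal, self-contained sketch of that weight-tying step (the sizes below are placeholders, not fairseq's defaults):

import torch
import torch.nn as nn
import torch.nn.functional as F

vocab_size, embed_dim = 1000, 32
embed_tokens = nn.Embedding(vocab_size, embed_dim)   # stands in for sentence_encoder.embed_tokens

hidden = torch.randn(2, 7, embed_dim)                # B x T x C encoder output
logits = F.linear(hidden, embed_tokens.weight)       # B x T x vocab_size, no extra projection weights
assert logits.shape == (2, 7, vocab_size)

Tying the output projection to the input embedding keeps the parameter count down; it is the branch guarded by hasattr(self.sentence_encoder.embed_tokens, 'weight') in the forward pass above.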
Paethon/pytorch-pretrained-BERT | [
"2152bfeae82439600dc5b5deab057a3c4331c62d"
] | [
"tests/modeling_openai_test.py"
] | [
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport unittest\nimport json\nimport random\n\nimport torch\n\nfrom pytorch_pretrained_bert import (OpenAIGPTConfig, OpenAIGPTModel,\n OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel)\n\n\nclass OpenAIGPTModelTest(unittest.TestCase):\n class OpenAIGPTModelTester(object):\n\n def __init__(self,\n parent,\n batch_size=13,\n seq_length=7,\n is_training=True,\n use_position_ids=True,\n use_token_type_ids=True,\n use_labels=True,\n vocab_size=99,\n n_special=1,\n n_positions=33,\n n_embd=32,\n n_layer=5,\n n_head=4,\n n_choices=3,\n afn=\"gelu\",\n resid_pdrop=0.1,\n attn_pdrop=0.1,\n embd_pdrop=0.1,\n type_sequence_label_size=2,\n initializer_range=0.02,\n num_labels=3,\n scope=None):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_position_ids = use_position_ids\n self.use_token_type_ids = use_token_type_ids\n self.use_labels = use_labels\n self.vocab_size = vocab_size\n self.n_special = n_special\n self.n_positions = n_positions\n self.n_embd = n_embd\n self.n_layer = n_layer\n self.n_head = n_head\n self.afn = afn\n self.n_choices = n_choices\n self.resid_pdrop = resid_pdrop\n self.attn_pdrop = attn_pdrop\n self.embd_pdrop = embd_pdrop\n self.type_sequence_label_size = type_sequence_label_size\n self.initializer_range = initializer_range\n self.num_labels = num_labels\n self.scope = scope\n\n def prepare_config_and_inputs(self):\n input_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.vocab_size)\n\n position_ids = None\n if self.use_position_ids:\n position_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.n_positions)\n\n token_type_ids = None\n if self.use_token_type_ids:\n total_voc = self.vocab_size + self.n_special\n token_type_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_voc)\n\n mc_labels = None\n lm_labels = None\n mc_token_ids = None\n if self.use_labels:\n mc_labels = OpenAIGPTModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size)\n lm_labels = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.num_labels)\n mc_token_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices], self.seq_length)\n\n config = OpenAIGPTConfig(\n vocab_size_or_config_json_file=self.vocab_size,\n n_positions=self.n_positions,\n n_special=self.n_special,\n n_embd=self.n_embd,\n n_layer=self.n_layer,\n n_head=self.n_head,\n afn=self.afn,\n resid_pdrop=self.resid_pdrop,\n attn_pdrop=self.attn_pdrop,\n embd_pdrop=self.embd_pdrop,\n initializer_range=self.initializer_range)\n\n return (config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids)\n\n def create_openai_model(self, 
config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids):\n model = OpenAIGPTModel(config)\n model.eval()\n hidden_states = model(input_ids, position_ids, token_type_ids)\n outputs = {\n \"hidden_states\": hidden_states,\n }\n return outputs\n\n def check_openai_model_output(self, result):\n self.parent.assertListEqual(\n list(result[\"hidden_states\"].size()),\n [self.batch_size, self.n_choices, self.seq_length, self.n_embd])\n\n\n def create_openai_lm_head(self, config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids):\n model = OpenAIGPTLMHeadModel(config)\n model.eval()\n loss = model(input_ids, position_ids, token_type_ids, lm_labels)\n lm_logits = model(input_ids, position_ids, token_type_ids)\n outputs = {\n \"loss\": loss,\n \"lm_logits\": lm_logits,\n }\n return outputs\n\n def check_openai_lm_head_output(self, result):\n total_voc = self.n_special + self.vocab_size\n self.parent.assertListEqual(\n list(result[\"lm_logits\"].size()),\n [self.batch_size, self.n_choices, self.seq_length, total_voc])\n\n def check_openai_lm_head_loss_output(self, result):\n self.parent.assertListEqual(\n list(result[\"loss\"].size()),\n [])\n\n def create_openai_double_heads(self, config, input_ids, token_type_ids, position_ids,\n mc_labels, lm_labels, mc_token_ids):\n model = OpenAIGPTDoubleHeadsModel(config)\n model.eval()\n loss = model(input_ids, mc_token_ids,\n lm_labels=lm_labels, mc_labels=mc_labels,\n token_type_ids=token_type_ids, position_ids=position_ids)\n lm_logits, mc_logits = model(input_ids, mc_token_ids, position_ids=position_ids, token_type_ids=token_type_ids)\n outputs = {\n \"loss\": loss,\n \"lm_logits\": lm_logits,\n \"mc_logits\": mc_logits,\n }\n return outputs\n\n def check_openai_double_heads_output(self, result):\n total_voc = self.n_special + self.vocab_size\n self.parent.assertListEqual(\n list(result[\"lm_logits\"].size()),\n [self.batch_size, self.n_choices, self.seq_length, total_voc])\n self.parent.assertListEqual(\n list(result[\"mc_logits\"].size()),\n [self.batch_size, self.n_choices])\n\n def check_openai_double_heads_loss_output(self, result):\n self.parent.assertListEqual(\n [list(l.size()) for l in result[\"loss\"]],\n [[], []])\n\n def test_default(self):\n self.run_tester(OpenAIGPTModelTest.OpenAIGPTModelTester(self))\n\n def test_config_to_json_string(self):\n config = OpenAIGPTConfig(vocab_size_or_config_json_file=99, n_embd=37)\n obj = json.loads(config.to_json_string())\n self.assertEqual(obj[\"vocab_size\"], 99)\n self.assertEqual(obj[\"n_embd\"], 37)\n\n def run_tester(self, tester):\n config_and_inputs = tester.prepare_config_and_inputs()\n output_result = tester.create_openai_model(*config_and_inputs)\n tester.check_openai_model_output(output_result)\n\n output_result = tester.create_openai_lm_head(*config_and_inputs)\n tester.check_openai_lm_head_output(output_result)\n tester.check_openai_lm_head_loss_output(output_result)\n\n output_result = tester.create_openai_double_heads(*config_and_inputs)\n tester.check_openai_double_heads_output(output_result)\n tester.check_openai_double_heads_loss_output(output_result)\n\n @classmethod\n def ids_tensor(cls, shape, vocab_size, rng=None, name=None):\n \"\"\"Creates a random int32 tensor of the shape within the vocab size.\"\"\"\n if rng is None:\n rng = random.Random()\n\n total_dims = 1\n for dim in shape:\n total_dims *= dim\n\n values = []\n for _ in range(total_dims):\n values.append(rng.randint(0, vocab_size - 1))\n\n return 
torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
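Brief note on the OpenAI GPT test row above: the ids_tensor helper simply fills a batch_size x n_choices x seq_length tensor with random ids below the vocabulary size. An equivalent one-liner using torch.randint — an alternative sketch, not the repo's code; the sizes 13, 3, 7 and vocab 99 are the tester's defaults:

import torch

ids = torch.randint(low=0, high=99, size=(13, 3, 7), dtype=torch.long)  # batch x n_choices x seq_length
assert ids.shape == (13, 3, 7) and int(ids.max()) < 99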
koustuvsinha/hier-class | [
"6ee2702bc228e9c94b42a32a6a78dc27159fefb8"
] | [
"codes/experiments/hier_classifier.py"
] | [
"# Experiment on simple decoder classification\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport logging\nimport numpy as np\nfrom pprint import pprint, pformat\nimport os\nimport copy\nfrom codes.utils import data as data_utils\nfrom codes.models import decoders, baselines\nfrom codes.utils import constants as CONSTANTS\nfrom codes.utils import model_utils as mu\nfrom codes.utils.stats import Statistics\nfrom codes.utils.evaluate import evaluate_test\n\n\n# select device automatically\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndef run_experiment(config, _run):\n # set seed\n torch.manual_seed(config['seed'])\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(config['seed'])\n # bookkeeping\n if len(config['exp_name']) < 1:\n config['exp_name'] = _run.start_time.strftime('%Y_%m_%d_%H_%M_%S')\n # if experiment folder exists, append timestamp after\n base_dir = str(os.path.dirname(os.path.realpath(__file__)).split('codes')[0])\n exp_log_dir = os.path.join(base_dir, 'logs')\n if os.path.exists(os.path.join(exp_log_dir, config['exp_name'])):\n config['exp_name'] = os.path.join(config['exp_name'], _run.start_time.strftime('%Y_%m_%d_%H_%M_%S'))\n data = data_utils.Data_Utility(config)\n max_categories = config['levels']\n if config['level'] != -1:\n max_categories = 1\n logging.info(\"Loading data\")\n data.load()\n\n batch_size = config['batch_size']\n gpu = config['gpu']\n use_gpu = config['use_gpu']\n model_params = copy.deepcopy(config)\n cat_per_level = []\n label_size = 1\n for level in range(config['levels']):\n nl = len(data.get_level_labels(level))\n logging.info('Classes in level {} = {}'.format(level, nl))\n cat_per_level.append(nl)\n label_size += nl\n if config['level'] != -1:\n break\n print(cat_per_level)\n\n if config['level'] != -1:\n # check if _config['level'] is not arbitrary\n if config['level'] >= len(cat_per_level):\n raise RuntimeError(\"config['level'] cannot be more than config['levels']\")\n logging.info(\"Choosing only {} level to classify\".format(config['level']))\n label_size = cat_per_level[config['level']] + 1\n embedding = None\n if config['use_embedding']:\n logging.info(\"Creating / loading word embeddings\")\n embedding = data.load_embedding(config['embedding_file'],\n config['embedding_saved'],\n embedding_dim=config['embedding_dim'],\n data_path=config['data_path'])\n model_params.update({\n 'vocab_size': len(data.word2id),\n 'label_size': label_size,\n 'embedding': embedding,\n 'pad_token': data.word2id[CONSTANTS.PAD_WORD],\n 'total_cats': sum(cat_per_level) + 1,\n 'taxonomy': data.taxonomy,\n 'label_sizes':cat_per_level,\n 'label2id': data.label2id,\n 'max_categories': max_categories,\n 'gpu':config['gpu']\n })\n\n logging.info(\"Parameters\")\n logging.info(pformat(config))\n\n ## calculate label weights\n ## for level1 labels = 1.0\n ## for level2 labels = 0.8\n ## for level3 labels = 0.6\n level2w = {}\n for i,lb in enumerate(config['label_weights']):\n level2w[i] = lb\n label_weights = []\n if config['level'] == -1:\n label_weights = [0.0]\n for level in range(config['levels']):\n if config['level'] != -1 and config['level'] != level:\n continue\n labels = list(sorted(data.get_level_labels(level)))\n for lb in labels:\n label_weights.append(level2w[level])\n #label_weights = torch.FloatTensor(label_weights)\n #label_weights = label_weights.cuda(gpu)\n\n\n #model = 
decoders.SimpleDecoder(**model_params)\n if config['baseline']:\n assert config['level'] != -1\n label_weights = data.calculate_weights(config['level'])\n #logging.info(\"Label weights\")\n #logging.info(label_weights)\n label_weights = torch.FloatTensor(label_weights).to(device)\n if config['baseline'] == 'fast':\n model = baselines.FastText(**model_params)\n elif config['baseline'] == 'bilstm':\n model = baselines.BiLSTM_MLP(**model_params)\n else:\n raise NotImplementedError(\"Baseline not implemented\")\n else:\n label_weights = [1.0]\n for i in range(config['levels']):\n label_weights.extend(data.calculate_weights(i+1))\n #logging.info(\"Label weights\")\n #logging.info(label_weights)\n label_weights = torch.FloatTensor(label_weights).to(device)\n if config['model_type'] == 'attentive':\n model = decoders.AttentiveHierarchicalClassifier(**model_params)\n elif config['model_type'] == 'pooling':\n model = decoders.PooledHierarchicalClassifier(**model_params)\n\n print(model)\n\n m_params = [p for p in model.parameters() if p.requires_grad]\n optimizer = config['optimizer']\n num_params = sum([np.prod(p.size()) for p in m_params])\n logging.info(\"Model parameters : {}\".format(num_params))\n if optimizer == 'adam':\n optimizer = optim.Adam(m_params, lr=config['lr'], weight_decay=config['weight_decay'])\n elif optimizer == 'rmsprop':\n optimizer = optim.RMSprop(m_params, lr=config['lr'], weight_decay=config['weight_decay'])\n elif optimizer == 'sgd':\n optimizer = optim.SGD(m_params, lr=config['lr'], momentum=config['momentum'],\n weight_decay=config['weight_decay'])\n else:\n raise NotImplementedError()\n\n model = model.to(device)\n\n # set learning rate scheduler\n if config['lr_scheduler'] == 'plateau':\n lr_scheduler = ReduceLROnPlateau(optimizer,\n mode='min',\n factor=config['lr_factor'],\n threshold=config['lr_threshold'],\n patience=config['lr_patience'],\n cooldown=1,\n verbose=True)\n elif config['lr_scheduler'] == 'sltr':\n lr_scheduler = mu.SLTR(epochs=config['epochs'],\n batch_size=config['batch_size'],\n num_train=len(data.train_indices))\n else:\n raise NotImplementedError(\"lr_scheduler {} not implemented\".format(config['lr_scheduler']))\n\n # create trainer\n trainer = decoders.Trainer(model=model, loss_weights=label_weights,\n **model_params)\n\n tf_ratio = config['tf_ratio']\n logging.info(\"Starting to train\")\n pytorch_version = torch.__version__\n logging.info(\"Using pytorch version : {}\".format(pytorch_version))\n epochs = config['epochs']\n max_levels = config['levels']\n if config['level'] != -1:\n max_levels = 1\n stats = Statistics(batch_size, max_levels, config['exp_name'], data=data, n_heads=config['n_heads'], level=config['level'])\n logging.info(\"With focus : {}\".format(config['loss_focus']))\n all_step = 0\n for epoch in range(epochs):\n stats.next()\n logging.info(\"Getting data\")\n logging.info(\"Num Train Rows: {}\".format(len(data.train_indices)))\n logging.info(\"Num Test Rows: {}\".format(len(data.test_indices)))\n logging.info(\"TF Ratio: {}\".format(tf_ratio))\n train_data_loader = data.get_dataloader(mode='train')\n model.train()\n loss = None\n for batch_idx, batch in enumerate(train_data_loader):\n if config['lr_scheduler'] == 'sltr':\n optimizer = lr_scheduler.step(optimizer)\n optimizer.zero_grad()\n batch.to_device(device)\n (loss, log_loss), accs, attns, *_ = trainer.batchNLLLoss(batch.inp, batch.inp_lengths,\n batch.outp,mode='train', tf_ratio=config['tf_ratio'])\n torch.cuda.empty_cache()\n loss.backward()\n m_params = [p for p in 
model.parameters() if p.requires_grad]\n nn.utils.clip_grad_norm(m_params, config['clip_grad'])\n optimizer.step()\n stats.update_train(loss.item(), accs, log_loss=log_loss.item())\n ## free up memory\n del batch\n del loss\n del accs\n del attns\n if config['debug']:\n break\n ## validate\n model.eval()\n ## store the attention weights and words in a separate file for\n ## later visualization\n storage = []\n test_data_loader = data.get_dataloader(mode='test')\n valid_losses = []\n with torch.no_grad():\n for batch_idx, batch in enumerate(test_data_loader):\n batch.to_device(device)\n ## overall - teacher_forcing false\n (loss, log_loss), accs, attns, preds, correct, correct_confs, incorrect_confs,_ = trainer.batchNLLLoss(\n batch.inp, batch.inp_lengths, batch.outp, mode='infer',overall=True)\n stats.update_validation(loss.item(),accs, attn=attns, src=batch.inp, preds=preds, correct=correct,\n correct_confs=correct_confs,\n incorrect_confs=incorrect_confs,\n log_loss=log_loss.item(),\n mode='overall')\n valid_losses.append(loss.item())\n\n ## exact - teacher_forcing true\n (loss, log_loss), accs, attns, preds, correct, correct_confs, incorrect_confs,_ = trainer.batchNLLLoss(\n batch.inp, batch.inp_lengths, batch.outp, mode='infer', overall=False)\n stats.update_validation(loss.item(), accs, attn=attns, src=batch.inp, preds=preds, correct=correct,\n correct_confs=correct_confs,\n incorrect_confs=incorrect_confs,\n log_loss=log_loss.item(),\n mode='exact')\n\n valid_losses.append(loss.item())\n\n del batch\n del loss\n del accs\n del attns\n del preds\n if config['debug']:\n break\n stats.log_loss()\n valid_loss = np.mean(valid_losses)\n #valid_acc_lr = stats.get_valid_acc(config['levels'] - 1)\n #print('valid_acc_lr {}'.format(valid_acc_lr))\n if config['lr_scheduler'] == 'plateau':\n lr_scheduler.step(valid_loss)\n stats.reset()\n ## anneal\n tf_ratio = tf_ratio * config['tf_anneal']\n ## saving model\n mu.save_model(model, epoch, 0, config['exp_name'], model_params)\n ## Evaluate Testing data\n ## trainer.model.eval()\n ## evaluate_test(trainer, data, config['test_file_name'], config['test_output_name'], config)\n\n\n\n\n\n"
] | [
[
"torch.optim.Adam",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.manual_seed",
"torch.cuda.empty_cache",
"torch.nn.utils.clip_grad_norm",
"torch.optim.RMSprop",
"torch.no_grad",
"numpy.mean",
"torch.cuda.is_available",
"torch.cuda.manual_seed_all",
"torch.FloatTensor",
"torch.optim.SGD"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
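Note on the hier_classifier.py row above: when config['lr_scheduler'] == 'plateau', the training loop steps ReduceLROnPlateau with the epoch's mean validation loss, so the learning rate only decays once validation stops improving. A minimal sketch of that pattern (the model and numeric settings below are placeholders, not the repo's configuration):

import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = nn.Linear(10, 3)
optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2, cooldown=1)

for epoch in range(5):
    valid_loss = 1.0 / (epoch + 1)   # stand-in for np.mean(valid_losses) in the loop above
    scheduler.step(valid_loss)       # learning rate drops only after the loss plateaus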
manesioz/modin | [
"637e148dba354825307f3f131fa2185ad5a6b54a"
] | [
"modin/pandas/test/test_groupby.py"
] | [
"import pytest\nimport pandas\nimport numpy as np\nimport modin.pandas as pd\nfrom modin.pandas.utils import from_pandas, to_pandas\n\npd.DEFAULT_NPARTITIONS = 4\n\n\ndef ray_df_equals_pandas(ray_df, pandas_df):\n assert isinstance(ray_df, pd.DataFrame)\n # Order may not match here, but pandas behavior can change, so we will be consistent\n # ourselves in keeping the columns in the order they were in before the groupby\n try:\n assert (\n to_pandas(ray_df).equals(pandas_df)\n or (all(ray_df.isna().all()) and all(pandas_df.isna().all()))\n or to_pandas(ray_df)[list(pandas_df.columns)].equals(pandas_df)\n )\n # Pandas does not seem to be consistent with the way that it handles as_index.\n # Because the behavior is determined to be non-deterministic we will at least check\n # that everything else matches if we drop that column from the pandas side.\n except KeyError:\n assert to_pandas(ray_df).equals(pandas_df.drop(columns=[ray_df.index.name]))\n\n\ndef ray_df_almost_equals_pandas(ray_df, pandas_df):\n assert isinstance(ray_df, pd.DataFrame)\n difference = to_pandas(ray_df) - pandas_df\n diff_max = difference.max().max()\n assert (\n to_pandas(ray_df).equals(pandas_df)\n or diff_max < 0.0001\n or (all(ray_df.isna().all()) and all(pandas_df.isna().all()))\n )\n\n\ndef ray_series_equals_pandas(ray_df, pandas_df):\n assert ray_df.equals(pandas_df)\n\n\ndef ray_df_equals(ray_df1, ray_df2):\n assert to_pandas(ray_df1).equals(to_pandas(ray_df2))\n\n\ndef ray_groupby_equals_pandas(ray_groupby, pandas_groupby):\n for g1, g2 in zip(ray_groupby, pandas_groupby):\n assert g1[0] == g2[0]\n ray_df_equals_pandas(g1[1], g2[1])\n\n\ndef test_mixed_dtypes_groupby():\n frame_data = np.random.randint(97, 198, size=(2 ** 6, 2 ** 4))\n pandas_df = pandas.DataFrame(frame_data).add_prefix(\"col\")\n # Convert every other column to string\n for col in pandas_df.iloc[\n :, [i for i in range(len(pandas_df.columns)) if i % 2 == 0]\n ]:\n pandas_df[col] = [str(chr(i)) for i in pandas_df[col]]\n ray_df = from_pandas(pandas_df)\n\n n = 1\n\n by_values = [\"col1\", lambda x: x % 2, pandas_df.col1]\n\n for by in by_values:\n ray_groupby = ray_df.groupby(by=by)\n pandas_groupby = pandas_df.groupby(by=by)\n\n ray_groupby_equals_pandas(ray_groupby, pandas_groupby)\n eval_ngroups(ray_groupby, pandas_groupby)\n eval_skew(ray_groupby, pandas_groupby)\n eval_ffill(ray_groupby, pandas_groupby)\n eval_sem(ray_groupby, pandas_groupby)\n eval_mean(ray_groupby, pandas_groupby)\n eval_any(ray_groupby, pandas_groupby)\n eval_min(ray_groupby, pandas_groupby)\n eval_idxmax(ray_groupby, pandas_groupby)\n eval_ndim(ray_groupby, pandas_groupby)\n eval_cumsum(ray_groupby, pandas_groupby)\n eval_pct_change(ray_groupby, pandas_groupby)\n eval_cummax(ray_groupby, pandas_groupby)\n\n # TODO Add more apply functions\n apply_functions = [lambda df: df.sum(), min]\n for func in apply_functions:\n eval_apply(ray_groupby, pandas_groupby, func)\n\n eval_dtypes(ray_groupby, pandas_groupby)\n eval_first(ray_groupby, pandas_groupby)\n eval_backfill(ray_groupby, pandas_groupby)\n eval_cummin(ray_groupby, pandas_groupby)\n eval_bfill(ray_groupby, pandas_groupby)\n eval_idxmin(ray_groupby, pandas_groupby)\n eval_prod(ray_groupby, pandas_groupby)\n eval_std(ray_groupby, pandas_groupby)\n\n agg_functions = [\"min\", \"max\"]\n for func in agg_functions:\n eval_agg(ray_groupby, pandas_groupby, func)\n eval_aggregate(ray_groupby, pandas_groupby, func)\n\n eval_last(ray_groupby, pandas_groupby)\n eval_mad(ray_groupby, pandas_groupby)\n eval_max(ray_groupby, 
pandas_groupby)\n eval_var(ray_groupby, pandas_groupby)\n eval_len(ray_groupby, pandas_groupby)\n eval_sum(ray_groupby, pandas_groupby)\n eval_ngroup(ray_groupby, pandas_groupby)\n eval_nunique(ray_groupby, pandas_groupby)\n eval_median(ray_groupby, pandas_groupby)\n eval_head(ray_groupby, pandas_groupby, n)\n eval_cumprod(ray_groupby, pandas_groupby)\n eval_cov(ray_groupby, pandas_groupby)\n\n transform_functions = [lambda df: df + 4, lambda df: -df - 10]\n for func in transform_functions:\n eval_transform(ray_groupby, pandas_groupby, func)\n\n pipe_functions = [lambda dfgb: dfgb.sum()]\n for func in pipe_functions:\n eval_pipe(ray_groupby, pandas_groupby, func)\n\n eval_corr(ray_groupby, pandas_groupby)\n eval_fillna(ray_groupby, pandas_groupby)\n eval_count(ray_groupby, pandas_groupby)\n eval_tail(ray_groupby, pandas_groupby, n)\n eval_quantile(ray_groupby, pandas_groupby)\n eval_take(ray_groupby, pandas_groupby)\n eval___getattr__(ray_groupby, pandas_groupby)\n eval_groups(ray_groupby, pandas_groupby)\n\n\ndef test_simple_row_groupby():\n pandas_df = pandas.DataFrame(\n {\n \"col1\": [0, 1, 2, 3],\n \"col2\": [4, 5, 6, 7],\n \"col3\": [3, 8, 12, 10],\n \"col4\": [17, 13, 16, 15],\n \"col5\": [-4, -5, -6, -7],\n }\n )\n\n ray_df = from_pandas(pandas_df)\n\n by_values = [[1, 2, 1, 2], lambda x: x % 3, pandas_df.col1]\n n = 1\n for by in by_values:\n ray_groupby = ray_df.groupby(by=by)\n pandas_groupby = pandas_df.groupby(by=by)\n\n ray_groupby_equals_pandas(ray_groupby, pandas_groupby)\n eval_ngroups(ray_groupby, pandas_groupby)\n eval_skew(ray_groupby, pandas_groupby)\n eval_ffill(ray_groupby, pandas_groupby)\n eval_sem(ray_groupby, pandas_groupby)\n eval_mean(ray_groupby, pandas_groupby)\n eval_any(ray_groupby, pandas_groupby)\n eval_min(ray_groupby, pandas_groupby)\n eval_idxmax(ray_groupby, pandas_groupby)\n eval_ndim(ray_groupby, pandas_groupby)\n eval_cumsum(ray_groupby, pandas_groupby)\n eval_pct_change(ray_groupby, pandas_groupby)\n eval_cummax(ray_groupby, pandas_groupby)\n\n apply_functions = [lambda df: df.sum(), lambda df: -df]\n for func in apply_functions:\n eval_apply(ray_groupby, pandas_groupby, func)\n\n eval_dtypes(ray_groupby, pandas_groupby)\n eval_first(ray_groupby, pandas_groupby)\n eval_backfill(ray_groupby, pandas_groupby)\n eval_cummin(ray_groupby, pandas_groupby)\n eval_bfill(ray_groupby, pandas_groupby)\n eval_idxmin(ray_groupby, pandas_groupby)\n eval_prod(ray_groupby, pandas_groupby)\n eval_std(ray_groupby, pandas_groupby)\n\n agg_functions = [\"min\", \"max\"]\n for func in agg_functions:\n eval_agg(ray_groupby, pandas_groupby, func)\n eval_aggregate(ray_groupby, pandas_groupby, func)\n\n eval_last(ray_groupby, pandas_groupby)\n eval_mad(ray_groupby, pandas_groupby)\n eval_rank(ray_groupby, pandas_groupby)\n eval_max(ray_groupby, pandas_groupby)\n eval_var(ray_groupby, pandas_groupby)\n eval_len(ray_groupby, pandas_groupby)\n eval_sum(ray_groupby, pandas_groupby)\n eval_ngroup(ray_groupby, pandas_groupby)\n eval_nunique(ray_groupby, pandas_groupby)\n eval_median(ray_groupby, pandas_groupby)\n eval_head(ray_groupby, pandas_groupby, n)\n eval_cumprod(ray_groupby, pandas_groupby)\n eval_cov(ray_groupby, pandas_groupby)\n\n transform_functions = [lambda df: df + 4, lambda df: -df - 10]\n for func in transform_functions:\n eval_transform(ray_groupby, pandas_groupby, func)\n\n pipe_functions = [lambda dfgb: dfgb.sum()]\n for func in pipe_functions:\n eval_pipe(ray_groupby, pandas_groupby, func)\n\n eval_corr(ray_groupby, pandas_groupby)\n eval_fillna(ray_groupby, 
pandas_groupby)\n eval_count(ray_groupby, pandas_groupby)\n eval_tail(ray_groupby, pandas_groupby, n)\n eval_quantile(ray_groupby, pandas_groupby)\n eval_take(ray_groupby, pandas_groupby)\n eval___getattr__(ray_groupby, pandas_groupby)\n eval_groups(ray_groupby, pandas_groupby)\n\n\ndef test_single_group_row_groupby():\n pandas_df = pandas.DataFrame(\n {\n \"col1\": [0, 1, 2, 3],\n \"col2\": [4, 5, 36, 7],\n \"col3\": [3, 8, 12, 10],\n \"col4\": [17, 3, 16, 15],\n \"col5\": [-4, 5, -6, -7],\n }\n )\n\n ray_df = from_pandas(pandas_df)\n\n by = [\"1\", \"1\", \"1\", \"1\"]\n n = 6\n\n ray_groupby = ray_df.groupby(by=by)\n pandas_groupby = pandas_df.groupby(by=by)\n\n ray_groupby_equals_pandas(ray_groupby, pandas_groupby)\n eval_ngroups(ray_groupby, pandas_groupby)\n eval_skew(ray_groupby, pandas_groupby)\n eval_ffill(ray_groupby, pandas_groupby)\n eval_sem(ray_groupby, pandas_groupby)\n eval_mean(ray_groupby, pandas_groupby)\n eval_any(ray_groupby, pandas_groupby)\n eval_min(ray_groupby, pandas_groupby)\n eval_idxmax(ray_groupby, pandas_groupby)\n eval_ndim(ray_groupby, pandas_groupby)\n eval_cumsum(ray_groupby, pandas_groupby)\n eval_pct_change(ray_groupby, pandas_groupby)\n eval_cummax(ray_groupby, pandas_groupby)\n\n apply_functions = [lambda df: df.sum(), lambda df: -df]\n for func in apply_functions:\n eval_apply(ray_groupby, pandas_groupby, func)\n\n eval_dtypes(ray_groupby, pandas_groupby)\n eval_first(ray_groupby, pandas_groupby)\n eval_backfill(ray_groupby, pandas_groupby)\n eval_cummin(ray_groupby, pandas_groupby)\n eval_bfill(ray_groupby, pandas_groupby)\n eval_idxmin(ray_groupby, pandas_groupby)\n eval_prod(ray_groupby, pandas_groupby)\n eval_std(ray_groupby, pandas_groupby)\n\n agg_functions = [\"min\", \"max\"]\n for func in agg_functions:\n eval_agg(ray_groupby, pandas_groupby, func)\n eval_aggregate(ray_groupby, pandas_groupby, func)\n\n eval_last(ray_groupby, pandas_groupby)\n eval_mad(ray_groupby, pandas_groupby)\n eval_rank(ray_groupby, pandas_groupby)\n eval_max(ray_groupby, pandas_groupby)\n eval_var(ray_groupby, pandas_groupby)\n eval_len(ray_groupby, pandas_groupby)\n eval_sum(ray_groupby, pandas_groupby)\n eval_ngroup(ray_groupby, pandas_groupby)\n eval_nunique(ray_groupby, pandas_groupby)\n eval_median(ray_groupby, pandas_groupby)\n eval_head(ray_groupby, pandas_groupby, n)\n eval_cumprod(ray_groupby, pandas_groupby)\n eval_cov(ray_groupby, pandas_groupby)\n\n transform_functions = [lambda df: df + 4, lambda df: -df - 10]\n for func in transform_functions:\n eval_transform(ray_groupby, pandas_groupby, func)\n\n pipe_functions = [lambda dfgb: dfgb.sum()]\n for func in pipe_functions:\n eval_pipe(ray_groupby, pandas_groupby, func)\n\n eval_corr(ray_groupby, pandas_groupby)\n eval_fillna(ray_groupby, pandas_groupby)\n eval_count(ray_groupby, pandas_groupby)\n eval_tail(ray_groupby, pandas_groupby, n)\n eval_quantile(ray_groupby, pandas_groupby)\n eval_take(ray_groupby, pandas_groupby)\n eval___getattr__(ray_groupby, pandas_groupby)\n eval_groups(ray_groupby, pandas_groupby)\n\n\ndef test_large_row_groupby():\n pandas_df = pandas.DataFrame(\n np.random.randint(0, 8, size=(100, 4)), columns=list(\"ABCD\")\n )\n\n ray_df = from_pandas(pandas_df)\n\n by = [str(i) for i in pandas_df[\"A\"].tolist()]\n n = 4\n\n ray_groupby = ray_df.groupby(by=by)\n pandas_groupby = pandas_df.groupby(by=by)\n\n ray_groupby_equals_pandas(ray_groupby, pandas_groupby)\n eval_ngroups(ray_groupby, pandas_groupby)\n eval_skew(ray_groupby, pandas_groupby)\n eval_ffill(ray_groupby, pandas_groupby)\n 
eval_sem(ray_groupby, pandas_groupby)\n eval_mean(ray_groupby, pandas_groupby)\n eval_any(ray_groupby, pandas_groupby)\n eval_min(ray_groupby, pandas_groupby)\n eval_idxmax(ray_groupby, pandas_groupby)\n eval_ndim(ray_groupby, pandas_groupby)\n eval_cumsum(ray_groupby, pandas_groupby)\n eval_pct_change(ray_groupby, pandas_groupby)\n eval_cummax(ray_groupby, pandas_groupby)\n\n apply_functions = [lambda df: df.sum(), lambda df: -df]\n for func in apply_functions:\n eval_apply(ray_groupby, pandas_groupby, func)\n\n eval_dtypes(ray_groupby, pandas_groupby)\n eval_first(ray_groupby, pandas_groupby)\n eval_backfill(ray_groupby, pandas_groupby)\n eval_cummin(ray_groupby, pandas_groupby)\n eval_bfill(ray_groupby, pandas_groupby)\n eval_idxmin(ray_groupby, pandas_groupby)\n # eval_prod(ray_groupby, pandas_groupby) causes overflows\n eval_std(ray_groupby, pandas_groupby)\n\n agg_functions = [\"min\", \"max\"]\n for func in agg_functions:\n eval_agg(ray_groupby, pandas_groupby, func)\n eval_aggregate(ray_groupby, pandas_groupby, func)\n\n eval_last(ray_groupby, pandas_groupby)\n eval_mad(ray_groupby, pandas_groupby)\n eval_rank(ray_groupby, pandas_groupby)\n eval_max(ray_groupby, pandas_groupby)\n eval_var(ray_groupby, pandas_groupby)\n eval_len(ray_groupby, pandas_groupby)\n eval_sum(ray_groupby, pandas_groupby)\n eval_ngroup(ray_groupby, pandas_groupby)\n eval_nunique(ray_groupby, pandas_groupby)\n eval_median(ray_groupby, pandas_groupby)\n eval_head(ray_groupby, pandas_groupby, n)\n # eval_cumprod(ray_groupby, pandas_groupby) causes overflows\n eval_cov(ray_groupby, pandas_groupby)\n\n transform_functions = [lambda df: df + 4, lambda df: -df - 10]\n for func in transform_functions:\n eval_transform(ray_groupby, pandas_groupby, func)\n\n pipe_functions = [lambda dfgb: dfgb.sum()]\n for func in pipe_functions:\n eval_pipe(ray_groupby, pandas_groupby, func)\n\n eval_corr(ray_groupby, pandas_groupby)\n eval_fillna(ray_groupby, pandas_groupby)\n eval_count(ray_groupby, pandas_groupby)\n eval_tail(ray_groupby, pandas_groupby, n)\n eval_quantile(ray_groupby, pandas_groupby)\n eval_take(ray_groupby, pandas_groupby)\n eval_groups(ray_groupby, pandas_groupby)\n\n\ndef test_simple_col_groupby():\n pandas_df = pandas.DataFrame(\n {\n \"col1\": [0, 3, 2, 3],\n \"col2\": [4, 1, 6, 7],\n \"col3\": [3, 8, 2, 10],\n \"col4\": [1, 13, 6, 15],\n \"col5\": [-4, 5, 6, -7],\n }\n )\n\n ray_df = from_pandas(pandas_df)\n\n by = [1, 2, 3, 2, 1]\n\n ray_groupby = ray_df.groupby(axis=1, by=by)\n pandas_groupby = pandas_df.groupby(axis=1, by=by)\n\n ray_groupby_equals_pandas(ray_groupby, pandas_groupby)\n eval_ngroups(ray_groupby, pandas_groupby)\n eval_skew(ray_groupby, pandas_groupby)\n eval_ffill(ray_groupby, pandas_groupby)\n eval_sem(ray_groupby, pandas_groupby)\n eval_mean(ray_groupby, pandas_groupby)\n eval_any(ray_groupby, pandas_groupby)\n eval_min(ray_groupby, pandas_groupby)\n eval_ndim(ray_groupby, pandas_groupby)\n\n eval_idxmax(ray_groupby, pandas_groupby)\n eval_idxmin(ray_groupby, pandas_groupby)\n eval_quantile(ray_groupby, pandas_groupby)\n\n # https://github.com/pandas-dev/pandas/issues/21127\n # eval_cumsum(ray_groupby, pandas_groupby)\n # eval_cummax(ray_groupby, pandas_groupby)\n # eval_cummin(ray_groupby, pandas_groupby)\n # eval_cumprod(ray_groupby, pandas_groupby)\n\n eval_pct_change(ray_groupby, pandas_groupby)\n apply_functions = [lambda df: -df, lambda df: df.sum(axis=1)]\n for func in apply_functions:\n eval_apply(ray_groupby, pandas_groupby, func)\n\n eval_first(ray_groupby, pandas_groupby)\n 
eval_backfill(ray_groupby, pandas_groupby)\n eval_bfill(ray_groupby, pandas_groupby)\n eval_prod(ray_groupby, pandas_groupby)\n eval_std(ray_groupby, pandas_groupby)\n eval_last(ray_groupby, pandas_groupby)\n eval_mad(ray_groupby, pandas_groupby)\n eval_max(ray_groupby, pandas_groupby)\n eval_var(ray_groupby, pandas_groupby)\n eval_len(ray_groupby, pandas_groupby)\n eval_sum(ray_groupby, pandas_groupby)\n\n # Pandas fails on this case with ValueError\n # eval_ngroup(ray_groupby, pandas_groupby)\n # eval_nunique(ray_groupby, pandas_groupby)\n eval_median(ray_groupby, pandas_groupby)\n eval_cov(ray_groupby, pandas_groupby)\n\n transform_functions = [lambda df: df + 4, lambda df: -df - 10]\n for func in transform_functions:\n eval_transform(ray_groupby, pandas_groupby, func)\n\n pipe_functions = [lambda dfgb: dfgb.sum()]\n for func in pipe_functions:\n eval_pipe(ray_groupby, pandas_groupby, func)\n\n eval_corr(ray_groupby, pandas_groupby)\n eval_fillna(ray_groupby, pandas_groupby)\n eval_count(ray_groupby, pandas_groupby)\n eval_take(ray_groupby, pandas_groupby)\n eval___getattr__(ray_groupby, pandas_groupby)\n eval_groups(ray_groupby, pandas_groupby)\n\n\ndef test_multi_column_groupby():\n pandas_df = pandas.DataFrame(\n {\n \"col1\": np.random.randint(0, 100, size=1000),\n \"col2\": np.random.randint(0, 100, size=1000),\n \"col3\": np.random.randint(0, 100, size=1000),\n \"col4\": np.random.randint(0, 100, size=1000),\n \"col5\": np.random.randint(0, 100, size=1000),\n },\n index=[\"row{}\".format(i) for i in range(1000)],\n )\n\n ray_df = from_pandas(pandas_df)\n by = [\"col1\", \"col2\"]\n\n with pytest.warns(UserWarning):\n ray_df.groupby(by).count()\n\n with pytest.warns(UserWarning):\n for k, _ in ray_df.groupby(by):\n assert isinstance(k, tuple)\n\n by = [\"row0\", \"row1\"]\n with pytest.raises(KeyError):\n ray_df.groupby(by, axis=1).count()\n\n\ndef eval_ngroups(ray_groupby, pandas_groupby):\n assert ray_groupby.ngroups == pandas_groupby.ngroups\n\n\ndef eval_skew(ray_groupby, pandas_groupby):\n ray_df_almost_equals_pandas(ray_groupby.skew(), pandas_groupby.skew())\n\n\ndef eval_ffill(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n try:\n ray_groupby.ffill()\n except Exception:\n pass\n\n\ndef eval_sem(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby.sem()\n\n\ndef eval_mean(ray_groupby, pandas_groupby):\n ray_df_almost_equals_pandas(ray_groupby.mean(), pandas_groupby.mean())\n\n\ndef eval_any(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(ray_groupby.any(), pandas_groupby.any())\n\n\ndef eval_min(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(ray_groupby.min(), pandas_groupby.min())\n\n\ndef eval_idxmax(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby.idxmax()\n\n\ndef eval_ndim(ray_groupby, pandas_groupby):\n assert ray_groupby.ndim == pandas_groupby.ndim\n\n\ndef eval_cumsum(ray_groupby, pandas_groupby, axis=0):\n ray_df_equals_pandas(\n ray_groupby.cumsum(axis=axis), pandas_groupby.cumsum(axis=axis)\n )\n\n\ndef eval_pct_change(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n try:\n ray_groupby.pct_change()\n except Exception:\n pass\n\n\ndef eval_cummax(ray_groupby, pandas_groupby, axis=0):\n ray_df_equals_pandas(\n ray_groupby.cummax(axis=axis), pandas_groupby.cummax(axis=axis)\n )\n\n\ndef eval_apply(ray_groupby, pandas_groupby, func):\n ray_df_equals_pandas(ray_groupby.apply(func), pandas_groupby.apply(func))\n\n\ndef eval_dtypes(ray_groupby, pandas_groupby):\n 
ray_df_equals_pandas(ray_groupby.dtypes, pandas_groupby.dtypes)\n\n\ndef eval_first(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby.first()\n\n\ndef eval_backfill(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n try:\n ray_groupby.backfill()\n except Exception:\n pass\n\n\ndef eval_cummin(ray_groupby, pandas_groupby, axis=0):\n ray_df_equals_pandas(\n ray_groupby.cummin(axis=axis), pandas_groupby.cummin(axis=axis)\n )\n\n\ndef eval_bfill(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n try:\n ray_groupby.bfill()\n except Exception:\n pass\n\n\ndef eval_idxmin(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby.idxmin()\n\n\ndef eval_prod(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(ray_groupby.prod(), pandas_groupby.prod())\n\n\ndef eval_std(ray_groupby, pandas_groupby):\n ray_df_almost_equals_pandas(ray_groupby.std(), pandas_groupby.std())\n\n\ndef eval_aggregate(ray_groupby, pandas_groupby, func):\n ray_df_equals_pandas(ray_groupby.aggregate(func), pandas_groupby.aggregate(func))\n\n\ndef eval_agg(ray_groupby, pandas_groupby, func):\n ray_df_equals_pandas(ray_groupby.agg(func), pandas_groupby.agg(func))\n\n\ndef eval_last(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby.last()\n\n\ndef eval_mad(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby.mad()\n\n\ndef eval_rank(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(ray_groupby.rank(), pandas_groupby.rank())\n\n\ndef eval_max(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(ray_groupby.max(), pandas_groupby.max())\n\n\ndef eval_var(ray_groupby, pandas_groupby):\n ray_df_almost_equals_pandas(ray_groupby.var(), pandas_groupby.var())\n\n\ndef eval_len(ray_groupby, pandas_groupby):\n assert len(ray_groupby) == len(pandas_groupby)\n\n\ndef eval_sum(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(ray_groupby.sum(), pandas_groupby.sum())\n\n\ndef eval_ngroup(ray_groupby, pandas_groupby):\n ray_series_equals_pandas(ray_groupby.ngroup(), pandas_groupby.ngroup())\n\n\ndef eval_nunique(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(ray_groupby.nunique(), pandas_groupby.nunique())\n\n\ndef eval_median(ray_groupby, pandas_groupby):\n ray_df_almost_equals_pandas(ray_groupby.median(), pandas_groupby.median())\n\n\ndef eval_head(ray_groupby, pandas_groupby, n):\n with pytest.warns(UserWarning):\n ray_groupby.head()\n\n\ndef eval_cumprod(ray_groupby, pandas_groupby, axis=0):\n ray_df_equals_pandas(ray_groupby.cumprod(), pandas_groupby.cumprod())\n ray_df_equals_pandas(\n ray_groupby.cumprod(axis=axis), pandas_groupby.cumprod(axis=axis)\n )\n\n\ndef eval_cov(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby.cov()\n\n\ndef eval_transform(ray_groupby, pandas_groupby, func):\n ray_df_equals_pandas(ray_groupby.transform(func), pandas_groupby.transform(func))\n\n\ndef eval_corr(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby.corr()\n\n\ndef eval_fillna(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(\n ray_groupby.fillna(method=\"ffill\"), pandas_groupby.fillna(method=\"ffill\")\n )\n\n\ndef eval_count(ray_groupby, pandas_groupby):\n ray_df_equals_pandas(ray_groupby.count(), pandas_groupby.count())\n\n\ndef eval_pipe(ray_groupby, pandas_groupby, func):\n ray_df_equals_pandas(ray_groupby.pipe(func), pandas_groupby.pipe(func))\n\n\ndef eval_tail(ray_groupby, pandas_groupby, n):\n with pytest.warns(UserWarning):\n 
ray_groupby.tail()\n\n\ndef eval_quantile(ray_groupby, pandas_groupby):\n try:\n pandas_result = pandas_groupby.quantile(q=0.4)\n except Exception as e:\n with pytest.raises(type(e)):\n ray_groupby.quantile(q=0.4)\n else:\n ray_df_equals_pandas(ray_groupby.quantile(q=0.4), pandas_result)\n\n\ndef eval_take(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n try:\n ray_groupby.take()\n except Exception:\n pass\n\n\ndef eval___getattr__(ray_groupby, pandas_groupby):\n with pytest.warns(UserWarning):\n ray_groupby[\"col1\"]\n\n with pytest.warns(UserWarning):\n ray_groupby.col1\n\n\ndef eval_groups(ray_groupby, pandas_groupby):\n for k, v in ray_groupby.groups.items():\n assert v.equals(pandas_groupby.groups[k])\n\n\ndef eval_shift(ray_groupby, pandas_groupby):\n assert ray_groupby.groups == pandas_groupby.groups\n\n\ndef test_groupby_on_index_values_with_loop():\n length = 2 ** 6\n data = {\n \"a\": np.random.randint(0, 100, size=length),\n \"b\": np.random.randint(0, 100, size=length),\n \"c\": np.random.randint(0, 100, size=length),\n }\n idx = [\"g1\" if i % 3 != 0 else \"g2\" for i in range(length)]\n modin_df = pd.DataFrame(data, index=idx, columns=list(\"aba\"))\n pandas_df = pandas.DataFrame(data, index=idx, columns=list(\"aba\"))\n modin_groupby_obj = modin_df.groupby(modin_df.index)\n pandas_groupby_obj = pandas_df.groupby(pandas_df.index)\n\n modin_dict = {k: v for k, v in modin_groupby_obj}\n pandas_dict = {k: v for k, v in pandas_groupby_obj}\n\n for k in modin_dict:\n ray_df_equals_pandas(modin_dict[k], pandas_dict[k])\n\n modin_groupby_obj = modin_df.groupby(modin_df.columns, axis=1)\n pandas_groupby_obj = pandas_df.groupby(pandas_df.columns, axis=1)\n\n modin_dict = {k: v for k, v in modin_groupby_obj}\n pandas_dict = {k: v for k, v in pandas_groupby_obj}\n\n for k in modin_dict:\n ray_df_equals_pandas(modin_dict[k], pandas_dict[k])\n\n\ndef test_groupby_multiindex():\n frame_data = np.random.randint(0, 100, size=(2 ** 6, 2 ** 4))\n modin_df = pd.DataFrame(frame_data)\n pandas_df = pandas.DataFrame(frame_data)\n\n new_columns = pandas.MultiIndex.from_tuples(\n [(i // 4, i // 2, i) for i in modin_df.columns], names=[\"four\", \"two\", \"one\"]\n )\n modin_df.columns = new_columns\n pandas_df.columns = new_columns\n modin_df.groupby(level=1, axis=1).sum()\n\n modin_df = modin_df.T\n pandas_df = pandas_df.T\n ray_df_equals_pandas(\n modin_df.groupby(level=1).count(), pandas_df.groupby(level=1).count()\n )\n ray_df_equals_pandas(\n modin_df.groupby(by=\"four\").count(), pandas_df.groupby(by=\"four\").count()\n )\n\n by = [\"one\", \"two\"]\n ray_df_equals_pandas(\n modin_df.groupby(by=by).count(), pandas_df.groupby(by=by).count()\n )\n"
] | [
[
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
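Note on the modin test row above: every eval_* helper follows the same pattern of running one operation on a modin groupby and on the matching pandas groupby, then comparing through to_pandas. A minimal, self-contained sketch of that round trip (column values are arbitrary; the real ray_df_equals_pandas helper above adds fallbacks for column ordering and all-NaN frames):

import pandas
from modin.pandas.utils import from_pandas, to_pandas

pandas_df = pandas.DataFrame({"col1": [0, 1, 0, 1], "col2": [4, 5, 6, 7]})
modin_df = from_pandas(pandas_df)

pandas_out = pandas_df.groupby("col1").sum()
modin_out = modin_df.groupby("col1").sum()
assert to_pandas(modin_out).equals(pandas_out)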
Iximiel/dscribe | [
"1dd845cb918a244714f835023bdc82d95719eef1"
] | [
"dscribe/descriptors/matrixdescriptor.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Copyright 2019 DScribe developers\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\nfrom numpy.random import RandomState\n\nimport sparse\n\nfrom dscribe.descriptors import Descriptor\nfrom abc import abstractmethod\n\n\nclass MatrixDescriptor(Descriptor):\n \"\"\"A common base class for two-body matrix-like descriptors.\"\"\"\n\n def __init__(\n self,\n n_atoms_max,\n permutation=\"sorted_l2\",\n sigma=None,\n seed=None,\n flatten=True,\n sparse=False,\n ):\n \"\"\"\n Args:\n n_atoms_max (int): The maximum nuber of atoms that any of the\n samples can have. This controls how much zeros need to be\n padded to the final result.\n permutation (string): Defines the method for handling permutational\n invariance. Can be one of the following:\n - none: The matrix is returned in the order defined by the\n Atoms.\n - sorted_l2: The rows and columns are sorted by the L2 norm.\n - eigenspectrum: Only the eigenvalues are returned sorted\n by their absolute value in descending order.\n - random: The rows and columns are sorted by their L2 norm\n after applying Gaussian noise to the norms. The standard\n deviation of the noise is determined by the\n sigma-parameter.\n sigma (float): Provide only when using the *random*-permutation\n option. Standard deviation of the gaussian distributed noise\n determining how much the rows and columns of the randomly\n sorted matrix are scrambled.\n seed (int): Provide only when using the *random*-permutation\n option. A seed to use for drawing samples from a normal\n distribution.\n flatten (bool): Whether the output of create() should be flattened\n to a 1D array.\n sparse (bool): Whether the output should be a sparse matrix or a\n dense numpy array.\n \"\"\"\n super().__init__(periodic=False, flatten=flatten, sparse=sparse)\n\n # Check parameter validity\n if n_atoms_max <= 0:\n raise ValueError(\"The maximum number of atoms must be a positive number.\")\n perm_options = set(\n (\"sorted_l2\", \"none\", \"eigenspectrum\", \"eigenspectrum\", \"random\")\n )\n if permutation not in perm_options:\n raise ValueError(\n \"Unknown permutation option given. 
Please use one of the \"\n \"following: {}.\".format(\", \".join(perm_options))\n )\n\n if not sigma and permutation == \"random\":\n raise ValueError(\"Please specify sigma as a degree of random noise.\")\n\n # Raise a value error if sigma specified, but random sorting not used\n if permutation != \"random\" and sigma is not None:\n raise ValueError(\n \"Sigma value specified but the parameter 'permutation' not set \"\n \"as 'random'.\"\n )\n\n self.seed = seed\n self.random_state = RandomState(seed)\n self.n_atoms_max = n_atoms_max\n self.permutation = permutation\n self._norm_vector = None\n self.sigma = sigma\n\n def get_matrix(self, system):\n \"\"\"Used to get the final matrix for this descriptor.\n\n Args:\n system (:class:`ase.Atoms` | :class:`.System`): Input system.\n\n Returns:\n np.ndarray: The final two-dimensional matrix for this descriptor.\n \"\"\"\n\n def create_single(self, system):\n \"\"\"\n Args:\n system (:class:`ase.Atoms` | :class:`.System`): Input system.\n\n Returns:\n ndarray: The zero padded matrix either as a 2D array or as\n a 1D array depending on the setting self._flatten.\n \"\"\"\n # Remove the old norm vector for the new system\n self._norm_vector = None\n\n matrix = self.get_matrix(system)\n\n # Handle the permutation option\n if self.permutation == \"none\":\n pass\n elif self.permutation == \"sorted_l2\":\n matrix = self.sort(matrix)\n elif self.permutation == \"eigenspectrum\":\n matrix = self.get_eigenspectrum(matrix)\n elif self.permutation == \"random\":\n matrix = self.sort_randomly(matrix, self.sigma)\n\n # Add zero padding\n matrix = self.zero_pad(matrix)\n\n # Flatten\n if self.permutation == \"eigenspectrum\" or self._flatten:\n matrix = np.reshape(matrix, (matrix.size,))\n\n # If a sparse matrix is requested, convert to coo_matrix\n if self._sparse:\n matrix = sparse.COO.from_numpy(matrix)\n\n return matrix\n\n def sort(self, matrix):\n \"\"\"Sorts the given matrix by using the L2 norm.\n\n Args:\n matrix(np.ndarray): The matrix to sort.\n\n Returns:\n np.ndarray: The sorted matrix.\n \"\"\"\n # Sort the atoms such that the norms of the rows are in descending\n # order\n norms = np.linalg.norm(matrix, axis=1)\n sorted_indices = np.argsort(norms, axis=0)[::-1]\n sorted_matrix = matrix[sorted_indices]\n sorted_matrix = sorted_matrix[:, sorted_indices]\n\n return sorted_matrix\n\n def get_eigenspectrum(self, matrix):\n \"\"\"Calculates the eigenvalues of the matrix and returns a list of them\n sorted by their descending absolute value.\n\n Args:\n matrix(np.ndarray): The matrix to sort.\n\n Returns:\n np.ndarray: A list of eigenvalues sorted by absolute value.\n \"\"\"\n # Calculate eigenvalues\n eigenvalues, _ = np.linalg.eig(matrix)\n\n # Remove sign\n abs_values = np.absolute(eigenvalues)\n\n # Get ordering that sorts the values in descending order by absolute\n # value\n sorted_indices = np.argsort(abs_values)[::-1]\n eigenvalues = eigenvalues[sorted_indices]\n\n return eigenvalues\n\n def zero_pad(self, array):\n \"\"\"Zero-pads the given matrix.\n\n Args:\n array (np.ndarray): The array to pad\n\n Returns:\n np.ndarray: The zero-padded array.\n \"\"\"\n # Pad with zeros\n n_atoms = array.shape[0]\n n_dim = array.ndim\n padded = np.pad(array, [(0, self.n_atoms_max - n_atoms)] * n_dim, \"constant\")\n\n return padded\n\n def get_number_of_features(self):\n \"\"\"Used to inquire the final number of features that this descriptor\n will have.\n\n Returns:\n int: Number of features for this descriptor.\n \"\"\"\n if self.permutation == 
\"eigenspectrum\":\n return int(self.n_atoms_max)\n else:\n return int(self.n_atoms_max ** 2)\n\n def sort_randomly(self, matrix, sigma):\n \"\"\"\n Given a coulomb matrix, it adds random noise to the sorting defined by\n sigma. For sorting, L2-norm is used.\n\n Args:\n matrix(np.ndarray): The matrix to randomly sort.\n\n sigma:\n float: Width of gaussian distributed noise determining how much the\n rows and columns of the randomly sorted coulomb matrix are\n scrambled.\n\n Returns:\n np.ndarray: The randomly sorted matrix.\n \"\"\"\n norm_vector = self._get_norm_vector(matrix)\n noise_norm_vector = self.random_state.normal(norm_vector, sigma)\n indexlist = np.argsort(noise_norm_vector)\n indexlist = indexlist[::-1] # Order highest to lowest\n\n matrix = matrix[indexlist][:, indexlist]\n\n return matrix\n\n def _get_norm_vector(self, matrix):\n \"\"\"\n Takes a coulomb matrix as input. Returns L2 norm of each row / column in a 1D-array.\n Args:\n matrix(np.ndarray): The matrix to sort.\n\n Returns:\n np.ndarray: L2 norm of each row / column.\n\n \"\"\"\n if self._norm_vector is None:\n self._norm_vector = np.linalg.norm(matrix, axis=1)\n return self._norm_vector\n"
] | [
[
"numpy.absolute",
"numpy.pad",
"numpy.reshape",
"numpy.linalg.eig",
"numpy.linalg.norm",
"numpy.argsort",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sheeptako/bi-lstm-crf | [
"5a8a4f15d085b716ef654f6ca0eba03e579c1009"
] | [
"bi_lstm_crf/model/crf.py"
] | [
"import torch\nimport torch.nn as nn\n\n\ndef log_sum_exp(x):\n \"\"\"calculate log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x))))\n \"\"\"\n max_score = x.max(-1)[0]\n return max_score + (x - max_score.unsqueeze(-1)).exp().sum(-1).log()\n\n\nIMPOSSIBLE = -1e4\n\n\nclass CRF(nn.Module):\n \"\"\"General CRF module.\n The CRF module contain a inner Linear Layer which transform the input from features space to tag space.\n\n :param in_features: number of features for the input\n :param num_tag: number of tags. DO NOT include START, STOP tags, they are included internal.\n \"\"\"\n\n def __init__(self, in_features, num_tags):\n super(CRF, self).__init__()\n\n self.num_tags = num_tags + 2\n self.start_idx = self.num_tags - 2\n self.stop_idx = self.num_tags - 1\n\n self.fc = nn.Linear(in_features, self.num_tags)\n\n # transition factor, Tij mean transition from j to i\n self.transitions = nn.Parameter(torch.randn(self.num_tags, self.num_tags), requires_grad=True)\n self.transitions.data[self.start_idx, :] = IMPOSSIBLE\n self.transitions.data[:, self.stop_idx] = IMPOSSIBLE\n\n def forward(self, features, masks):\n \"\"\"decode tags\n\n :param features: [B, L, C], batch of unary scores\n :param masks: [B, L] masks\n :return: (best_score, best_paths)\n best_score: [B]\n best_paths: [B, L]\n \"\"\"\n features = self.fc(features)\n return self.__viterbi_decode(features, masks[:, :features.size(1)].float())\n\n def loss(self, features, ys, masks):\n \"\"\"negative log likelihood loss\n B: batch size, L: sequence length, D: dimension\n\n :param features: [B, L, D]\n :param ys: tags, [B, L]\n :param masks: masks for padding, [B, L]\n :return: loss\n \"\"\"\n features = self.fc(features)\n\n L = features.size(1)\n masks_ = masks[:, :L].float()\n\n forward_score = self.__forward_algorithm(features, masks_)\n gold_score = self.__score_sentence(features, ys[:, :L].long(), masks_)\n loss = (forward_score - gold_score).mean()\n return loss\n\n def __score_sentence(self, features, tags, masks):\n \"\"\"Gives the score of a provided tag sequence\n\n :param features: [B, L, C]\n :param tags: [B, L]\n :param masks: [B, L]\n :return: [B] score in the log space\n \"\"\"\n B, L, C = features.shape\n\n # emission score\n emit_scores = features.gather(dim=2, index=tags.unsqueeze(-1)).squeeze(-1)\n\n # transition score\n start_tag = torch.full((B, 1), self.start_idx, dtype=torch.long, device=tags.device)\n tags = torch.cat([start_tag, tags], dim=1) # [B, L+1]\n trans_scores = self.transitions[tags[:, 1:], tags[:, :-1]]\n\n # last transition score to STOP tag\n last_tag = tags.gather(dim=1, index=masks.sum(1).long().unsqueeze(1)).squeeze(1) # [B]\n last_score = self.transitions[self.stop_idx, last_tag]\n\n score = ((trans_scores + emit_scores) * masks).sum(1) + last_score\n return score\n\n def __viterbi_decode(self, features, masks):\n \"\"\"decode to tags using viterbi algorithm\n\n :param features: [B, L, C], batch of unary scores\n :param masks: [B, L] masks\n :return: (best_score, best_paths)\n best_score: [B]\n best_paths: [B, L]\n \"\"\"\n B, L, C = features.shape\n\n bps = torch.zeros(B, L, C, dtype=torch.long, device=features.device) # back pointers\n\n # Initialize the viterbi variables in log space\n max_score = torch.full((B, C), IMPOSSIBLE, device=features.device) # [B, C]\n max_score[:, self.start_idx] = 0\n\n for t in range(L):\n mask_t = masks[:, t].unsqueeze(1) # [B, 1]\n emit_score_t = features[:, t] # [B, C]\n\n # [B, 1, C] + [C, C]\n acc_score_t = max_score.unsqueeze(1) + self.transitions # [B, C, 
C]\n acc_score_t, bps[:, t, :] = acc_score_t.max(dim=-1)\n acc_score_t += emit_score_t\n max_score = acc_score_t * mask_t + max_score * (1 - mask_t) # max_score or acc_score_t\n\n # Transition to STOP_TAG\n max_score += self.transitions[self.stop_idx]\n best_score, best_tag = max_score.max(dim=-1)\n\n # Follow the back pointers to decode the best path.\n best_paths = []\n bps = bps.cpu().numpy()\n for b in range(B):\n best_tag_b = best_tag[b].item()\n seq_len = int(masks[b, :].sum().item())\n best_path = [best_tag_b] + [bps_t[best_tag_b] for bps_t in reversed(bps[b, :seq_len])]\n # drop the last tag and reverse the left\n best_paths.append(best_path[-2::-1])\n\n return best_score, best_paths\n\n def __forward_algorithm(self, features, masks):\n \"\"\"calculate the partition function with forward algorithm.\n TRICK: log_sum_exp([x1, x2, x3, x4, ...]) = log_sum_exp([log_sum_exp([x1, x2]), log_sum_exp([x3, x4]), ...])\n\n :param features: features. [B, L, C]\n :param masks: [B, L] masks\n :return: [B], score in the log space\n \"\"\"\n B, L, C = features.shape\n\n scores = torch.full((B, C), IMPOSSIBLE, device=features.device) # [B, C]\n scores[:, self.start_idx] = 0.\n trans = self.transitions.unsqueeze(0) # [1, C, C]\n\n # Iterate through the sentence\n for t in range(L):\n emit_score_t = features[:, t].unsqueeze(2) # [B, C, 1]\n score_t = scores.unsqueeze(1) + trans + emit_score_t # [B, 1, C] + [1, C, C] + [B, C, 1] => [B, C, C]\n score_t = log_sum_exp(score_t) # [B, C]\n\n mask_t = masks[:, t].unsqueeze(1) # [B, 1]\n scores = score_t * mask_t + scores * (1 - mask_t)\n scores = log_sum_exp(scores + self.transitions[self.stop_idx])\n return scores\n"
] | [
[
"torch.full",
"torch.zeros",
"torch.cat",
"torch.randn",
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
martiansideofthemoon/mixmatch_lxmert | [
"bd2e9bee23ecdd2ba06d272e9686ddfbab9418b5"
] | [
"src/tasks/nlvr2_model.py"
] | [
"# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nimport torch.nn as nn\nfrom lxrt.modeling import GeLU, BertLayerNorm\nfrom lxrt.entry import LXRTEncoder\nfrom param import args\n\n\nclass NLVR2Model(nn.Module):\n def __init__(self):\n super().__init__()\n self.lxrt_encoder = LXRTEncoder(\n args,\n max_seq_length=20\n )\n self.hid_dim = hid_dim = self.lxrt_encoder.dim\n self.logit_fc = nn.Sequential(\n nn.Linear(hid_dim * 2, hid_dim * 2),\n GeLU(),\n BertLayerNorm(hid_dim * 2, eps=1e-12),\n nn.Linear(hid_dim * 2, 2)\n )\n self.logit_fc.apply(self.lxrt_encoder.model.init_bert_weights)\n\n def forward(self, feat, pos, sent, mixup_tensor_fn=None):\n \"\"\"\n :param feat: b, 2, o, f\n :param pos: b, 2, o, 4\n :param sent: b, (string)\n :param leng: b, (numpy, int)\n :return:\n \"\"\"\n # Pairing images and sentences:\n # The input of NLVR2 is two images and one sentence. In batch level, they are saved as\n # [ [img0_0, img0_1], [img1_0, img1_1], ...] and [sent0, sent1, ...]\n # Here, we flat them to\n # feat/pos = [ img0_0, img0_1, img1_0, img1_1, ...]\n # sent = [ sent0, sent0, sent1, sent1, ...]\n sent = sum(zip(sent, sent), ())\n batch_size, img_num, obj_num, feat_size = feat.size()\n assert img_num == 2 and obj_num == 36 and feat_size == 2048\n feat = feat.view(batch_size * 2, obj_num, feat_size)\n pos = pos.view(batch_size * 2, obj_num, 4)\n\n # Extract feature --> Concat\n x = self.lxrt_encoder(sent, (feat, pos))\n x = x.view(-1, self.hid_dim*2)\n\n # Compute logit of answers\n logit = self.logit_fc(x)\n\n # If random indices are provided, shuffle them\n if mixup_tensor_fn:\n mixed_feats = mixup_tensor_fn(tensor1=x)\n mixed_logits = self.logit_fc(mixed_feats)\n else:\n mixed_logits = None\n\n return logit, mixed_logits, x\n"
] | [
[
"torch.nn.Linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DataPrajna/deep-nilmtk | [
"13d09ed92ccbc58f2924a04437e3447d18bf56ae"
] | [
"common/data_paraser.py"
] | [
"from nilmtk import DataSet\nimport matplotlib.pyplot as plt\nplt.ion()\nfrom networkx.drawing.nx_agraph import graphviz_layout\nimport networkx as nx\nfrom nilmtk.disaggregate import CombinatorialOptimisation\nfrom nilmtk import HDFDataStore, MeterGroup\nimport datetime\n\n\n\n\nclass VisualizeApplianceData:\n\n def __init__(self, filename):\n self.data = DataSet(filename).buildings\n\n def get_elec_meter_data_of_a_building(self, building_idx):\n return self.data[building_idx].elec\n\n def get_appliance_data_of_a_building(self, building_idx, appliance_name):\n return self.get_elec_meter_data_of_a_building(building_idx)[appliance_name]\n\n def plot_appliance_data_of_a_building(self, building_idx, appliance_name):\n activations = self.get_appliance_data_of_a_building(building_idx, appliance_name).power_series()\n activations = iter(activations)\n print(activations)\n for interval_activation in activations:\n plt.plot(interval_activation[0:10000])\n plt.xlabel('Hour of day')\n plt.ylabel('Kwh Consumption')\n #title = 'Consumption Data of ' + appliance_name\n plt.title(title)\n plt.show()\n plt.waitforbuttonpress()\n plt.gcf().clear()\n print(interval_activation)\n\n\n\n def plot_main_meter_data_of_a_building(self, building_idx):\n activations = self.get_elec_meter_data_of_a_building(building_idx).mains().power_series()\n activations = iter(activations)\n print(activations)\n for interval_activation in activations:\n plt.plot(interval_activation)\n plt.show()\n plt.waitforbuttonpress()\n plt.gcf().clear()\n\n def get_all_appliances_of_a_building(self, building_idx):\n all_applicances = self.get_elec_meter_data_of_a_building(building_idx).appliances\n return [a.identifier.type for a in all_applicances]\n\n def wiring_graph(self, meters):\n \"\"\"Returns a networkx.DiGraph of connections between meters.\"\"\"\n wiring_graph = nx.DiGraph()\n\n def _build_wiring_graph(meters):\n for meter in meters:\n if isinstance(meter, MeterGroup):\n metergroup = meter\n _build_wiring_graph(metergroup.meters)\n else:\n upstream_meter = meter.upstream_meter(raise_warning=False)\n # Need to ensure we use the same object\n # if upstream meter already exists.\n if upstream_meter is not None:\n for node in wiring_graph.nodes():\n if upstream_meter == node:\n upstream_meter = node\n break\n wiring_graph.add_edge(upstream_meter, meter)\n\n _build_wiring_graph(meters)\n return wiring_graph\n\n def draw_wire_between_mains_and_submeter_of_abuilding(self, building_idx, show_label=True):\n meters = self.get_elec_meter_data_of_a_building(building_idx).meters\n graph = self.wiring_graph(meters)\n meter_labels = {meter: meter.instance() for meter in graph.nodes()}\n pos = graphviz_layout(graph, prog='dot')\n nx.draw(graph, pos, labels=meter_labels, arrows=False)\n if show_label:\n meter_labels = {meter: meter.label() for meter in graph.nodes()}\n for meter, name in meter_labels.iteritems():\n x, y = pos[meter]\n if meter.is_site_meter():\n delta_y = 5\n else:\n delta_y = -5\n plt.text(x, y + delta_y, s=name, bbox=dict(facecolor='green', alpha=0.5),\n horizontalalignment='center')\n ax = plt.gca()\n return graph, ax\n\n def pie_plot_of_submeter_energy_of_a_building(self, building_index, is_save=False):\n elec = self.get_elec_meter_data_of_a_building(building_index)\n fraction = elec.submeters().fraction_per_meter().dropna()\n\n labels = elec.get_labels(fraction.index)\n plt.figure(figsize=(10, 10))\n fraction.plot(kind='pie', labels=labels);\n plt.show()\n plt.waitforbuttonpress()\n\n #to save the file in png format\n #to 
check if changes are in git\n if is_save:\n dt = datetime.datetime.now().strftime(\"%Y%m%d-%H:%M:%S\")\n filename = 'pie' + dt + '.png'\n plt.savefig(filename)\n\n def fit_a_model(self, building_idx):\n co = CombinatorialOptimisation()\n elec = self.get_elec_meter_data_of_a_building(building_idx)\n co.train(elec)\n return co\n\n\n def disaggregate_building_to_file(self, building_idx, filename, model=None):\n if model == None:\n model = self.fit_a_model(building_idx)\n elec = self.get_elec_meter_data_of_a_building(building_idx)\n\n output = HDFDataStore(filename, 'w')\n model.disaggregate(elec.mains(), output)\n output.close()\nimport os\nif __name__ == \"__main__\":\n fname = 'C:/Users/ppdash/workspace/deep-nilmtk/Data/ukdale.h5'\n print(os.path.isfile(fname))\n v = VisualizeApplianceData(fname)\n #v.get_elec_meter_data_of_a_building(1)\n #v.get_appliance_data_of_a_building(1)\n v.plot_appliance_data_of_a_building(1, 'washer dryer')\n #v.pie_plot_of_submeter_energy_of_a_building(5, is_save=False)\n #print(v.get_all_appliances_of_a_building(1))\n # v.fit_a_model(1)\n #v.disaggregate_building_to_file(1, 'my_model.h5')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.waitforbuttonpress",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wliuxingxiangyu/gnn | [
"d1f37473931a77595012e7308acd8c8c681a511d"
] | [
"gnn/GNN.py"
] | [
"import tensorflow as tf\nimport numpy as np\nimport datetime as time\n\n# class for the core of the architecture\nclass GNN:\n def __init__(self, net, input_dim, output_dim, state_dim, max_it=50, optimizer=tf.train.AdamOptimizer, learning_rate=0.01, threshold=0.01, graph_based=False,\n param=str(time.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')), config=None, tensorboard=False, mask_flag=False):\n \"\"\"\n create GNN instance. Feed this parameters:\n\n :net: Net instance - it contains state network, output network, initialized weights, loss function and metric;\n :input_dim: dimension of the input\n :output_dim: dimension of the output\n :state_dim: dimension for the state\n :max_it: maximum number of iteration of the state convergence procedure\n :optimizer: optimizer instance\n :learning_rate: learning rate value\n :threshold: value to establish the state convergence\n :graph_based: flag to denote a graph based problem\n :param: name of the experiment\n :config: ConfigProto protocol buffer object, to set configuration options for a session\n :tensorboard: boolean flag to activate tensorboard\n \"\"\"\n\n np.random.seed(0)\n tf.set_random_seed(0)\n self.tensorboard = tensorboard\n self.max_iter = max_it\n self.net = net\n self.optimizer = optimizer(learning_rate, name=\"optim\")\n self.state_threshold = threshold\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.state_dim = state_dim\n self.graph_based = graph_based\n self.mask_flag = mask_flag\n self.build()\n\n self.session = tf.Session(config=config)\n #self.session = tf.Session()\n self.session.run(tf.global_variables_initializer())\n self.init_l = tf.local_variables_initializer()\n\n # parameter to monitor the learning via tensorboard and to save the model\n if self.tensorboard:\n self.merged_all = tf.summary.merge_all(key='always')\n self.merged_train = tf.summary.merge_all(key='train')\n self.merged_val = tf.summary.merge_all(key='val')\n self.writer = tf.summary.FileWriter('tmp/' + param, self.session.graph)\n # self.saver = tf.train.Saver()\n # self.save_path = \"tmp/\" + param + \"saves/model.ckpt\"\n\n def VariableState(self):\n '''Define placeholders for input, output, state, state_old, arch-node conversion matrix'''\n # placeholder for input and output\n\n self.comp_inp = tf.placeholder(tf.float32, shape=(None, self.input_dim), name=\"input\")\n self.y = tf.placeholder(tf.float32, shape=(None, self.output_dim), name=\"target\")\n\n if self.mask_flag:\n self.mask = tf.placeholder(tf.float32, name=\"mask\")\n\n # state(t) & state(t-1)\n self.state = tf.placeholder(tf.float32, shape=(None, self.state_dim), name=\"state\")\n self.state_old = tf.placeholder(tf.float32, shape=(None, self.state_dim), name=\"old_state\")\n\n # arch-node conversion matrix\n self.ArcNode = tf.sparse_placeholder(tf.float32, name=\"ArcNode\")\n\n # node-graph conversion matrix\n if self.graph_based:\n self.NodeGraph = tf.sparse_placeholder(tf.float32, name=\"NodeGraph\")\n else:\n self.NodeGraph = tf.placeholder(tf.float32, name=\"NodeGraph\")\n\n def build(self):\n '''build the architecture, setting variable, loss, training'''\n # network\n self.VariableState()\n self.loss_op = self.Loop()\n\n # loss\n with tf.variable_scope('loss'):\n if self.mask_flag:\n self.loss = self.net.Loss(self.loss_op[0], self.y, mask=self.mask)\n self.val_loss = self.net.Loss(self.loss_op[0], self.y, mask=self.mask)\n else:\n self.loss = self.net.Loss(self.loss_op[0], self.y)\n # val loss\n self.val_loss = self.net.Loss(self.loss_op[0], self.y)\n\n if 
self.tensorboard:\n self.summ_loss = tf.summary.scalar('loss', self.loss, collections=['train'])\n self.summ_val_loss = tf.summary.scalar('val_loss', self.val_loss, collections=['val'])\n\n # optimizer\n with tf.variable_scope('train'):\n self.grads = self.optimizer.compute_gradients(self.loss)\n self.train_op = self.optimizer.apply_gradients(self.grads, name='train_op')\n if self.tensorboard:\n for index, grad in enumerate(self.grads):\n tf.summary.histogram(\"{}-grad\".format(self.grads[index][1].name), self.grads[index],\n collections=['always'])\n\n # metrics\n with tf.variable_scope('metrics'):\n if self.mask_flag:\n self.metrics = self.net.Metric(self.y, self.loss_op[0], mask=self.mask)\n else:\n self.metrics = self.net.Metric(self.y, self.loss_op[0])\n\n # val metric\n with tf.variable_scope('val_metric'):\n if self.mask_flag:\n self.val_met = self.net.Metric(self.y, self.loss_op[0], mask=self.mask)\n else:\n self.val_met = self.net.Metric(self.y, self.loss_op[0])\n if self.tensorboard:\n self.summ_val_met = tf.summary.scalar('val_metric', self.val_met, collections=['always'])\n\n def convergence(self, a, state, old_state, k):\n with tf.variable_scope('Convergence'):\n # body of the while cicle used to iteratively calculate state\n\n # assign current state to old state\n old_state = state\n\n # grub states of neighboring node\n gat = tf.gather(old_state, tf.cast(a[:, 1], tf.int32))\n\n # slice to consider only label of the node and that of it's neighbor\n # sl = tf.slice(a, [0, 1], [tf.shape(a)[0], tf.shape(a)[1] - 1])\n # equivalent code\n sl = a[:, 2:]\n\n # concat with retrieved state\n inp = tf.concat([sl, gat], axis=1)\n\n # evaluate next state and multiply by the arch-node conversion matrix to obtain per-node states\n layer1 = self.net.netSt(inp)\n state = tf.sparse_tensor_dense_matmul(self.ArcNode, layer1)\n\n # update the iteration counter\n k = k + 1\n return a, state, old_state, k\n\n def condition(self, a, state, old_state, k):\n # evaluate condition on the convergence of the state\n with tf.variable_scope('condition'):\n # evaluate distance by state(t) and state(t-1)\n outDistance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(state, old_state)), 1) + 0.00000000001)\n # vector showing item converged or not (given a certain threshold)\n checkDistanceVec = tf.greater(outDistance, self.state_threshold)\n\n c1 = tf.reduce_any(checkDistanceVec)\n c2 = tf.less(k, self.max_iter)\n\n return tf.logical_and(c1, c2)\n\n\n def Loop(self):\n # call to loop for the state computation and compute the output\n # compute state\n with tf.variable_scope('Loop'):\n k = tf.constant(0)\n res, st, old_st, num = tf.while_loop(self.condition, self.convergence,\n [self.comp_inp, self.state, self.state_old, k])\n if self.tensorboard:\n self.summ_iter = tf.summary.scalar('iteration', num, collections=['always'])\n\n if self.graph_based:\n # stf = tf.transpose(tf.matmul(tf.transpose(st), self.NodeGraph))\n\n stf = tf.sparse_tensor_dense_matmul(self.NodeGraph, st)\n else:\n stf = st\n out = self.net.netOut(stf)\n\n return out, num\n\n def Train(self, inputs, ArcNode, target, step, nodegraph=0.0, mask=None):\n ''' train methods: has to receive the inputs, arch-node matrix conversion, target,\n and optionally nodegraph indicator '''\n\n # Creating a SparseTEnsor with the feeded ArcNode Matrix\n arcnode_ = tf.SparseTensorValue(indices=ArcNode.indices, values=ArcNode.values,\n dense_shape=ArcNode.dense_shape)\n if self.graph_based:\n nodegraph = tf.SparseTensorValue(indices=nodegraph.indices, 
values=nodegraph.values,\n dense_shape=nodegraph.dense_shape)\n\n if self.mask_flag:\n fd = {self.NodeGraph: nodegraph, self.comp_inp: inputs,\n self.state: np.zeros((ArcNode.dense_shape[0], self.state_dim)),\n self.state_old: np.ones((ArcNode.dense_shape[0], self.state_dim)),\n self.ArcNode: arcnode_, self.y: target, self.mask: mask}\n else:\n\n fd = {self.NodeGraph: nodegraph, self.comp_inp: inputs, self.state: np.zeros((ArcNode.dense_shape[0], self.state_dim)),\n self.state_old: np.ones((ArcNode.dense_shape[0], self.state_dim)),\n self.ArcNode: arcnode_, self.y: target}\n if self.tensorboard:\n _, loss, loop, merge_all, merge_tr = self.session.run(\n [self.train_op, self.loss, self.loss_op, self.merged_all, self.merged_train],\n feed_dict=fd)\n if step % 100 == 0:\n self.writer.add_summary(merge_all, step)\n self.writer.add_summary(merge_tr, step)\n else:\n _, loss, loop = self.session.run(\n [self.train_op, self.loss, self.loss_op],\n feed_dict=fd)\n\n return loss, loop[1]\n\n def Validate(self, inptVal, arcnodeVal, targetVal, step, nodegraph=0.0, mask=None):\n \"\"\" Takes care of the validation of the model - it outputs, regarding the set given as input,\n the loss value, the accuracy (custom defined in the Net file), the number of iteration\n in the convergence procedure \"\"\"\n\n arcnode_ = tf.SparseTensorValue(indices=arcnodeVal.indices, values=arcnodeVal.values,\n dense_shape=arcnodeVal.dense_shape)\n if self.graph_based:\n nodegraph = tf.SparseTensorValue(indices=nodegraph.indices, values=nodegraph.values,\n dense_shape=nodegraph.dense_shape)\n\n if self.mask_flag:\n fd_val = {self.NodeGraph: nodegraph, self.comp_inp: inptVal,\n self.state: np.zeros((arcnodeVal.dense_shape[0], self.state_dim)),\n self.state_old: np.ones((arcnodeVal.dense_shape[0], self.state_dim)),\n self.ArcNode: arcnode_,\n self.y: targetVal,\n self.mask: mask}\n else:\n\n fd_val = {self.NodeGraph: nodegraph, self.comp_inp: inptVal,\n self.state: np.zeros((arcnodeVal.dense_shape[0], self.state_dim)),\n self.state_old: np.ones((arcnodeVal.dense_shape[0], self.state_dim)),\n self.ArcNode: arcnode_,\n self.y: targetVal}\n\n if self.tensorboard:\n loss_val, loop, merge_all, merge_val, metr = self.session.run(\n [self.val_loss, self.loss_op, self.merged_all, self.merged_val, self.metrics], feed_dict=fd_val)\n self.writer.add_summary(merge_all, step)\n self.writer.add_summary(merge_val, step)\n else:\n loss_val, loop, metr = self.session.run(\n [self.val_loss, self.loss_op, self.metrics], feed_dict=fd_val)\n return loss_val, metr, loop[1]\n\n def Evaluate(self, inputs, st, st_old, ArcNode, target):\n '''evaluate method with initialized state -- not used for the moment: has to receive the inputs,\n initialization for state(t) and state(t-1),\n arch-node matrix conversion, target -- gives as output the accuracy on the set given as input'''\n\n arcnode_ = tf.SparseTensorValue(indices=ArcNode.indices, values=ArcNode.values,\n dense_shape=ArcNode.dense_shape)\n\n fd = {self.comp_inp: inputs, self.state: st, self.state_old: st_old,\n self.ArcNode: arcnode_, self.y: target}\n _ = self.session.run([self.init_l])\n met = self.session.run([self.metrics], feed_dict=fd)\n return met\n\n def Evaluate(self, inputs, ArcNode, target, nodegraph=0.0):\n '''evaluate methods: has to receive the inputs, arch-node matrix conversion, target\n -- gives as output the accuracy on the set given as input'''\n\n arcnode_ = tf.SparseTensorValue(indices=ArcNode.indices, values=ArcNode.values,\n dense_shape=ArcNode.dense_shape)\n if 
self.graph_based:\n nodegraph = tf.SparseTensorValue(indices=nodegraph.indices, values=nodegraph.values,\n dense_shape=nodegraph.dense_shape)\n\n\n fd = {self.NodeGraph: nodegraph, self.comp_inp: inputs, self.state: np.zeros((ArcNode.dense_shape[0], self.state_dim)),\n self.state_old: np.ones((ArcNode.dense_shape[0], self.state_dim)),\n self.ArcNode: arcnode_, self.y: target}\n _ = self.session.run([self.init_l])\n met = self.session.run([self.metrics], feed_dict=fd)\n return met\n\n def Predict(self, inputs, st, st_old, ArcNode):\n ''' predict methods with initialized state -- not used for the moment:: has to receive the inputs,\n initialization for state(t) and state(t-1),\n arch-node matrix conversion -- gives as output the output values of the output function (all the nodes output\n for all the graphs (if node-based) or a single output for each graph (if graph based) '''\n\n arcnode_ = tf.SparseTensorValue(indices=ArcNode.indices, values=ArcNode.values,\n dense_shape=ArcNode.dense_shape)\n fd = {self.comp_inp: inputs, self.state: st, self.state_old: st_old,\n self.ArcNode: arcnode_}\n pr = self.session.run([self.loss_op], feed_dict=fd)\n return pr[0]\n\n def Predict(self, inputs, ArcNode, nodegraph=0.0):\n ''' predict methods: has to receive the inputs, arch-node matrix conversion -- gives as output the output\n values of the output function (all the nodes output\n for all the graphs (if node-based) or a single output for each graph (if graph based) '''\n\n arcnode_ = tf.SparseTensorValue(indices=ArcNode.indices, values=ArcNode.values,\n dense_shape=ArcNode.dense_shape)\n fd = {self.comp_inp: inputs, self.state: np.zeros((ArcNode.dense_shape[0], self.state_dim)),\n self.state_old: np.ones((ArcNode.dense_shape[0], self.state_dim)),\n self.ArcNode: arcnode_}\n pr = self.session.run([self.loss_op], feed_dict=fd)\n return pr[0]\n"
] | [
[
"tensorflow.sparse_placeholder",
"tensorflow.concat",
"tensorflow.cast",
"tensorflow.sparse_tensor_dense_matmul",
"tensorflow.SparseTensorValue",
"tensorflow.summary.scalar",
"tensorflow.while_loop",
"tensorflow.greater",
"tensorflow.subtract",
"tensorflow.Session",
"numpy.zeros",
"tensorflow.less",
"tensorflow.reduce_any",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.summary.merge_all",
"tensorflow.set_random_seed",
"tensorflow.local_variables_initializer",
"tensorflow.summary.FileWriter",
"numpy.random.seed",
"tensorflow.constant",
"numpy.ones",
"tensorflow.variable_scope",
"tensorflow.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
meenakshiravisankar/OCNet.pytorch | [
"3a73cf43c15b802e4877aad82f34bf2391acdbca"
] | [
"train.py"
] | [
"##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n## Created by: speedinghzl02\r\n## Modified by: RainbowSecret\r\n## Microsoft Research\r\n## [email protected]\r\n## Copyright (c) 2018\r\n##\r\n## This source code is licensed under the MIT-style license found in the\r\n## LICENSE file in the root directory of this source tree \r\n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\n\r\nimport argparse\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.utils import data\r\nimport numpy as np\r\nimport pickle\r\nimport cv2\r\nfrom torch.autograd import Variable\r\nimport torch.optim as optim\r\nimport scipy.misc\r\nimport torch.backends.cudnn as cudnn\r\nimport sys\r\nimport os\r\nimport os.path as osp\r\nfrom dataset import get_segmentation_dataset\r\nfrom network import get_segmentation_model\r\nfrom config import Parameters\r\nimport random\r\nimport timeit\r\nimport logging\r\nimport pdb\r\nfrom tqdm import tqdm\r\nfrom tensorboardX import SummaryWriter\r\nimport mlflow\r\nfrom utils.criterion import CriterionCrossEntropy, CriterionDSN, CriterionOhemDSN, CriterionOhemDSN_single\r\nfrom utils.parallel import DataParallelModel, DataParallelCriterion\r\n\r\n\r\nstart = timeit.default_timer()\r\n\r\nargs = Parameters().parse()\r\n\r\n# mlflow to log\r\nexp_id = mlflow.set_experiment(args.experiment_name)\r\nmlflow.start_run(experiment_id=exp_id)\r\nmlflow.log_param(\"train_configs\", vars(args))\r\n\r\ndef lr_poly(base_lr, iter, max_iter, power):\r\n return base_lr*((1-float(iter)/max_iter)**(power))\r\n \r\n \r\ndef adjust_learning_rate(optimizer, i_iter):\r\n lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)\r\n optimizer.param_groups[0]['lr'] = lr\r\n return lr\r\n\r\n\r\ndef main():\r\n print(\"Input arguments:\")\r\n for key, val in vars(args).items():\r\n print(\"{:16} {}\".format(key, val))\r\n\r\n random.seed(args.seed)\r\n torch.manual_seed(args.seed)\r\n\r\n writer = SummaryWriter(args.snapshot_dir)\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=args.gpu\r\n h, w = map(int, args.input_size.split(','))\r\n input_size = (h, w)\r\n cudnn.enabled = True\r\n\r\n deeplab = get_segmentation_model(\"_\".join([args.network, args.method]), num_classes=args.num_classes)\r\n\r\n saved_state_dict = torch.load(args.restore_from)\r\n new_params = deeplab.state_dict().copy()\r\n\r\n if 'wide' in args.network:\r\n saved_state_dict = saved_state_dict['state_dict']\r\n if 'vistas' in args.method:\r\n saved_state_dict = saved_state_dict['body']\r\n for i in saved_state_dict:\r\n new_params[i] = saved_state_dict[i]\r\n else: \r\n for i in saved_state_dict:\r\n i_parts = i.split('.')\r\n if not 'classifier' in i_parts: \r\n new_params['.'.join(i_parts[1:])] = saved_state_dict[i]\r\n elif 'mobilenet' in args.network:\r\n for i in saved_state_dict:\r\n i_parts = i.split('.')\r\n if not (i_parts[0]=='features' and i_parts[1]=='18') and not i_parts[0]=='classifier':\r\n new_params['.'.join(i_parts[0:])] = saved_state_dict[i] \r\n else:\r\n for i in saved_state_dict:\r\n i_parts = i.split('.')\r\n if not i_parts[0]=='fc' and not i_parts[0]=='last_linear' and not i_parts[0]=='classifier':\r\n new_params['.'.join(i_parts[0:])] = saved_state_dict[i] \r\n\r\n if args.start_iters > 0:\r\n deeplab.load_state_dict(saved_state_dict)\r\n else:\r\n deeplab.load_state_dict(new_params)\r\n\r\n model = DataParallelModel(deeplab)\r\n # model = nn.DataParallel(deeplab)\r\n model.train() \r\n model.float()\r\n model.cuda() \r\n\r\n criterion = 
CriterionCrossEntropy()\r\n if \"dsn\" in args.method:\r\n if args.ohem:\r\n if args.ohem_single:\r\n print('use ohem only for the second prediction map.')\r\n criterion = CriterionOhemDSN_single(thres=args.ohem_thres, min_kept=args.ohem_keep, dsn_weight=float(args.dsn_weight))\r\n else:\r\n criterion = CriterionOhemDSN(thres=args.ohem_thres, min_kept=args.ohem_keep, dsn_weight=float(args.dsn_weight), use_weight=True)\r\n else:\r\n criterion = CriterionDSN(dsn_weight=float(args.dsn_weight), use_weight=True)\r\n\r\n\r\n criterion = DataParallelCriterion(criterion)\r\n criterion.cuda()\r\n cudnn.benchmark = True\r\n\r\n\r\n if not os.path.exists(args.snapshot_dir):\r\n os.makedirs(args.snapshot_dir)\r\n\r\n trainloader = data.DataLoader(get_segmentation_dataset(args.dataset, root=args.data_dir, list_path=args.data_list,\r\n max_iters=args.num_steps*args.batch_size, crop_size=input_size, \r\n scale=args.random_scale, mirror=args.random_mirror, network=args.network), \r\n batch_size=args.batch_size, shuffle=True, num_workers=1, pin_memory=True)\r\n\r\n optimizer = optim.SGD([{'params': filter(lambda p: p.requires_grad, deeplab.parameters()), 'lr': args.learning_rate }], \r\n lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)\r\n\r\n\r\n optimizer.zero_grad()\r\n\r\n for i_iter, batch in enumerate(trainloader):\r\n sys.stdout.flush()\r\n i_iter += args.start_iters\r\n images, labels, _, _ = batch\r\n images = Variable(images.cuda())\r\n labels = Variable(labels.long().cuda())\r\n optimizer.zero_grad()\r\n lr = adjust_learning_rate(optimizer, i_iter)\r\n if args.fix_lr:\r\n lr = args.learning_rate\r\n print('learning_rate: {}'.format(lr))\r\n\r\n if 'gt' in args.method:\r\n preds = model(images, labels)\r\n else:\r\n preds = model(images)\r\n loss = criterion(preds, labels)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n if i_iter % 100 == 0:\r\n writer.add_scalar('learning_rate', lr, i_iter)\r\n writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)\r\n print('iter = {} of {} completed, loss = {}'.format(i_iter, args.num_steps, loss.data.cpu().numpy()))\r\n # mlflow logging\r\n mlflow.log_metric(key=\"loss\", value=float(loss.data.cpu().numpy()), step=int(i_iter))\r\n mlflow.log_metric(key=\"learning_rate\", value=lr, step=int(i_iter))\r\n\r\n if i_iter >= args.num_steps-1:\r\n print('save model ...')\r\n torch.save(deeplab.state_dict(),osp.join(args.snapshot_dir, 'CS_scenes_'+str(args.num_steps)+'.pth'))\r\n mlflow.log_artifact(osp.join(args.snapshot_dir, 'CS_scenes_'+str(args.num_steps)+'.pth'))\r\n break\r\n\r\n if i_iter % args.save_pred_every == 0:\r\n print('taking snapshot ...')\r\n torch.save(deeplab.state_dict(),osp.join(args.snapshot_dir, 'CS_scenes_'+str(i_iter)+'.pth'))\r\n mlflow.log_artifact(osp.join(args.snapshot_dir, 'CS_scenes_'+str(i_iter)+'.pth')) \r\n\r\n end = timeit.default_timer()\r\n print(end-start,'seconds')\r\n mlflow.end_run()\r\nif __name__ == '__main__':\r\n main()\r\n"
] | [
[
"torch.manual_seed",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rossmclane/demand_projection | [
"c43cf8e6b2e0d5edf92508223ad34849287924db"
] | [
"src/h3_utils.py"
] | [
"from h3 import h3\nimport json\nimport pandas as pd\nfrom geojson.feature import *\nfrom folium import Map, Marker, GeoJson\nfrom geojson.feature import *\nimport branca.colormap as cm\nimport geopandas as gpd\nfrom shapely.geometry import Polygon\n\n\ndef bin_by_hexagon(df: pd.DataFrame, groupby_items: list, agg_map: dict, resolution: int):\n \"\"\"\n Use h3.geo_to_h3 to join each point into the spatial index of the hex at the specified resolution.\n Use h3.h3_to_geo_boundary to obtain the geometries of these hexagons.\n adopted from: Uber https://github.com/uber/h3-py-notebooks/blob/master/notebooks/urban_analytics.ipynb\n\n parameters\n ---------\n df:pd.DataFrame - dataframe with points to be binned, including columns ['latitude'.'longitude']\n groupby_items:list - list of column names to group by. ex. ['hex_id','hour','weekday']\n agg_map:dict - dict where keys=columns to be included in groupby and values=aggrigate function\n ex. {'station_id':'count','energy':'sum'}\n resolution:int - H3 cell resolution size\n\n returns\n ---------\n df:pd.DataFrame - dataframe of resulting groupby function\n\n \"\"\"\n\n # Assign hex_ids\n df[\"hex_id\"] = df.apply(lambda row: h3.geo_to_h3(row[\"latitude\"], row[\"longitude\"], resolution), axis=1)\n\n # Groupby and aggregate\n df_aggreg = df.groupby(groupby_items).agg(agg_map)\n df_aggreg.reset_index(inplace=True)\n\n # Create geojson column\n df_aggreg[\"geojson\"] = df_aggreg.hex_id.apply(lambda x:\n {\"type\": \"Polygon\",\n \"coordinates\":\n [h3.h3_to_geo_boundary(h=x, geo_json=True)]\n }\n )\n\n return df_aggreg\n\n\ndef hexagons_dataframe_to_geojson(df_hex, file_output=None):\n \"\"\"\n Produce the GeoJSON for a dataframe that has a geometry column in geojson format ,\n along with the other columns to include such as hex_id, station_ids, station_count, etc\n adopted from: Uber https://github.com/uber/h3-py-notebooks/blob/master/notebooks/urban_analytics.ipynb\n\n \"\"\"\n\n list_features = []\n\n for i, row in df_hex.iterrows():\n feature = Feature(geometry=row[\"geometry\"], id=row[\"hex_id\"],\n properties={\n col: row[col] for col in df_hex.columns.drop('geometry', 'hex_id')\n }\n )\n\n list_features.append(feature)\n\n feat_collection = FeatureCollection(list_features)\n\n geojson_result = json.dumps(feat_collection)\n\n # optionally write to file\n if file_output is not None:\n with open(file_output, \"w\") as f:\n json.dump(feat_collection, f)\n\n return geojson_result\n\n\ndef h3_choropleth_map(df_aggreg: pd.DataFrame, value_to_map: str, kind: str, hour: int, border_color='black', fill_opacity=0.7,\n initial_map=None, map_center=[34.0522, -118.2437], with_legend=True):\n \"\"\"\n Builds a folium choropleth map from an df containing H3 hex cells and some cell value such as 'count'.\n parameters\n ----------\n df_aggreg:pd.DataFrame - df with H3 hex cells in col ['hex_id'] and at least one col ['value_to_map'] for cell color.\n value_to_map:str - column name in df to scale and color cells by\n returns\n ----------\n initial_map:folium.Map\n \"\"\"\n # take resolution from the first row\n res = h3.h3_get_resolution(df_aggreg.loc[0, 'hex_id'])\n\n if hour is not None:\n df_aggreg = df_aggreg[df_aggreg.hour == hour]\n else:\n df_aggreg = df_aggreg.groupby(['hex_id']).agg({value_to_map: 'sum', 'geometry': 'first', 'hex_id': 'first'})\n\n # create geojson data from dataframe\n geojson_data = hexagons_dataframe_to_geojson(df_hex=df_aggreg)\n\n if initial_map is None:\n initial_map = Map(location=[34.0522, -118.2437], zoom_start=11, 
tiles=\"cartodbpositron\",\n attr='© <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> contributors © <a href=\"http://cartodb.com/attributions#basemaps\">CartoDB</a>'\n )\n\n if value_to_map:\n # colormap\n min_value = df_aggreg[value_to_map].min()\n max_value = df_aggreg[value_to_map].max()\n m = round((min_value + max_value) / 2, 0)\n\n # color names accepted https://github.com/python-visualization/branca/blob/master/branca/_cnames.json\n if kind == \"linear\":\n custom_cm = cm.LinearColormap(['green', 'yellow', 'red'], vmin=min_value, vmax=max_value)\n elif kind == \"outlier\":\n # for outliers, values would be -11,0,1\n custom_cm = cm.LinearColormap(['blue', 'white', 'red'], vmin=min_value, vmax=max_value)\n elif kind == \"filled_nulls\":\n custom_cm = cm.LinearColormap(['sienna', 'green', 'yellow', 'red'],\n index=[0, min_value, m, max_value], vmin=min_value, vmax=max_value)\n\n # plot on map\n name_layer = \"Choropleth \" + str(res)\n if kind != \"linear\":\n name_layer = name_layer + kind\n\n GeoJson(\n geojson_data,\n style_function=lambda feature: {\n 'fillColor': custom_cm(feature['properties'][value_to_map]),\n 'color': border_color,\n 'weight': 1,\n 'fillOpacity': fill_opacity\n },\n name=name_layer\n ).add_to(initial_map)\n\n # add legend (not recommended if multiple layers)\n if with_legend == True:\n custom_cm.add_to(initial_map)\n\n else:\n # plot on map\n name_layer = \"Choropleth \" + str(res)\n if kind != \"linear\":\n name_layer = name_layer + kind\n\n GeoJson(\n geojson_data,\n style_function=lambda feature: {\n 'fillColor': 'blue',\n 'color': 'border_color',\n 'weight': 1,\n 'fillOpacity': fill_opacity\n },\n name=name_layer\n ).add_to(initial_map)\n\n return initial_map\n\n\ndef reverse_lat_lon(hex_coords):\n geom_hex = []\n for lat_lon in hex_coords:\n geom_hex.append([lat_lon[1], lat_lon[0]])\n\n return geom_hex\n\n\ndef fill_shapefile_hexes(geojson: json, resolution: int):\n \"\"\"\n Fill a shapefile with hexes of a particular h3 resolution.\n parameters\n ----------\n geojson:json - json with structure like:\n resolution:ind - h3 resolution\n returns\n ----------\n hex_df:gpd.GeoDataFrame() - geo dataframe with hex_id and geometry\n \"\"\"\n\n # Generates hex_ids of filled polygon\n set_hexagons = h3.polyfill(geojson=geojson, res=resolution, geo_json_conformant=True)\n\n # Generate list of polygons\n list_hexagons = list(set_hexagons)\n\n # Reverse the latitude and longitude\n one_hex_of_fill = list_hexagons[0]\n one_hex_of_fill_coords_latlon = h3.h3_to_geo_boundary(h=one_hex_of_fill, geo_json=False)\n one_hex_of_fill_coords_lonlat = reverse_lat_lon(hex_coords=one_hex_of_fill_coords_latlon)\n\n # Create hex_id df, fill values with zero and assign the geometries\n df_fill_hex = pd.DataFrame({\"hex_id\": list_hexagons})\n df_fill_hex[\"value\"] = 0\n df_fill_hex['geojson'] = df_fill_hex.hex_id.apply(lambda x:\n {\"type\": \"Polygon\",\n \"coordinates\":\n [reverse_lat_lon(h3.h3_to_geo_boundary(h=x, geo_json=False))]\n }\n )\n\n # Fill the geometries and write out the final dataframe\n df_fill_hex['geometry'] = df_fill_hex['geojson'].apply(lambda x: Polygon(x['coordinates'][0]))\n df_fill_hex = gpd.GeoDataFrame(df_fill_hex, crs=\"EPSG:4326\")\n return df_fill_hex"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
betolink/its_live | [
"26ba7b78db260b970a75cbf1b6c5c2807797a577"
] | [
"notebooks/velocity_widget.py"
] | [
"# for leaflet global map\nimport json\n# for timing data access\nimport time\nimport traceback\n\nimport ipyleaflet\nimport ipywidgets\nimport numpy as np\nimport pandas as pd\nimport pyproj\n# to get and use geojson datacube catalog\nimport s3fs as s3\n# for datacube xarray/zarr access\nimport xarray as xr\nfrom IPython.display import Image, display\n# for plotting time series\nfrom matplotlib import pyplot as plt\nfrom shapely import geometry\nfrom sidecar import Sidecar\n\n\nclass timeseriesException(Exception):\n print(traceback.format_exc())\n pass\n\n\nclass ITSLIVE:\n \"\"\"\n Class to encapsulate ITS_LIVE plotting from zarr in S3\n \"\"\"\n\n VELOCITY_ATTRIBUTION = \"\"\" \\nITS_LIVE velocity mosaic\n (<a href=\"https://its-live.jpl.nasa.gov\">ITS_LIVE</a>) with funding provided by NASA MEaSUREs.\\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Map widget to plot glacier velocities\n \"\"\"\n self.catalog = {\n \"l8\": \"s3://its-live-data.jpl.nasa.gov/datacubes/v01/datacubes_100km_v01.json\",\n \"all\": \"s3://its-live-data/test_datacubes/v02/datacubes_catalog.json\",\n \"agu21\": \"s3://its-live-data/test_datacubes/AGU2021/test_datacubes_AGU2021.json\",\n }\n self.config = {\"plot\": \"v\", \"max_separation_days\": 90, \"color_by\": \"markers\"}\n self._s3fs = s3.S3FileSystem(anon=True)\n self.open_cubes = {}\n # self.outwidget = ipywidgets.Output(layout={\"border\": \"1px solid blue\"})\n\n self.color_index = 0\n self.icon_color_index = 0\n self._last_click = None\n\n self._current_catalog = \"All Satellites\"\n with self._s3fs.open(self.catalog[\"all\"], \"r\") as incubejson:\n self._json_all = json.load(incubejson)\n with self._s3fs.open(self.catalog[\"l8\"], \"r\") as incubejson:\n self._json_l8 = json.load(incubejson)\n with self._s3fs.open(self.catalog[\"agu21\"], \"r\") as incubejson:\n self._json_agu21 = json.load(incubejson)\n self.json_catalog = self._json_all\n self._initialize_widgets()\n\n def set_config(self, config):\n self.config = config\n\n def _initialize_widgets(self):\n self._control_plot_running_mean_checkbox = ipywidgets.Checkbox(\n value=True,\n description=\"Include running mean\",\n disabled=False,\n indent=False,\n tooltip=\"Plot running mean through each time series\",\n layout=ipywidgets.Layout(width=\"150px\"),\n )\n self._control_plot_running_mean_widgcntrl = ipyleaflet.WidgetControl(\n widget=self._control_plot_running_mean_checkbox, position=\"bottomright\"\n )\n self._control_clear_points_button = ipywidgets.Button(\n description=\"Clear Points\", tooltip=\"clear all picked points\"\n )\n self._control_clear_points_button.on_click(self.clear_points)\n\n self._control_clear_points_button_widgcntrl = ipyleaflet.WidgetControl(\n widget=self._control_clear_points_button, position=\"bottomright\"\n )\n\n self._control_plot_button = ipywidgets.Button(\n description=\"Make Plot\", tooltip=\"click to make plot\"\n )\n self._control_plot_button.on_click(self.plot_time_series)\n self._control_plot_button_widgcntrl = ipyleaflet.WidgetControl(\n widget=self._control_plot_button, position=\"bottomright\"\n )\n self._control_coverage_button = ipywidgets.RadioButtons(\n options=[\"All Satellites\", \"Landsat 8\", \"AGU 21\"],\n default=\"All Satellites\",\n layout={\"width\": \"max-content\"},\n description=\"Satellite:\",\n disabled=False,\n )\n\n self._control_coverage_button.observe(self.reload_catalog, \"value\")\n self._control_coverage_button_widgcntrl = ipyleaflet.WidgetControl(\n widget=self._control_coverage_button, 
position=\"bottomright\"\n )\n image = Image(\n (\n \"https://its-live-data.s3.amazonaws.com/documentation/\"\n \"ITS_LIVE_logo_small.png\"\n ),\n width=220,\n )\n\n self._control_logo = ipywidgets.Image(\n value=image.data, format=\"png\", width=180, height=58\n )\n self._control_logo_widgcntrl = ipyleaflet.WidgetControl(\n widget=self._control_logo, position=\"topright\"\n )\n self._map_base_layer = ipyleaflet.basemap_to_tiles(\n {\n \"url\": (\n \"https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/\"\n \"MapServer/tile/{z}/{y}/{x}.jpg\"\n ),\n \"attribution\": \"\\nImagery provided by ESRI\\n\",\n \"name\": \"ESRI basemap\",\n }\n )\n self._map_velocity_layer = ipyleaflet.basemap_to_tiles(\n {\n \"url\": \"https://glacierflow.nyc3.digitaloceanspaces.com/webmaps/vel_map/{z}/{x}/{y}.png\",\n \"attribution\": self.VELOCITY_ATTRIBUTION,\n \"name\": \"ITS_LIVE Velocity Mosaic\",\n }\n )\n self._map_coverage_layer = ipyleaflet.GeoJSON(\n data=self.json_catalog,\n name=\"ITS_LIVE datacube coverage\",\n style={\n \"opacity\": 0.8,\n \"fillOpacity\": 0.2,\n \"weight\": 1,\n \"color\": \"red\",\n \"cursor\": \"crosshair\",\n },\n hover_style={\n \"color\": \"white\",\n \"dashArray\": \"0\",\n \"fillOpacity\": 0.5,\n },\n )\n self.map = ipyleaflet.Map(\n basemap=self._map_base_layer,\n double_click_zoom=False,\n scroll_wheel_zoom=True,\n center=[64.20, -49.43],\n zoom=3,\n # layout=ipywidgets.widgets.Layout(\n # width=\"100%\", # Set Width of the map, examples: \"100%\", \"5em\", \"300px\"\n # height=\"100%\", # Set height of the map\n # ),\n )\n self._map_picked_points_layer_group = ipyleaflet.LayerGroup(\n layers=[], name=\"Picked points\"\n )\n\n # Populating the map\n\n self.map.add_layer(self._map_picked_points_layer_group)\n self.map.add_layer(self._map_velocity_layer)\n self.map.add_layer(self._map_coverage_layer)\n self.map.add_control(\n ipyleaflet.MeasureControl(\n position=\"topleft\",\n active_color=\"orange\",\n primary_length_unit=\"kilometers\",\n )\n )\n self.map.add_control(ipyleaflet.FullScreenControl())\n self.map.add_control(ipyleaflet.LayersControl())\n self.map.add_control(ipyleaflet.ScaleControl(position=\"bottomleft\"))\n self.map.add_control(self._control_plot_running_mean_widgcntrl)\n self.map.add_control(self._control_coverage_button_widgcntrl)\n self.map.add_control(self._control_clear_points_button_widgcntrl)\n self.map.add_control(self._control_plot_button_widgcntrl)\n self.map.add_control(self._control_logo_widgcntrl)\n self.map.default_style = {\"cursor\": \"crosshair\"}\n self.map.on_interaction(self._handle_map_click)\n\n def display(self, render_sidecar=True):\n\n if not hasattr(self, \"sidecar\"):\n self.sidecar = Sidecar(title=\"Map Widget\")\n\n if render_sidecar:\n self.fig, self.ax = plt.subplots(1, 1, figsize=(10, 6))\n self.sidecar.clear_output()\n with self.sidecar:\n display(self.map)\n\n def reload_catalog(self, coverage) -> None:\n self.map.remove_layer(self._map_coverage_layer)\n if \"Landsat\" in coverage[\"new\"]:\n self.json_catalog = self._json_l8\n self._current_catalog = \"Landsat 8\"\n elif \"AGU\" in coverage[\"new\"]:\n self.json_catalog = self._json_agu21\n self._current_catalog = \"AGU 21\"\n else:\n self.json_catalog = self._json_all\n self._current_catalog = \"All Satellites\"\n self._map_coverage_layer = ipyleaflet.GeoJSON(\n data=self.json_catalog,\n name=\"ITS_LIVE datacube coverage\",\n style={\n \"opacity\": 0.8,\n \"fillOpacity\": 0.2,\n \"weight\": 1,\n \"color\": \"red\",\n \"cursor\": \"crosshair\",\n },\n 
hover_style={\n \"color\": \"white\",\n \"dashArray\": \"0\",\n \"fillOpacity\": 0.5,\n },\n )\n self.map.add_layer(self._map_coverage_layer)\n\n def get_timeseries(self, point_xy, point_epsg_str, variable):\n\n start = time.time()\n\n if point_epsg_str != \"4326\":\n # point not in lon,lat, set up transformation and convert it to lon,lat (epsg:4326)\n inPROJtoLL = pyproj.Transformer.from_proj(\n f\"epsg:{point_epsg_str}\", \"epsg:4326\", always_xy=True\n )\n pointll = inPROJtoLL.transform(*point_xy)\n else:\n # point already lon,lat\n pointll = point_xy\n\n # create Shapely point object for inclusion test\n point = geometry.Point(*pointll) # point.coords.xy\n\n # find datacube outline that contains this point in geojson index file\n cubef = None\n\n # TODO: this should be done via the API\n for f in self.json_catalog[\"features\"]:\n polygeom = geometry.shape(f[\"geometry\"])\n if polygeom.contains(point):\n cubef = f\n break\n\n if cubef:\n print(\n f\"found datacube - elapsed time: {(time.time()-start):10.2f}\",\n flush=True,\n )\n\n if point_epsg_str == cubef[\"properties\"][\"data_epsg\"]:\n point_tilexy = point_xy\n else:\n inPROJtoTilePROJ = pyproj.Transformer.from_proj(\n f\"epsg:{point_epsg_str}\",\n cubef[\"properties\"][\"data_epsg\"],\n always_xy=True,\n )\n point_tilexy = inPROJtoTilePROJ.transform(*point_xy)\n\n print(\n f\"original xy {point_xy} {point_epsg_str} maps to datacube {point_tilexy} \"\n f\" {cubef['properties']['data_epsg']}\"\n )\n\n # now test if point is in xy box for cube (should be most of the time; could fail\n # because of boundary curvature 4326 box defined by lon,lat corners but point chosen in basemap projection)\n point_tilexy_shapely = geometry.Point(*point_tilexy)\n polygeomxy = geometry.shape(cubef[\"properties\"][\"geometry_epsg\"])\n if not polygeomxy.contains(point_tilexy_shapely):\n raise timeseriesException(\n f\"point is in lat,lon box but not {cubef['properties']['data_epsg']} box!!\"\n )\n\n # for zarr store modify URL for use in boto open - change http: to s3: and lose s3.amazonaws.com\n incubeurl = (\n cubef[\"properties\"][\"zarr_url\"]\n .replace(\"http:\", \"s3:\")\n .replace(\".s3.amazonaws.com\", \"\")\n )\n\n # if we have already opened this cube, don't do it again\n if len(self.open_cubes) > 0 and incubeurl in self.open_cubes.keys():\n ins3xr = self.open_cubes[incubeurl]\n else:\n ins3xr = xr.open_dataset(\n incubeurl, engine=\"zarr\", storage_options={\"anon\": True}\n )\n self.open_cubes[incubeurl] = ins3xr\n\n pt_variable = ins3xr[variable].sel(\n x=point_tilexy[0], y=point_tilexy[1], method=\"nearest\"\n )\n\n print(\n f\"xarray open - elapsed time: {(time.time()-start):10.2f}\", flush=True\n )\n\n pt_variable.load()\n\n print(\n f\"time series loaded {len(pt_variable)} points - elapsed time: {(time.time()-start):10.2f}\",\n flush=True,\n )\n # end for zarr store\n\n return (ins3xr, pt_variable, point_tilexy)\n\n else:\n # raise timeseriesException(f\"no datacube found for point {pointll}\")\n print(f\"No data for point {pointll}\")\n return (None, None, None)\n\n # running mean\n def runningMean(\n self,\n mid_dates,\n variable,\n minpts,\n tFreq,\n ):\n \"\"\"\n mid_dates: center dates of `variable` data [datetime64]\n variable: data to be average\n minpts: minimum number of points needed for a valid value, else filled with nan\n tFreq: the spacing between centered averages in Days, default window size = tFreq*2\n \"\"\"\n tsmin = pd.Timestamp(np.min(mid_dates))\n tsmax = pd.Timestamp(np.max(mid_dates))\n ts = 
pd.date_range(start=tsmin, end=tsmax, freq=f\"{tFreq}D\")\n ts = pd.to_datetime(ts).values\n idx0 = ~np.isnan(variable)\n runmean = np.empty([len(ts) - 1, 1])\n runmean[:] = np.nan\n tsmean = ts[0:-1]\n\n t_np = mid_dates.astype(np.int64)\n\n for i in range(len(ts) - 1):\n idx = (\n (mid_dates >= (ts[i] - np.timedelta64(int(tFreq / 2), \"D\")))\n & (mid_dates < (ts[i + 1] + np.timedelta64(int(tFreq / 2), \"D\")))\n & idx0\n )\n if sum(idx) >= minpts:\n runmean[i] = np.mean(variable[idx])\n tsmean[i] = np.mean(t_np[idx])\n\n tsmean = pd.to_datetime(tsmean).values\n return (runmean, tsmean)\n\n def _handle_map_click(self, **kwargs):\n if kwargs.get(\"type\") == \"click\":\n # NOTE this is the work around for the double click issue discussed above!\n # Only acknoledge the click when it is registered the second time at the same place!\n if self._last_click and (\n kwargs.get(\"coordinates\") == self._last_click.get(\"coordinates\")\n ):\n color = plt.cm.tab10(self.icon_color_index)\n print(self.icon_color_index, color)\n html_for_marker = f\"\"\"\n <div style=\"width: 3rem;height: 3rem; display: block;position: relative;transform: rotate(45deg);\"/>\n <h1 style=\"position: relative;left: -2.5rem;top: -2.5rem;font-size: 3rem;\">\n <span style=\"color: rgba({color[0]*100}%,{color[1]*100}%,{color[2]*100}%, {color[3]})\">\n <strong>+</strong>\n </span>\n </h1>\n </div>\n \"\"\"\n\n icon = ipyleaflet.DivIcon(\n html=html_for_marker, icon_anchor=[0, 0], icon_size=[0, 0]\n )\n new_point = ipyleaflet.Marker(\n location=kwargs.get(\"coordinates\"), icon=icon\n )\n\n # added points are tracked (color/symbol assigned) by the order they are added to the layer_group\n # (each point/icon is a layer by itself in ipyleaflet)\n self._map_picked_points_layer_group.add_layer(new_point)\n print(f\"point added {kwargs.get('coordinates')}\")\n self.icon_color_index += 1\n # if icon_color_index>=len(colornames):\n # icon_color_index=0\n else:\n self._last_click = kwargs\n\n def _plot_by_satellite(self, ins3xr, point_v, ax, point_xy, map_epsg):\n if self._current_catalog == \"Landsat 8\":\n print(\n \"To plot by satellite we need to select data from more than one satellite\"\n \"Please select 'All Satellites'\"\n )\n return\n else:\n sat = np.array([x[0] for x in ins3xr[\"satellite_img1\"].values])\n\n sats = np.unique(sat)\n sat_plotsym_dict = {\n \"1\": \"r+\",\n \"2\": \"b+\",\n \"8\": \"g+\",\n }\n\n sat_label_dict = {\n \"1\": \"Sentinel 1\",\n \"2\": \"Sentinel 2\",\n \"8\": \"Landsat 8\",\n }\n\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Speed (m/yr)\")\n ax.set_title(\"Ice flow speed pulled directly from S3\")\n\n max_dt = self.config[\"max_separation_days\"]\n dt = ins3xr[\"date_dt\"].values\n # TODO: document this\n dt = dt.astype(float) * 1.15741e-14\n if self._control_plot_running_mean_checkbox.value:\n runmean, ts = self.runningMean(\n ins3xr.mid_date[dt < max_dt].values,\n point_v[dt < max_dt].values,\n 5,\n 30,\n )\n ax.plot(\n ts,\n runmean,\n linestyle=\"-\",\n color=plt.cm.tab10(self.color_index),\n linewidth=2,\n )\n\n for satellite in sats[::-1]:\n if any(sat == satellite):\n ax.plot(\n ins3xr[\"mid_date\"][(sat == satellite) & (dt < max_dt)],\n point_v[(sat == satellite) & (dt < max_dt)],\n sat_plotsym_dict[satellite],\n label=sat_label_dict[satellite],\n )\n\n def _plot_by_points(self, ins3xr, point_v, ax, point_xy, map_epsg):\n point_label = f\"Point ({round(point_xy[0], 2)}, {round(point_xy[1], 2)})\"\n print(point_xy)\n\n dt = ins3xr[\"date_dt\"].values\n # TODO: document this\n dt = 
dt.astype(float) * 1.15741e-14\n\n max_dt = self.config[\"max_separation_days\"]\n # set the maximum image-pair time separation (dt) that will be plotted\n alpha_value = 0.75\n marker_size = 3\n if self._control_plot_running_mean_checkbox.value:\n alpha_value = 0.25\n marker_size = 2\n runmean, ts = self.runningMean(\n ins3xr.mid_date[dt < max_dt].values,\n point_v[dt < max_dt].values,\n 5,\n 30,\n )\n ax.plot(\n ts,\n runmean,\n linestyle=\"-\",\n color=plt.cm.tab10(self.color_index),\n linewidth=2,\n )\n ax.plot(\n ins3xr.mid_date[dt < max_dt],\n point_v[dt < max_dt],\n linestyle=\"None\",\n markeredgecolor=plt.cm.tab10(self.color_index),\n markerfacecolor=plt.cm.tab10(self.color_index),\n marker=\"o\",\n alpha=alpha_value,\n markersize=marker_size,\n label=point_label,\n )\n\n def plot_point_on_fig(self, point_xy, ax, map_epsg):\n\n # pointxy is [x,y] coordinate in mapfig projection (map_epsg below), nax is plot axis for time series plot\n start = time.time()\n print(\n f\"fetching timeseries for point x={point_xy[0]:10.2f} y={point_xy[1]:10.2f}\",\n flush=True,\n )\n if \"plot\" in self.config:\n variable = self.config[\"plot\"]\n else:\n variable = \"v\"\n\n ins3xr, ds_velocity_point, point_tilexy = self.get_timeseries(\n point_xy, map_epsg, variable\n ) # returns xarray dataset object (used for time axis in plot) and already loaded v time series\n if ins3xr is not None:\n # print(ins3xr)\n if self.config[\"color_by\"] == \"satellite\":\n self._plot_by_satellite(\n ins3xr, ds_velocity_point, ax, point_xy, map_epsg\n )\n else:\n self._plot_by_points(ins3xr, ds_velocity_point, ax, point_xy, map_epsg)\n plt.tight_layout()\n handles, labels = plt.gca().get_legend_handles_labels()\n by_label = dict(zip(labels, handles))\n plt.legend(\n by_label.values(), by_label.keys(), loc=\"upper left\", fontsize=10\n )\n total_time = time.time() - start\n print(\n f\"elapsed time: {total_time:10.2f} - {len(ds_velocity_point)/total_time:6.1f} points per second\",\n flush=True,\n )\n self.color_index += 1\n\n def plot_time_series(self, *args, **kwargs):\n\n # reset plot and color index\n self.ax.clear()\n self.ax.set_xlabel(\"date\")\n self.ax.set_ylabel(\"speed (m/yr)\")\n self.ax.set_title(\n f\"{self._current_catalog} ITS_LIVE ice flow speed (Zarr cube from S3 bucket)\"\n )\n self.fig.tight_layout()\n self.color_index = 0\n\n picked_points_latlon = [\n a.location for a in self._map_picked_points_layer_group.layers\n ]\n if len(picked_points_latlon) > 0:\n print(\"Plotting...\")\n for lat, lon in picked_points_latlon:\n self.plot_point_on_fig([lon, lat], self.ax, \"4326\")\n print(\"done plotting\")\n else:\n print(\"no picked points to plot yet - pick some!\")\n\n def clear_points(self, *args, **kwargs):\n self.ax.clear()\n self.color_index = 0\n self.icon_color_index = 0\n self._map_picked_points_layer_group.clear_layers()\n print(\"all points cleared\")\n\n def get_zarr_cubes(self):\n return [(k, v) for k, v in self.open_cubes.items()]\n"
] | [
[
"matplotlib.pyplot.gca",
"pandas.to_datetime",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"numpy.unique",
"numpy.isnan",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.cm.tab10",
"numpy.max",
"numpy.mean",
"pandas.date_range",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
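The `runningMean` helper in the widget code of the record above is self-contained enough to exercise on its own. Below is a minimal stand-alone sketch of the same centered-window averaging on synthetic data; the function name, window defaults and the synthetic speed series are illustrative only, and only numpy/pandas are required:

import numpy as np
import pandas as pd

def running_mean(mid_dates, values, minpts=5, t_freq=30):
    # bin edges every t_freq days; each bin averages points within ~2*t_freq days
    edges = pd.date_range(mid_dates.min(), mid_dates.max(), freq=f"{t_freq}D").values
    half = np.timedelta64(t_freq // 2, "D")
    valid = ~np.isnan(values)
    means, centers = [], []
    for lo, hi in zip(edges[:-1], edges[1:]):
        idx = (mid_dates >= lo - half) & (mid_dates < hi + half) & valid
        if idx.sum() >= minpts:                       # require minpts samples per window
            means.append(values[idx].mean())
            centers.append(pd.to_datetime(mid_dates[idx]).mean())
    return np.array(means), pd.to_datetime(centers)

# synthetic velocity time series standing in for a datacube point
dates = pd.date_range("2018-01-01", "2020-01-01", freq="7D").values
speeds = 100 + 10 * np.sin(np.arange(dates.size) / 10) + np.random.randn(dates.size)
rm, ts = running_mean(dates, speeds)
print(rm.size, "window means between", ts.min().date(), "and", ts.max().date())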
AzipSauhabah/Riskfolio-Lib | [
"d02b03e665cfa8b12e41cb441c75c6fbcf1a4365"
] | [
"riskfolio/PlotFunctions.py"
] | [
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport scipy.stats as st\nimport riskfolio.RiskFunctions as rk\n\n__all__ = [\n \"plot_series\",\n \"plot_frontier\",\n \"plot_pie\",\n \"plot_frontier_area\",\n \"plot_risk_con\",\n \"plot_hist\",\n \"plot_drawdown\",\n \"plot_table\",\n]\n\nrm_names = [\n \"Standard Deviation\",\n \"Mean Absolute Deviation\",\n \"Semi Standard Deviation\",\n \"Value at Risk\",\n \"Conditional Value at Risk\",\n \"Entropic Value at Risk\",\n \"Worst Realization\",\n \"First Lower Partial Moment\",\n \"Second Lower Partial Moment\",\n \"Max Drawdown\",\n \"Average Drawdown\",\n \"Drawdown at Risk\",\n \"Conditional Drawdown at Risk\",\n \"Ulcer Index\",\n]\n\nrmeasures = [\n \"MV\",\n \"MAD\",\n \"MSV\",\n \"VaR\",\n \"CVaR\",\n \"EVaR\",\n \"WR\",\n \"FLPM\",\n \"SLPM\",\n \"MDD\",\n \"ADD\",\n \"DaR\",\n \"CDaR\",\n \"UCI\",\n]\n\n\ndef plot_series(returns, w, cmap=\"tab20\", height=6, width=10, ax=None):\n r\"\"\"\n Create a chart with the compound cumulated of the portfolios.\n\n Parameters\n ----------\n returns : DataFrame\n Assets returns.\n w : DataFrame of shape (n_assets, n_portfolios)\n Portfolio weights.\n cmap : cmap, optional\n Colorscale, represente the risk adjusted return ratio.\n The default is 'tab20'.\n height : float, optional\n Height of the image in inches. The default is 6.\n width : float, optional\n Width of the image in inches. The default is 10.\n ax : matplotlib axis, optional\n If provided, plot on this axis. The default is None.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n ax : matplotlib axis\n Returns the Axes object with the plot for further tweaking.\n\n Example\n -------\n ::\n\n ax = plf.plot_series(returns=Y, w=ws, cmap='tab20', height=6, width=10, ax=None)\n\n .. 
image:: images/Port_Series.png\n\n\n \"\"\"\n if not isinstance(returns, pd.DataFrame):\n raise ValueError(\"returns must be a DataFrame\")\n\n if not isinstance(w, pd.DataFrame):\n raise ValueError(\"w must be a DataFrame\")\n\n if returns.shape[1] != w.shape[0]:\n a1 = str(returns.shape)\n a2 = str(w.shape)\n raise ValueError(\"shapes \" + a1 + \" and \" + a2 + \" not aligned\")\n\n if ax is None:\n ax = plt.gca()\n fig = plt.gcf()\n fig.set_figwidth(width)\n fig.set_figheight(height)\n\n ax.grid(linestyle=\":\")\n title = \"Historical Compounded Cumulative Returns\"\n ax.set_title(title)\n\n labels = w.columns.tolist()\n\n colormap = cm.get_cmap(cmap)\n colormap = colormap(np.linspace(0, 1, 20))\n\n if cmap == \"gist_rainbow\":\n colormap = colormap[::-1]\n\n cycle = plt.cycler(\"color\", colormap)\n ax.set_prop_cycle(cycle)\n\n X = w.columns.tolist()\n index = returns.index.tolist()\n\n for i in range(len(X)):\n a = np.array(returns, ndmin=2) @ np.array(w[X[i]], ndmin=2).T\n prices = 1 + np.insert(a, 0, 0, axis=0)\n prices = np.cumprod(prices, axis=0)\n prices = np.ravel(prices).tolist()\n del prices[0]\n\n ax.plot_date(index, prices, \"-\", label=labels[i])\n\n ax.set_yticklabels([\"{:3.2f}\".format(x) for x in ax.get_yticks()])\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n\n fig = plt.gcf()\n fig.tight_layout()\n\n return ax\n\n\ndef plot_frontier(\n w_frontier,\n mu,\n cov=None,\n returns=None,\n rm=\"MV\",\n rf=0,\n alpha=0.05,\n cmap=\"viridis\",\n w=None,\n label=\"Portfolio\",\n marker=\"*\",\n s=16,\n c=\"r\",\n height=6,\n width=10,\n ax=None,\n):\n \"\"\"\n Creates a plot of the efficient frontier for a risk measure specified by\n the user.\n\n Parameters\n ----------\n w_frontier : DataFrame\n Portfolio weights of some points in the efficient frontier.\n mu : DataFrame of shape (1, n_assets)\n Vector of expected returns, where n_assets is the number of assets.\n cov : DataFrame of shape (n_features, n_features)\n Covariance matrix, where n_features is the number of features.\n returns : DataFrame of shape (n_samples, n_features)\n Features matrix, where n_samples is the number of samples and\n n_features is the number of features.\n rm : str, optional\n The risk measure used to estimate the frontier.\n The default is 'MV'. Posible values are:\n\n - 'MV': Standard Deviation.\n - 'MAD': Mean Absolute Deviation.\n - 'MSV': Semi Standard Deviation.\n - 'FLPM': First Lower Partial Moment (Omega Ratio).\n - 'SLPM': Second Lower Partial Moment (Sortino Ratio).\n - 'CVaR': Conditional Value at Risk.\n - 'EVaR': Conditional Value at Risk.\n - 'WR': Worst Realization (Minimax)\n - 'MDD': Maximum Drawdown of uncompounded returns (Calmar Ratio).\n - 'ADD': Average Drawdown of uncompounded returns.\n - 'DaR': Drawdown at Risk of uncompounded returns.\n - 'CDaR': Conditional Drawdown at Risk of uncompounded returns.\n - 'UCI': Ulcer Index of uncompounded returns.\n\n rf : float, optional\n Risk free rate or minimum aceptable return. The default is 0.\n alpha : float, optional\n Significante level of VaR, CVaR, EVaR, DaR and CDaR.\n The default is 0.05.\n cmap : cmap, optional\n Colorscale, represente the risk adjusted return ratio.\n The default is 'viridis'.\n w : DataFrame of shape (n_assets, 1), optional\n A portfolio specified by the user. The default is None.\n label : str, optional\n Name of portfolio that appear on plot legend.\n The default is 'Portfolio'.\n marker : str, optional\n Marker of w_. The default is '*'.\n s : float, optional\n Size of marker. 
The default is 16.\n c : str, optional\n Color of marker. The default is 'r'.\n height : float, optional\n Height of the image in inches. The default is 6.\n width : float, optional\n Width of the image in inches. The default is 10.\n ax : matplotlib axis, optional\n If provided, plot on this axis. The default is None.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n ax : matplotlib Axes\n Returns the Axes object with the plot for further tweaking.\n\n Example\n -------\n ::\n\n label = 'Max Risk Adjusted Return Portfolio'\n mu = port.mu\n cov = port.cov\n returns = port.returns\n\n ax = plf.plot_frontier(w_frontier=ws, mu=mu, cov=cov, returns=returns,\n rm=rm, rf=0, alpha=0.05, cmap='viridis', w=w1,\n label='Portfolio', marker='*', s=16, c='r',\n height=6, width=10, ax=None)\n\n .. image:: images/MSV_Frontier.png\n\n \"\"\"\n\n if not isinstance(w_frontier, pd.DataFrame):\n raise ValueError(\"w_frontier must be a DataFrame\")\n\n if not isinstance(mu, pd.DataFrame):\n raise ValueError(\"mu must be a DataFrame\")\n\n if not isinstance(cov, pd.DataFrame):\n raise ValueError(\"cov must be a DataFrame\")\n\n if not isinstance(returns, pd.DataFrame):\n raise ValueError(\"returns must be a DataFrame\")\n\n if returns.shape[1] != w_frontier.shape[0]:\n a1 = str(returns.shape)\n a2 = str(w_frontier.shape)\n raise ValueError(\"shapes \" + a1 + \" and \" + a2 + \" not aligned\")\n\n if w is not None:\n if not isinstance(w, pd.DataFrame):\n raise ValueError(\"w must be a DataFrame\")\n\n if w.shape[1] > 1 and w.shape[0] == 0:\n w = w.T\n elif w.shape[1] > 1 and w.shape[0] > 0:\n raise ValueError(\"w must be a column DataFrame\")\n\n if returns.shape[1] != w.shape[0]:\n a1 = str(returns.shape)\n a2 = str(w.shape)\n raise ValueError(\"shapes \" + a1 + \" and \" + a2 + \" not aligned\")\n\n if ax is None:\n ax = plt.gca()\n fig = plt.gcf()\n fig.set_figwidth(width)\n fig.set_figheight(height)\n\n mu_ = np.array(mu, ndmin=2)\n\n ax.set_ylabel(\"Expected Return\")\n item = rmeasures.index(rm)\n x_label = rm_names[item] + \" (\" + rm + \")\"\n ax.set_xlabel(\"Expected Risk - \" + x_label)\n\n title = \"Efficient Frontier Mean - \" + x_label\n ax.set_title(title)\n\n X1 = []\n Y1 = []\n Z1 = []\n\n for i in range(w_frontier.shape[1]):\n weights = np.array(w_frontier.iloc[:, i], ndmin=2).T\n risk = rk.Sharpe_Risk(\n weights, cov=cov, returns=returns, rm=rm, rf=rf, alpha=alpha\n )\n ret = mu_ @ weights\n ret = ret.item()\n ratio = (ret - rf) / risk\n\n X1.append(risk)\n Y1.append(ret)\n Z1.append(ratio)\n\n ax1 = ax.scatter(X1, Y1, c=Z1, cmap=cmap)\n\n if w is not None:\n X2 = []\n Y2 = []\n for i in range(w.shape[1]):\n weights = np.array(w.iloc[:, i], ndmin=2).T\n risk = rk.Sharpe_Risk(\n weights, cov=cov, returns=returns, rm=rm, rf=rf, alpha=alpha\n )\n ret = mu_ @ weights\n ret = ret.item()\n ratio = (ret - rf) / risk\n\n X2.append(risk)\n Y2.append(ret)\n\n ax.scatter(X2, Y2, marker=marker, s=s ** 2, c=c, label=label)\n ax.legend(loc=\"upper left\")\n\n xmin = np.min(X1) - np.abs(np.max(X1) - np.min(X1)) * 0.1\n xmax = np.max(X1) + np.abs(np.max(X1) - np.min(X1)) * 0.1\n ymin = np.min(Y1) - np.abs(np.max(Y1) - np.min(Y1)) * 0.1\n ymax = np.max(Y1) + np.abs(np.max(Y1) - np.min(Y1)) * 0.1\n\n ax.set_ylim(ymin, ymax)\n ax.set_xlim(xmin, xmax)\n\n ax.set_yticklabels([\"{:.4%}\".format(x) for x in ax.get_yticks()])\n ax.set_xticklabels([\"{:.4%}\".format(x) for x in ax.get_xticks()])\n\n ax.tick_params(axis=\"y\", direction=\"in\")\n ax.tick_params(axis=\"x\", 
direction=\"in\")\n\n ax.grid(linestyle=\":\")\n\n colorbar = ax.figure.colorbar(ax1)\n colorbar.set_label(\"Risk Adjusted Return Ratio\")\n\n fig = plt.gcf()\n fig.tight_layout()\n\n return ax\n\n\ndef plot_pie(\n w, title=\"\", others=0.05, nrow=25, cmap=\"tab20\", height=6, width=8, ax=None\n):\n \"\"\"\n Create a pie chart with portfolio weights.\n\n Parameters\n ----------\n w : DataFrame of shape (n_assets, 1)\n Portfolio weights.\n title : str, optional\n Title of the chart. The default is ''.\n others : float, optional\n Percentage of others section. The default is 0.05.\n nrow : int, optional\n Number of rows of the legend. The default is 25.\n cmap : cmap, optional\n Color scale used to plot each asset weight.\n The default is 'tab20'.\n height : float, optional\n Height of the image in inches. The default is 10.\n width : float, optional\n Width of the image in inches. The default is 10.\n ax : matplotlib axis, optional\n If provided, plot on this axis. The default is None.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n ax : matplotlib axis.\n Returns the Axes object with the plot for further tweaking.\n\n Example\n -------\n ::\n\n ax = plf.plot_pie(w=w1, title='Portafolio', height=6, width=10, cmap=\"tab20\", ax=None)\n\n .. image:: images/Pie_Chart.png\n\n\n \"\"\"\n\n if not isinstance(w, pd.DataFrame):\n raise ValueError(\"w must be a DataFrame\")\n\n if w.shape[1] > 1 and w.shape[0] == 0:\n w = w.T\n elif w.shape[1] > 1 and w.shape[0] > 0:\n raise ValueError(\"w must be a column DataFrame\")\n\n if ax is None:\n ax = plt.gca()\n fig = plt.gcf()\n fig.set_figwidth(width)\n fig.set_figheight(height)\n\n labels = w.index.tolist()\n sizes = w.iloc[:, 0].tolist()\n abs_sizes = [np.absolute(s) for s in sizes]\n sizes2 = pd.DataFrame([labels, abs_sizes, sizes]).T\n sizes2.columns = [\"labels\", \"abs_values\", \"values\"]\n sizes2 = sizes2.sort_values(by=[\"abs_values\"], ascending=False)\n sizes2.index = [i for i in range(0, len(labels))]\n sizes3 = sizes2.cumsum()\n sizes3[\"abs_values\"] = sizes3[\"abs_values\"] / sizes3[\"abs_values\"].max()\n l = sizes3[sizes3[\"abs_values\"] >= 1 - others].index.tolist()[0]\n\n a1 = sizes2[\"abs_values\"].sum() - sizes2[sizes2.index <= l][\"abs_values\"].sum()\n a2 = sizes2[\"values\"].sum() - sizes2[sizes2.index <= l][\"values\"].sum()\n item = pd.DataFrame([\"Others\", a1, a2]).T\n item.columns = [\"labels\", \"abs_values\", \"values\"]\n sizes2 = sizes2[sizes2.index <= l]\n sizes2 = sizes2.append(item)\n\n abs_sizes = sizes2[\"abs_values\"].tolist()\n sizes = sizes2[\"values\"].tolist()\n labels = sizes2[\"labels\"].tolist()\n sizes2 = [\"{0:.1%}\".format(i) for i in sizes]\n\n if title == \"\":\n title = \"Portfolio Composition\"\n\n limit = np.round(np.min(sizes), 4)\n if limit < 0:\n title += \" (Areas in Absolute Values)\"\n\n ax.set_title(title)\n\n colormap = cm.get_cmap(cmap)\n colormap = colormap(np.linspace(0, 1, 20))\n\n if cmap == \"gist_rainbow\":\n colormap = colormap[::-1]\n\n cycle = plt.cycler(\"color\", colormap)\n ax.set_prop_cycle(cycle)\n\n size = 0.4\n\n # set up style cycles\n\n wedges, texts = ax.pie(\n abs_sizes,\n radius=1,\n wedgeprops=dict(width=size, edgecolor=\"black\"),\n startangle=-15\n )\n\n # Equal aspect ratio ensures that pie is drawn as a circle.\n\n ax.axis(\"equal\")\n\n n = int(np.ceil(l / nrow))\n\n ax.legend(wedges, labels, loc=\"center left\", bbox_to_anchor=(1, 0.5), ncol=n)\n\n bbox_props = dict(boxstyle=\"square,pad=0.3\", fc=\"w\", ec=\"k\", 
lw=0.72)\n kw = dict(\n xycoords=\"data\",\n textcoords=\"data\",\n arrowprops=dict(arrowstyle=\"-\"),\n bbox=bbox_props,\n zorder=0,\n va=\"center\",\n )\n\n for i, p in enumerate(wedges):\n ang = (p.theta2 - p.theta1) / 2.0 + p.theta1\n y = np.sin(np.deg2rad(ang))\n x = np.cos(np.deg2rad(ang))\n horizontalalignment = {-1: \"right\", 1: \"left\"}[int(np.sign(x))]\n connectionstyle = \"angle,angleA=0,angleB={}\".format(ang)\n kw[\"arrowprops\"].update({\"connectionstyle\": connectionstyle})\n name = str(labels[i]) + \" \" + str(sizes2[i])\n ax.annotate(\n name,\n xy=(x, y),\n xytext=(1.1 * np.sign(x), 1.1 * y),\n horizontalalignment=horizontalalignment,\n **kw\n )\n\n fig = plt.gcf()\n fig.tight_layout()\n\n return ax\n\n\ndef plot_frontier_area(w_frontier, nrow=25, cmap=\"tab20\", height=6, width=10, ax=None):\n r\"\"\"\n Create a chart with the asset composition of the efficient frontier.\n\n Parameters\n ----------\n w_frontier : DataFrame\n Weights of portfolios in the efficient frontier.\n nrow : int, optional\n Number of rows of the legend. The default is 25.\n cmap : cmap, optional\n Color scale used to plot each asset weight.\n The default is 'tab20'.\n height : float, optional\n Height of the image in inches. The default is 6.\n width : float, optional\n Width of the image in inches. The default is 10.\n ax : matplotlib axis, optional\n If provided, plot on this axis. The default is None.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n ax : matplotlib axis.\n Returns the Axes object with the plot for further tweaking.\n\n Example\n -------\n ::\n\n ax = plf.plot_frontier_area(w_frontier=ws, cmap=\"tab20\", height=6, width=10, ax=None)\n\n .. image:: images/Area_Frontier.png\n\n\n \"\"\"\n\n if not isinstance(w_frontier, pd.DataFrame):\n raise ValueError(\"w must be a DataFrame\")\n\n if ax is None:\n ax = plt.gca()\n fig = plt.gcf()\n fig.set_figwidth(width)\n fig.set_figheight(height)\n\n ax.set_title(\"Efficient Frontier's Assets Structure\")\n labels = w_frontier.index.tolist()\n\n colormap = cm.get_cmap(cmap)\n colormap = colormap(np.linspace(0, 1, 20))\n\n if cmap == \"gist_rainbow\":\n colormap = colormap[::-1]\n\n cycle = plt.cycler(\"color\", colormap)\n ax.set_prop_cycle(cycle)\n\n X = w_frontier.columns.tolist()\n\n ax.stackplot(X, w_frontier, labels=labels, alpha=0.7, edgecolor=\"black\")\n\n ax.set_ylim(0, 1)\n ax.set_xlim(0, len(X) - 1)\n\n ax.set_yticklabels([\"{:3.2%}\".format(x) for x in ax.get_yticks()])\n ax.grid(linestyle=\":\")\n\n n = int(np.ceil(len(labels) / nrow))\n\n ax.legend(labels, loc=\"center left\", bbox_to_anchor=(1, 0.5), ncol=n)\n\n fig = plt.gcf()\n fig.tight_layout()\n\n return ax\n\n\ndef plot_risk_con(\n w,\n cov=None,\n returns=None,\n rm=\"MV\",\n rf=0,\n alpha=0.05,\n color=\"tab:blue\",\n height=6,\n width=10,\n ax=None,\n):\n r\"\"\"\n Create a chart with the risk contribution per asset of the portfolio.\n\n Parameters\n ----------\n w : DataFrame of shape (n_assets, 1)\n Portfolio weights.\n cov : DataFrame of shape (n_features, n_features)\n Covariance matrix, where n_features is the number of features.\n returns : DataFrame of shape (n_samples, n_features)\n Features matrix, where n_samples is the number of samples and\n n_features is the number of features.\n rm : str, optional\n Risk measure used to estimate risk contribution.\n The default is 'MV'. 
Posible values are:\n\n - 'MV': Standard Deviation.\n - 'MAD': Mean Absolute Deviation.\n - 'MSV': Semi Standard Deviation.\n - 'FLPM': First Lower Partial Moment (Omega Ratio).\n - 'SLPM': Second Lower Partial Moment (Sortino Ratio).\n - 'CVaR': Conditional Value at Risk.\n - 'EVaR': Conditional Value at Risk.\n - 'WR': Worst Realization (Minimax)\n - 'MDD': Maximum Drawdown of uncompounded returns (Calmar Ratio).\n - 'ADD': Average Drawdown of uncompounded returns.\n - 'DaR': Drawdown at Risk of uncompounded returns.\n - 'CDaR': Conditional Drawdown at Risk of uncompounded returns.\n - 'UCI': Ulcer Index of uncompounded returns.\n\n rf : float, optional\n Risk free rate or minimum aceptable return. The default is 0.\n alpha : float, optional\n Significante level of VaR, CVaR and CDaR. The default is 0.05.\n color : str, optional\n Color used to plot each asset risk contribution.\n The default is 'tab:blue'.\n height : float, optional\n Height of the image in inches. The default is 6.\n width : float, optional\n Width of the image in inches. The default is 10.\n ax : matplotlib axis, optional\n If provided, plot on this axis. The default is None.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n ax : matplotlib axis.\n Returns the Axes object with the plot for further tweaking.\n\n Example\n -------\n ::\n\n ax = plf.plot_risk_con(w=w2, cov=cov, returns=returns, rm='MSV',\n rf=0, alpha=0.05, cmap=\"tab20\", height=6,\n width=10, ax=None)\n\n .. image:: images/Risk_Con.png\n\n \"\"\"\n\n if not isinstance(w, pd.DataFrame):\n raise ValueError(\"w must be a DataFrame\")\n\n if ax is None:\n ax = plt.gca()\n fig = plt.gcf()\n fig.set_figwidth(width)\n fig.set_figheight(height)\n\n item = rmeasures.index(rm)\n title = \"Risk (\" + rm_names[item] + \") Contribution per Asset\"\n ax.set_title(title)\n\n X = w.index.tolist()\n\n RC = rk.Risk_Contribution(w, cov=cov, returns=returns, rm=rm, rf=rf, alpha=alpha)\n\n ax.bar(X, RC, alpha=0.7, color=color, edgecolor=\"black\")\n\n ax.set_xlim(-0.5, len(X) - 0.5)\n\n ax.set_yticks(ax.get_yticks())\n ax.set_yticklabels([\"{:3.5%}\".format(x) for x in ax.get_yticks()])\n ax.grid(linestyle=\":\")\n\n fig = plt.gcf()\n fig.tight_layout()\n\n return ax\n\n\ndef plot_hist(returns, w, alpha=0.05, bins=50, height=6, width=10, ax=None):\n r\"\"\"\n Create a histogram of portfolio returns with the risk measures.\n\n Parameters\n ----------\n returns : DataFrame\n Assets returns.\n w : DataFrame of shape (n_assets, 1)\n Portfolio weights.\n alpha : float, optional\n Significante level of VaR, CVaR and EVaR. The default is 0.05.\n bins : float, optional\n Number of bins of the histogram. The default is 50.\n height : float, optional\n Height of the image in inches. The default is 6.\n width : float, optional\n Width of the image in inches. The default is 10.\n ax : matplotlib axis, optional\n If provided, plot on this axis. The default is None.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n ax : matplotlib axis.\n Returns the Axes object with the plot for further tweaking.\n\n Example\n -------\n ::\n\n ax = plf.plot_hist(returns=Y, w=w1, alpha=0.05, bins=50, height=6, width=10, ax=None)\n\n .. 
image:: images/Histogram.png\n\n \"\"\"\n\n if not isinstance(returns, pd.DataFrame):\n raise ValueError(\"returns must be a DataFrame\")\n\n if not isinstance(w, pd.DataFrame):\n raise ValueError(\"w must be a DataFrame\")\n\n if w.shape[1] > 1 and w.shape[0] == 0:\n w = w.T\n elif w.shape[1] > 1 and w.shape[0] > 0:\n raise ValueError(\"w must be a DataFrame\")\n\n if returns.shape[1] != w.shape[0]:\n a1 = str(returns.shape)\n a2 = str(w.shape)\n raise ValueError(\"shapes \" + a1 + \" and \" + a2 + \" not aligned\")\n\n if ax is None:\n ax = plt.gca()\n fig = plt.gcf()\n fig.set_figwidth(width)\n fig.set_figheight(height)\n\n a = np.array(returns, ndmin=2) @ np.array(w, ndmin=2)\n ax.set_title(\"Portfolio Returns Histogram\")\n n, bins1, patches = ax.hist(\n a, bins, density=1, edgecolor=\"skyblue\", color=\"skyblue\", alpha=0.5\n )\n mu = np.mean(a)\n sigma = np.std(a, axis=0, ddof=1).item()\n risk = [\n mu,\n mu - sigma,\n mu - rk.MAD(a),\n -rk.VaR_Hist(a, alpha),\n -rk.CVaR_Hist(a, alpha),\n -rk.EVaR_Hist(a, alpha)[0],\n -rk.WR(a),\n ]\n label = [\n \"Mean: \" + \"{0:.2%}\".format(risk[0]),\n \"Mean - Std. Dev.(\"\n + \"{0:.2%}\".format(-risk[1] + mu)\n + \"): \"\n + \"{0:.2%}\".format(risk[1]),\n \"Mean - MAD(\"\n + \"{0:.2%}\".format(-risk[2] + mu)\n + \"): \"\n + \"{0:.2%}\".format(risk[2]),\n \"{0:.2%}\".format((1 - alpha)) + \" Confidence VaR: \" + \"{0:.2%}\".format(risk[3]),\n \"{0:.2%}\".format((1 - alpha))\n + \" Confidence CVaR: \"\n + \"{0:.2%}\".format(risk[4]),\n \"{0:.2%}\".format((1 - alpha))\n + \" Confidence EVaR: \"\n + \"{0:.2%}\".format(risk[5]),\n \"Worst Realization: \" + \"{0:.2%}\".format(risk[6]),\n ]\n color = [\"b\", \"r\", \"fuchsia\", \"darkorange\", \"limegreen\", \"dodgerblue\", \"darkgrey\"]\n\n for i, j, k in zip(risk, label, color):\n ax.axvline(x=i, color=k, linestyle=\"-\", label=j)\n\n # add a 'best fit' line\n y = (1 / (np.sqrt(2 * np.pi) * sigma)) * np.exp(\n -0.5 * (1 / sigma * (bins1 - mu)) ** 2\n )\n ax.plot(\n bins1,\n y,\n \"--\",\n color=\"orange\",\n label=\"Normal: $\\mu=\"\n + \"{0:.2%}\".format(mu)\n + \"$%, $\\sigma=\"\n + \"{0:.2%}\".format(sigma)\n + \"$%\",\n )\n\n factor = (np.max(a) - np.min(a)) / bins\n\n ax.set_xticklabels([\"{:3.2%}\".format(x) for x in ax.get_xticks()])\n ax.set_yticklabels([\"{:3.2%}\".format(x * factor) for x in ax.get_yticks()])\n ax.legend(loc=\"upper right\") # , fontsize = 'x-small')\n ax.grid(linestyle=\":\")\n ax.set_ylabel(\"Probability Density\")\n\n fig = plt.gcf()\n fig.tight_layout()\n\n return ax\n\n\ndef plot_drawdown(nav, w, alpha=0.05, height=8, width=10, ax=None):\n r\"\"\"\n Create a chart with the evolution of portfolio prices and drawdown.\n\n Parameters\n ----------\n nav : DataFrame\n Cumulative assets returns.\n w : DataFrame, optional\n A portfolio specified by the user to compare with the efficient\n frontier. The default is None.\n alpha : float, optional\n Significante level of DaR and CDaR. The default is 0.05.\n height : float, optional\n Height of the image in inches. The default is 8.\n width : float, optional\n Width of the image in inches. The default is 10.\n ax : matplotlib axis of size (2,1), optional\n If provided, plot on this axis. The default is None.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n ax : matplotlib axis.\n Returns the Axes object with the plot for further tweaking.\n\n Example\n -------\n ::\n\n nav=port.nav\n\n ax = plf.plot_drawdown(nav=nav, w=w1, alpha=0.05, height=8, width=10, ax=None)\n\n .. 
image:: images/Drawdown.png\n\n \"\"\"\n\n if not isinstance(nav, pd.DataFrame):\n raise ValueError(\"nav must be a DataFrame\")\n\n if not isinstance(w, pd.DataFrame):\n raise ValueError(\"w must be a DataFrame\")\n\n if w.shape[1] > 1 and w.shape[0] == 0:\n w = w.T\n elif w.shape[1] > 1 and w.shape[0] > 0:\n raise ValueError(\"w must be a DataFrame\")\n\n if nav.shape[1] != w.shape[0]:\n a1 = str(nav.shape)\n a2 = str(w.shape)\n raise ValueError(\"shapes \" + a1 + \" and \" + a2 + \" not aligned\")\n\n if ax is None:\n fig = plt.gcf()\n ax = fig.subplots(nrows=2, ncols=1)\n ax = ax.flatten()\n fig.set_figwidth(width)\n fig.set_figheight(height)\n\n index = nav.index.tolist()\n\n a = np.array(nav, ndmin=2)\n a = np.insert(a, 0, 0, axis=0)\n a = np.diff(a, axis=0)\n a = np.array(a, ndmin=2) @ np.array(w, ndmin=2)\n prices = 1 + np.insert(a, 0, 0, axis=0)\n prices = np.cumprod(prices, axis=0)\n prices = np.ravel(prices).tolist()\n prices2 = 1 + np.array(np.cumsum(a, axis=0))\n prices2 = np.ravel(prices2).tolist()\n del prices[0]\n\n DD = []\n peak = -99999\n for i in range(0, len(prices)):\n if prices2[i] > peak:\n peak = prices2[i]\n DD.append((peak - prices2[i]))\n DD = -np.array(DD)\n titles = [\n \"Historical Compounded Cumulative Returns\",\n \"Historical Uncompounded Drawdown\",\n ]\n data = [prices, DD]\n color1 = [\"b\", \"orange\"]\n risk = [\n -rk.MDD_Abs(a),\n -rk.ADD_Abs(a),\n -rk.DaR_Abs(a, alpha),\n -rk.CDaR_Abs(a, alpha),\n -rk.UCI_Abs(a),\n ]\n label = [\n \"Maximum Drawdown: \" + \"{0:.2%}\".format(risk[0]),\n \"Average Drawdown: \" + \"{0:.2%}\".format(risk[1]),\n \"{0:.2%}\".format((1 - alpha)) + \" Confidence DaR: \" + \"{0:.2%}\".format(risk[2]),\n \"{0:.2%}\".format((1 - alpha))\n + \" Confidence CDaR: \"\n + \"{0:.2%}\".format(risk[3]),\n \"Ulcer Index: \" + \"{0:.2%}\".format(risk[4]),\n ]\n color2 = [\"r\", \"b\", \"limegreen\", \"dodgerblue\", \"fuchsia\"]\n\n j = 0\n\n ymin = np.min(DD) * 1.5\n\n for i in ax:\n i.clear()\n i.plot_date(index, data[j], \"-\", color=color1[j])\n if j == 1:\n i.fill_between(index, 0, data[j], facecolor=color1[j], alpha=0.3)\n for k in range(0, len(risk)):\n i.axhline(y=risk[k], color=color2[k], linestyle=\"-\", label=label[k])\n i.set_ylim(ymin, 0)\n i.legend(loc=\"lower right\") # , fontsize = 'x-small')\n i.set_title(titles[j])\n i.set_yticklabels([\"{:3.2%}\".format(x) for x in i.get_yticks()])\n i.grid(linestyle=\":\")\n j = j + 1\n\n fig = plt.gcf()\n fig.tight_layout()\n\n return ax\n\n\ndef plot_table(returns, w, MAR=0, alpha=0.05, height=9, width=12, ax=None):\n r\"\"\"\n Create a table with information about risk measures and risk adjusted\n return ratios.\n\n Parameters\n ----------\n returns : DataFrame\n Assets returns.\n w : DataFrame\n Portfolio weights.\n MAR: float, optional\n Minimum acceptable return.\n alpha: float, optional\n Significance level for VaR, CVaR, EVaR, DaR and CDaR.\n height : float, optional\n Height of the image in inches. The default is 9.\n width : float, optional\n Width of the image in inches. The default is 12.\n ax : matplotlib axis, optional\n If provided, plot on this axis. The default is None.\n\n Raises\n ------\n ValueError\n When the value cannot be calculated.\n\n Returns\n -------\n ax : matplotlib axis\n Returns the Axes object with the plot for further tweaking.\n\n Example\n -------\n ::\n\n ax = plf.plot_table(returns=Y, w=ws, MAR=0, alpha=0.05, ax=None)\n\n .. 
image:: images/Port_Table.png\n\n\n \"\"\"\n if not isinstance(returns, pd.DataFrame):\n raise ValueError(\"returns must be a DataFrame\")\n\n if not isinstance(w, pd.DataFrame):\n raise ValueError(\"w must be a DataFrame\")\n\n if returns.shape[1] != w.shape[0]:\n a1 = str(returns.shape)\n a2 = str(w.shape)\n raise ValueError(\"shapes \" + a1 + \" and \" + a2 + \" not aligned\")\n\n if ax is None:\n ax = plt.gca()\n fig = plt.gcf()\n fig.set_figwidth(width)\n fig.set_figheight(height)\n\n mu = returns.mean()\n cov = returns.cov()\n\n X = returns @ w\n X = X.to_numpy().ravel()\n\n rowLabels = [\n \"Profitability and Other Inputs\",\n \"Mean Return\",\n \"Compounded Cummulated Return\",\n \"Minimum Acceptable Return (MAR)\",\n \"Significance Level\",\n \"\",\n \"Risk Measures based on Returns\",\n \"Standard Deviation\",\n \"Mean Absolute Deviation (MAD)\",\n \"Semi Standard Deviation\",\n \"First Lower Partial Moment (FLPM)\",\n \"Second Lower Partial Moment (SLPM)\",\n \"Value at Risk (VaR)\",\n \"Conditional Value at Risk (CVaR)\",\n \"Entropic Value at Risk (EVaR)\",\n \"Worst Realization\",\n \"Skewness\",\n \"Kurtosis\",\n \"\",\n \"Risk Measures based on Drawdowns (*)\",\n \"Max Drawdown (MDD)\",\n \"Average Drawdown (ADD)\",\n \"Drawdown at Risk (DaR)\",\n \"Conditional Drawdown at Risk (CDaR)\",\n \"Ulcer Index\",\n \"(*) Using uncompounded cumulated returns\",\n ]\n\n indicators = [\n \"\",\n (mu @ w).to_numpy().item(),\n np.prod(1 + X) - 1,\n MAR,\n alpha,\n \"\",\n \"\",\n np.sqrt(w.T @ cov @ w).to_numpy().item(),\n rk.MAD(X),\n rk.SemiDeviation(X),\n rk.LPM(X, MAR=MAR, p=1),\n rk.LPM(X, MAR=MAR, p=2),\n rk.VaR_Hist(X, alpha=alpha),\n rk.CVaR_Hist(X, alpha=alpha),\n rk.EVaR_Hist(X, alpha=alpha)[0],\n rk.WR(X),\n st.skew(X, bias=False),\n st.kurtosis(X, bias=False),\n \"\",\n \"\",\n rk.MDD_Abs(X),\n rk.ADD_Abs(X),\n rk.DaR_Abs(X),\n rk.CDaR_Abs(X, alpha=alpha),\n rk.UCI_Abs(X),\n \"\",\n ]\n\n ratios = []\n for i in range(len(indicators)):\n if i < 6 or indicators[i] == \"\" or rowLabels[i] in [\"Skewness\", \"Kurtosis\"]:\n ratios.append(\"\")\n else:\n ratio = (indicators[1] - MAR) / indicators[i]\n ratios.append(ratio * 100)\n\n for i in range(len(indicators)):\n if indicators[i] != \"\":\n if rowLabels[i] in [\"Skewness\", \"Kurtosis\"]:\n indicators[i] = \"{:.5f}\".format(indicators[i])\n else:\n indicators[i] = \"{:.4%}\".format(indicators[i])\n if ratios[i] != \"\":\n ratios[i] = \"{:.6f}\".format(ratios[i])\n\n data = pd.DataFrame({\"A\": rowLabels, \"B\": indicators, \"C\": ratios}).to_numpy()\n\n ax.set_axis_off()\n ax.axis(\"tight\")\n ax.axis(\"off\")\n\n colLabels = [\"\", \"Values\", \"(Return - MAR)/Risk x 100\"]\n colWidths = [0.45, 0.275, 0.275]\n rowHeight = 0.07\n\n table = ax.table(\n cellText=data,\n colLabels=colLabels,\n colWidths=colWidths,\n cellLoc=\"center\",\n loc=\"upper left\",\n bbox=[-0.03, 0, 1, 1],\n )\n\n table.auto_set_font_size(False)\n\n cellDict = table.get_celld()\n k = 1\n\n rowHeight = 1 / len(rowLabels)\n\n for i in range(0, len(colLabels)):\n cellDict[(0, i)].set_text_props(weight=\"bold\", color=\"white\", size=\"x-large\")\n cellDict[(0, i)].set_facecolor(\"darkblue\")\n cellDict[(0, i)].set_edgecolor(\"white\")\n cellDict[(0, i)].set_height(rowHeight)\n for j in range(1, len(rowLabels) + 1):\n cellDict[(j, 0)].set_text_props(\n weight=\"bold\", color=\"black\", size=\"x-large\", ha=\"left\"\n )\n cellDict[(j, i)].set_text_props(color=\"black\", size=\"x-large\")\n cellDict[(j, 0)].set_edgecolor(\"white\")\n cellDict[(j, 
i)].set_edgecolor(\"white\")\n if k % 2 != 0:\n cellDict[(j, 0)].set_facecolor(\"whitesmoke\")\n cellDict[(j, i)].set_facecolor(\"whitesmoke\")\n if j in [6, 19]:\n cellDict[(j, 0)].set_facecolor(\"white\")\n cellDict[(j, i)].set_facecolor(\"white\")\n if j in [1, 7, 20]:\n cellDict[(j, 0)].set_text_props(color=\"white\")\n cellDict[(j, 0)].set_facecolor(\"orange\")\n cellDict[(j, i)].set_facecolor(\"orange\")\n k = 1\n k += 1\n\n cellDict[(j, i)].set_height(rowHeight)\n\n cellDict[(len(rowLabels), 0)].set_text_props(\n weight=\"normal\", color=\"black\", size=\"large\"\n )\n cellDict[(len(rowLabels), 0)].set_facecolor(\"white\")\n\n fig = plt.gcf()\n fig.tight_layout()\n\n return ax\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"numpy.cumsum",
"pandas.DataFrame",
"numpy.max",
"numpy.mean",
"numpy.exp",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"numpy.ceil",
"numpy.std",
"numpy.diff",
"numpy.insert",
"numpy.ravel",
"scipy.stats.skew",
"numpy.min",
"numpy.cumprod",
"numpy.deg2rad",
"scipy.stats.kurtosis",
"numpy.array",
"numpy.absolute",
"numpy.sign",
"numpy.prod",
"matplotlib.pyplot.cycler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
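For quick reference, a hedged usage sketch of `plot_series` from the module in the record above, using random returns and a single equally weighted portfolio. The import path is an assumption derived from the file path shown for this record (riskfolio/PlotFunctions.py), and the DataFrame shapes are chosen to satisfy the checks inside the function (returns columns must line up with the weight index):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import riskfolio.PlotFunctions as plf  # assumed import path for the module above

assets = ["A", "B", "C", "D"]
rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.normal(0.0005, 0.01, size=(250, len(assets))),
                       columns=assets,
                       index=pd.bdate_range("2020-01-01", periods=250))
# weights of shape (n_assets, n_portfolios); here one equally weighted portfolio
w = pd.DataFrame({"EqW": [1.0 / len(assets)] * len(assets)}, index=assets)

ax = plf.plot_series(returns=returns, w=w, cmap="tab20", height=6, width=10, ax=None)
plt.show()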
fossabot/asm2vec-pytorch | [
"3c4828d5ba80c55a36adb43e5016f4f1f3443041"
] | [
"scripts/compare.py"
] | [
"import torch\nimport torch.nn as nn\nimport click\nimport asm2vec\n\ndef cosine_similarity(v1, v2):\n return (v1 @ v2 / (v1.norm() * v2.norm())).item()\n\[email protected]()\[email protected]('-i1', '--input1', 'ipath1', help='target function 1', required=True)\[email protected]('-i2', '--input2', 'ipath2', help='target function 2', required=True)\[email protected]('-m', '--model', 'mpath', help='model path', required=True)\[email protected]('-e', '--epochs', default=10, help='training epochs', show_default=True)\[email protected]('-c', '--device', default='auto', help='hardware device to be used: cpu / cuda / auto', show_default=True)\[email protected]('-q', '--quiet', help='do not print training process', is_flag=True)\ndef cli(ipath1, ipath2, mpath, epochs, device, quiet):\n if device == 'auto':\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n # load model, tokens\n model, tokens = asm2vec.utils.load_model(mpath, device=device)\n functions, tokens_new = asm2vec.utils.load_data([ipath1, ipath2])\n tokens.update(tokens_new)\n model.update(2, tokens.size())\n model = model.to(device)\n \n # train function embedding\n model = asm2vec.utils.train(functions, tokens, model=model, epochs=epochs, device=device, mode='test', quiet=quiet)\n\n # compare 2 function vectors\n v1, v2 = model.to('cpu').embeddings_f(torch.tensor([0, 1]))\n\n print(f'cosine similarity : {cosine_similarity(v1, v2):.6f}')\n\nif __name__ == '__main__':\n cli()\n"
] | [
[
"torch.cuda.is_available",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
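The similarity metric in compare.py above is a plain cosine between the two estimated function embeddings, which the script obtains via `model.embeddings_f(torch.tensor([0, 1]))` after a short `mode='test'` training pass. A stand-alone numeric check of that formula with dummy vectors (torch only, no asm2vec model needed):

import torch

def cosine_similarity(v1, v2):
    # same formula as in compare.py: <v1, v2> / (|v1| * |v2|)
    return (v1 @ v2 / (v1.norm() * v2.norm())).item()

v1 = torch.tensor([0.2, 1.3, -0.5, 0.8])
v2 = torch.tensor([0.1, 1.1, -0.4, 0.9])
print(f"cosine similarity : {cosine_similarity(v1, v2):.6f}")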
edornd/ProDA | [
"ffb092afbbde95e4ca29cb1ec199f9685f6601fb"
] | [
"LoveDA/uda/AdaptSegNet_train.py"
] | [
"import argparse\nimport torch\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport os.path as osp\n# from module.CLAN_G import Deeplabv2\nfrom module.Encoder import Deeplabv2\nfrom module.Discriminator import FCDiscriminator\nfrom data.loveda import LoveDALoader\nfrom ever.core.iterator import Iterator\nfrom utils.tools import *\nfrom tqdm import tqdm\nfrom eval import evaluate\nfrom torch.nn.utils import clip_grad\n\n\nparser = argparse.ArgumentParser(description='Run AdaptSeg methods.')\n\nparser.add_argument('--config_path', type=str,\n help='config path')\nargs = parser.parse_args()\ncfg = import_config(args.config_path)\n\n\ndef main():\n os.makedirs(cfg.SNAPSHOT_DIR, exist_ok=True)\n logger = get_console_file_logger(name='AdaptSegNet', logdir=cfg.SNAPSHOT_DIR)\n # Create network\n # model = Deeplabv2(dict(\n # backbone=dict(\n # resnet_type='resnet50',\n # output_stride=16,\n # pretrained=True,\n # multi_layer=True,\n # cascade=True)\n # ))\n model = Deeplabv2(dict(\n backbone=dict(\n resnet_type='resnet50',\n output_stride=16,\n pretrained=True,\n ),\n multi_layer=True,\n cascade=True,\n use_ppm=False,\n ppm=dict(\n num_classes=7,\n use_aux=False,\n ),\n inchannels=2048,\n num_classes=7\n ))\n model.train()\n model.cuda()\n logger.info('exp = %s' % cfg.SNAPSHOT_DIR)\n # init D\n model_D1 = FCDiscriminator(cfg.NUM_CLASSES)\n model_D2 = FCDiscriminator(cfg.NUM_CLASSES)\n\n model_D1.train()\n model_D1.cuda()\n\n model_D2.train()\n model_D2.cuda()\n count_model_parameters(model, logger)\n count_model_parameters(model_D1, logger)\n count_model_parameters(model_D2, logger)\n\n trainloader = LoveDALoader(cfg.SOURCE_DATA_CONFIG)\n trainloader_iter = Iterator(trainloader)\n targetloader = LoveDALoader(cfg.TARGET_DATA_CONFIG)\n targetloader_iter = Iterator(targetloader)\n\n epochs = cfg.NUM_STEPS_STOP / len(trainloader)\n logger.info('epochs ~= %.3f' % epochs)\n\n\n optimizer = optim.SGD(model.parameters(),\n lr=cfg.LEARNING_RATE, momentum=cfg.MOMENTUM, weight_decay=cfg.WEIGHT_DECAY)\n optimizer.zero_grad()\n\n optimizer_D1 = optim.Adam(model_D1.parameters(), lr=cfg.LEARNING_RATE_D, betas=(0.9, 0.99))\n optimizer_D1.zero_grad()\n\n optimizer_D2 = optim.Adam(model_D2.parameters(), lr=cfg.LEARNING_RATE_D, betas=(0.9, 0.99))\n optimizer_D2.zero_grad()\n\n source_label = 0\n target_label = 1\n\n for i_iter in tqdm(range(cfg.NUM_STEPS_STOP)):\n\n loss_seg_value1 = 0\n loss_adv_target_value1 = 0\n loss_D_value1 = 0\n\n loss_seg_value2 = 0\n loss_adv_target_value2 = 0\n loss_D_value2 = 0\n\n optimizer.zero_grad()\n G_lr = adjust_learning_rate(optimizer, i_iter, cfg)\n\n optimizer_D1.zero_grad()\n optimizer_D2.zero_grad()\n D_lr = adjust_learning_rate_D(optimizer_D1, i_iter, cfg)\n adjust_learning_rate_D(optimizer_D2, i_iter, cfg)\n\n for sub_i in range(cfg.ITER_SIZE):\n # train G\n # don't accumulate grads in D\n for param in model_D1.parameters():\n param.requires_grad = False\n\n for param in model_D2.parameters():\n param.requires_grad = False\n # train with source\n\n batch = trainloader_iter.next()\n images, labels = batch[0]\n images = Variable(images).cuda()\n\n pred1, pred2 = model(images)\n\n\n loss_seg1 = loss_calc(pred1, labels['cls'].cuda())\n loss_seg2 = loss_calc(pred2, labels['cls'].cuda())\n loss = loss_seg2 + cfg.LAMBDA_SEG * loss_seg1\n\n # proper normalization\n loss = loss / cfg.ITER_SIZE\n loss.backward()\n loss_seg_value1 += loss_seg1.data.cpu().numpy() / cfg.ITER_SIZE\n loss_seg_value2 += loss_seg2.data.cpu().numpy() / cfg.ITER_SIZE\n\n # train with 
target\n batch = targetloader_iter.next()\n images, labels = batch[0]\n images = Variable(images).cuda()\n\n pred_target1, pred_target2 = model(images)\n\n D_out1 = model_D1(F.softmax(pred_target1))\n D_out2 = model_D2(F.softmax(pred_target2))\n\n loss_adv_target1 = bce_loss(D_out1,\n Variable(torch.FloatTensor(D_out1.data.size()).fill_(source_label)).cuda())\n\n loss_adv_target2 = bce_loss(D_out2,\n Variable(torch.FloatTensor(D_out2.data.size()).fill_(source_label)).cuda())\n\n loss = cfg.LAMBDA_ADV_TARGET1 * loss_adv_target1 + cfg.LAMBDA_ADV_TARGET2 * loss_adv_target2\n loss = loss / cfg.ITER_SIZE\n loss.backward()\n loss_adv_target_value1 += loss_adv_target1.data.cpu().numpy() / cfg.ITER_SIZE\n loss_adv_target_value2 += loss_adv_target2.data.cpu().numpy() / cfg.ITER_SIZE\n\n # train D\n\n # bring back requires_grad\n for param in model_D1.parameters():\n param.requires_grad = True\n\n for param in model_D2.parameters():\n param.requires_grad = True\n\n # train with source\n pred1 = pred1.detach()\n pred2 = pred2.detach()\n\n D_out1 = model_D1(F.softmax(pred1))\n D_out2 = model_D2(F.softmax(pred2))\n\n loss_D1 = bce_loss(D_out1,\n Variable(torch.FloatTensor(D_out1.data.size()).fill_(source_label)).cuda())\n\n loss_D2 = bce_loss(D_out2,\n Variable(torch.FloatTensor(D_out2.data.size()).fill_(source_label)).cuda())\n\n loss_D1 = loss_D1 / cfg.ITER_SIZE / 2\n loss_D2 = loss_D2 / cfg.ITER_SIZE / 2\n\n loss_D1.backward()\n loss_D2.backward()\n\n loss_D_value1 += loss_D1.data.cpu().numpy()\n loss_D_value2 += loss_D2.data.cpu().numpy()\n\n # train with target\n pred_target1 = pred_target1.detach()\n pred_target2 = pred_target2.detach()\n\n D_out1 = model_D1(F.softmax(pred_target1))\n D_out2 = model_D2(F.softmax(pred_target2))\n\n loss_D1 = bce_loss(D_out1,\n Variable(torch.FloatTensor(D_out1.data.size()).fill_(target_label)).cuda())\n\n loss_D2 = bce_loss(D_out2,\n Variable(torch.FloatTensor(D_out2.data.size()).fill_(target_label)).cuda())\n\n loss_D1 = loss_D1 / cfg.ITER_SIZE / 2\n loss_D2 = loss_D2 / cfg.ITER_SIZE / 2\n\n loss_D1.backward()\n loss_D2.backward()\n\n loss_D_value1 += loss_D1.data.cpu().numpy()\n loss_D_value2 += loss_D2.data.cpu().numpy()\n\n clip_grad.clip_grad_norm_(filter(lambda p: p.requires_grad, model.parameters()), max_norm=35, norm_type=2)\n clip_grad.clip_grad_norm_(filter(lambda p: p.requires_grad, model_D1.parameters()), max_norm=35, norm_type=2)\n clip_grad.clip_grad_norm_(filter(lambda p: p.requires_grad, model_D2.parameters()), max_norm=35, norm_type=2)\n optimizer.step()\n optimizer_D1.step()\n optimizer_D2.step()\n\n if i_iter % 50 == 0:\n logger.info('exp = {}'.format(cfg.SNAPSHOT_DIR))\n logger.info(\n 'iter = %d loss_seg1 = %.3f loss_seg2 = %.3f loss_adv1 = %.3f, loss_adv2 = %.3f loss_D1 = %.3f loss_D2 = %.3f G_lr = %.5f D_lr = %.5f' % (\n i_iter, loss_seg_value1, loss_seg_value2, loss_adv_target_value1, loss_adv_target_value2, loss_D_value1, loss_D_value2, G_lr, D_lr)\n )\n\n if i_iter >= cfg.NUM_STEPS_STOP - 1:\n print('save model ...')\n ckpt_path = osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(cfg.NUM_STEPS_STOP) + '.pth')\n torch.save(model.state_dict(), ckpt_path)\n torch.save(model_D1.state_dict(), osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(cfg.NUM_STEPS_STOP) + '_D1.pth'))\n torch.save(model_D2.state_dict(), osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(cfg.NUM_STEPS_STOP) + '_D2.pth'))\n evaluate(model, cfg, True, ckpt_path, logger)\n break\n\n if i_iter % cfg.EVAL_EVERY == 0 and i_iter != 0:\n print('taking snapshot ...')\n ckpt_path = 
osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(i_iter) + '.pth')\n torch.save(model.state_dict(), ckpt_path)\n torch.save(model_D1.state_dict(), osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(i_iter) + '_D1.pth'))\n torch.save(model_D2.state_dict(), osp.join(cfg.SNAPSHOT_DIR, cfg.TARGET_SET + str(i_iter) + '_D2.pth'))\n evaluate(model, cfg, True, ckpt_path, logger)\n model.train()\n\n\nif __name__ == '__main__':\n seed_torch(2333)\n main()\n"
] | [
[
"torch.autograd.Variable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
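The AdaptSegNet training loop above alternates generator and discriminator updates by toggling `requires_grad` on the discriminators and detaching the segmentation outputs. A compressed sketch of that pattern with stand-in modules; the 1x1 convolutions, BCEWithLogitsLoss, learning rates and tensor sizes below are illustrative only, not the repo's actual networks or its `bce_loss` helper:

import torch
import torch.nn as nn
import torch.nn.functional as F

G = nn.Conv2d(3, 7, 1)     # stand-in for the segmentation network's output head
D = nn.Conv2d(7, 1, 1)     # stand-in for FCDiscriminator
opt_G = torch.optim.SGD(G.parameters(), lr=1e-3, momentum=0.9)
opt_D = torch.optim.Adam(D.parameters(), lr=1e-4, betas=(0.9, 0.99))
bce = nn.BCEWithLogitsLoss()
source_label, target_label = 0.0, 1.0

x_tgt = torch.randn(2, 3, 16, 16)   # unlabeled target-domain batch

# G step: freeze D, push target predictions to look like "source" to the discriminator
for p in D.parameters():
    p.requires_grad = False
pred_tgt = G(x_tgt)
d_out = D(F.softmax(pred_tgt, dim=1))
loss_adv = bce(d_out, torch.full_like(d_out, source_label))
opt_G.zero_grad(); loss_adv.backward(); opt_G.step()

# D step: unfreeze D, classify detached predictions by their true domain
for p in D.parameters():
    p.requires_grad = True
d_out = D(F.softmax(pred_tgt.detach(), dim=1))
loss_d = bce(d_out, torch.full_like(d_out, target_label))
opt_D.zero_grad(); loss_d.backward(); opt_D.step()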
MisterJBro/SimuRLacra | [
"525a99893aca553d3dd352b9882a294d684a6935"
] | [
"Pyrado/pyrado/algorithms/step_based/dql.py"
] | [
"# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and\n# Technical University of Darmstadt.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,\n# or Technical University of Darmstadt, nor the names of its contributors may\n# be used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,\n# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;\n# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER\n# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\nfrom copy import deepcopy\nfrom typing import Optional, Tuple\n\nimport numpy as np\nimport torch as to\nimport torch.nn as nn\nfrom tqdm import tqdm\n\nimport pyrado\nfrom pyrado.algorithms.base import Algorithm\nfrom pyrado.algorithms.step_based.value_based import ValueBased\nfrom pyrado.environments.base import Env\nfrom pyrado.exploration.stochastic_action import EpsGreedyExplStrat\nfrom pyrado.logger.step import StepLogger\nfrom pyrado.policies.base import Policy\nfrom pyrado.policies.feed_back.fnn import DiscreteActQValPolicy\nfrom pyrado.sampling.cvar_sampler import CVaRSampler\nfrom pyrado.sampling.parallel_rollout_sampler import ParallelRolloutSampler\n\n\nclass DQL(ValueBased):\n \"\"\"\n Deep Q-Learning (without bells and whistles)\n\n .. seealso::\n [1] V. Mnih et.al., \"Human-level control through deep reinforcement learning\", Nature, 2015\n \"\"\"\n\n name: str = \"dql\"\n\n def __init__(\n self,\n save_dir: pyrado.PathLike,\n env: Env,\n policy: DiscreteActQValPolicy,\n memory_size: int,\n eps_init: float,\n eps_schedule_gamma: float,\n gamma: float,\n max_iter: int,\n num_updates_per_step: int,\n target_update_intvl: Optional[int] = 5,\n num_init_memory_steps: Optional[int] = None,\n min_rollouts: Optional[int] = None,\n min_steps: Optional[int] = None,\n batch_size: int = 256,\n eval_intvl: int = 100,\n max_grad_norm: float = 0.5,\n lr: float = 5e-4,\n lr_scheduler=None,\n lr_scheduler_hparam: Optional[dict] = None,\n num_workers: int = 4,\n logger: Optional[StepLogger] = None,\n ):\n r\"\"\"\n Constructor\n\n :param save_dir: directory to save the snapshots i.e. 
the results in\n :param env: the environment which the policy operates\n :param policy: (current) Q-network updated by this algorithm\n :param memory_size: number of transitions in the replay memory buffer\n :param eps_init: initial value for the probability of taking a random action, constant if `eps_schedule_gamma=1`\n :param eps_schedule_gamma: temporal discount factor for the exponential decay of epsilon\n :param gamma: temporal discount factor for the state values\n :param max_iter: maximum number of iterations (i.e. policy updates) that this algorithm runs\n :param num_updates_per_step: number of (batched) updates per algorithm steps\n :param target_update_intvl: number of iterations that pass before updating the `qfcn_targ` network\n :param num_init_memory_steps: number of samples used to initially fill the replay buffer with, pass `None` to\n fill the buffer completely\n :param min_rollouts: minimum number of rollouts sampled per policy update batch\n :param min_steps: minimum number of state transitions sampled per policy update batch\n :param batch_size: number of samples per policy update batch\n :param eval_intvl: interval in which the evaluation rollouts are collected, also the interval in which the\n logger prints the summary statistics\n :param max_grad_norm: maximum L2 norm of the gradients for clipping, set to `None` to disable gradient clipping\n :param lr: (initial) learning rate for the optimizer which can be by modified by the scheduler.\n By default, the learning rate is constant.\n :param lr_scheduler: learning rate scheduler that does one step per epoch (pass through the whole data set)\n :param lr_scheduler_hparam: hyper-parameters for the learning rate scheduler\n :param num_workers: number of environments for parallel sampling\n :param logger: logger for every step of the algorithm, if `None` the default logger will be created\n \"\"\"\n if not isinstance(policy, DiscreteActQValPolicy):\n raise pyrado.TypeErr(given=policy, expected_type=DiscreteActQValPolicy)\n\n # Call ValueBased's constructor\n super().__init__(\n save_dir=save_dir,\n env=env,\n policy=policy,\n memory_size=memory_size,\n gamma=gamma,\n max_iter=max_iter,\n num_updates_per_step=num_updates_per_step,\n target_update_intvl=target_update_intvl,\n num_init_memory_steps=num_init_memory_steps,\n min_rollouts=min_rollouts,\n min_steps=min_steps,\n batch_size=batch_size,\n eval_intvl=eval_intvl,\n max_grad_norm=max_grad_norm,\n num_workers=num_workers,\n logger=logger,\n )\n\n self.qfcn_targ = deepcopy(self._policy).eval() # will not be trained using the optimizer\n self.eps = eps_init\n\n # Create sampler for exploration during training\n self._expl_strat = EpsGreedyExplStrat(self._policy, eps_init, eps_schedule_gamma)\n self._sampler = ParallelRolloutSampler(\n self._env,\n self._expl_strat,\n num_workers=num_workers if min_steps != 1 else 1,\n min_steps=min_steps,\n min_rollouts=min_rollouts,\n )\n\n # Q-function optimizer\n self.optim = to.optim.RMSprop([{\"params\": self._policy.parameters()}], lr=lr)\n\n # Learning rate scheduler\n self._lr_scheduler = lr_scheduler\n self._lr_scheduler_hparam = lr_scheduler_hparam\n if lr_scheduler is not None:\n self._lr_scheduler = lr_scheduler(self.optim, **lr_scheduler_hparam)\n\n @property\n def sampler(self) -> ParallelRolloutSampler:\n return self._sampler\n\n @sampler.setter\n def sampler(self, sampler: ParallelRolloutSampler):\n if not isinstance(sampler, (ParallelRolloutSampler, CVaRSampler)):\n raise pyrado.TypeErr(given=sampler, 
expected_type=(ParallelRolloutSampler, CVaRSampler))\n self._sampler = sampler\n\n @staticmethod\n def loss_fcn(q_vals: to.Tensor, expected_q_vals: to.Tensor) -> to.Tensor:\n r\"\"\"\n The Huber loss function on the one-step TD error $\\delta = Q(s,a) - (r + \\gamma \\max_a Q(s^\\prime, a))$.\n\n :param q_vals: state-action values $Q(s,a)$, from policy network\n :param expected_q_vals: expected state-action values $r + \\gamma \\max_a Q(s^\\prime, a)$, from target network\n :return: loss value\n \"\"\"\n return nn.functional.smooth_l1_loss(q_vals, expected_q_vals)\n\n def update(self):\n \"\"\"Update the policy's and qfcn_targ Q-function's parameters on transitions sampled from the replay memory.\"\"\"\n losses = to.zeros(self.num_batch_updates)\n policy_grad_norm = to.zeros(self.num_batch_updates)\n\n for b in tqdm(\n range(self.num_batch_updates),\n total=self.num_batch_updates,\n desc=f\"Updating\",\n unit=\"batches\",\n file=sys.stdout,\n leave=False,\n ):\n\n # Sample steps and the associated next step from the replay memory\n steps, next_steps = self._memory.sample(self.batch_size)\n steps.torch(data_type=to.get_default_dtype())\n next_steps.torch(data_type=to.get_default_dtype())\n\n # Create masks for the non-final observations\n not_done = to.from_numpy(1.0 - steps.done).to(device=self.policy.device, dtype=to.get_default_dtype())\n\n # Compute the state-action values Q(s,a) using the current DQN policy\n q_vals = self.expl_strat.policy.q_values_argmax(steps.observations)\n\n # Compute the second term of TD-error\n with to.no_grad():\n next_v_vals = self.qfcn_targ.q_values_argmax(next_steps.observations)\n expected_q_val = steps.rewards.to(self.policy.device) + not_done * self.gamma * next_v_vals\n\n # Compute the loss, clip the gradients if desired, and do one optimization step\n loss = DQL.loss_fcn(q_vals, expected_q_val)\n losses[b] = loss.data\n self.optim.zero_grad()\n loss.backward()\n policy_grad_norm[b] = Algorithm.clip_grad(self.expl_strat.policy, self.max_grad_norm)\n self.optim.step()\n\n # Update the qfcn_targ network by copying all weights and biases from the DQN policy\n if (self._curr_iter * self.num_batch_updates + b) % self.target_update_intvl == 0:\n self.qfcn_targ.load_state_dict(self.expl_strat.policy.state_dict())\n\n # Schedule the exploration parameter epsilon\n self.expl_strat.schedule_eps(self._curr_iter)\n\n # Update the learning rate if a scheduler has been specified\n if self._lr_scheduler is not None:\n self._lr_scheduler.step()\n\n # Logging\n with to.no_grad():\n self.logger.add_value(\"loss after\", to.mean(losses), 4)\n self.logger.add_value(\"expl strat eps\", self.expl_strat.eps, 4)\n self.logger.add_value(\"avg grad norm policy\", to.mean(policy_grad_norm), 4)\n if self._lr_scheduler is not None:\n self.logger.add_value(\"avg lr\", np.mean(self._lr_scheduler.get_last_lr()), 6)\n\n def reset(self, seed: Optional[int] = None):\n # Reset samplers, replay memory, exploration strategy, internal variables and the random seeds\n super().reset(seed)\n\n # Reset the learning rate scheduler\n if self._lr_scheduler is not None:\n self._lr_scheduler.last_epoch = -1\n\n def init_modules(self, warmstart: bool, suffix: str = \"\", prefix: str = \"\", **kwargs):\n # Initialize the policy\n super().init_modules(warmstart, suffix, prefix, **kwargs)\n\n if prefix == \"\":\n prefix = f\"iter_{self._curr_iter - 1}\"\n\n tpi = kwargs.get(\"target_param_init\", None)\n\n if warmstart and tpi is not None:\n self.qfcn_targ.init_param(tpi)\n elif warmstart and tpi is None 
and self._curr_iter > 0:\n self.qfcn_targ = pyrado.load(\n \"qfcn_target.pt\", self.save_dir, prefix=prefix, suffix=suffix, obj=self.qfcn_targ\n )\n else:\n # Reset the target Q-function\n self.qfcn_targ.init_param()\n\n def save_snapshot(self, meta_info: dict = None):\n super().save_snapshot(meta_info)\n\n if meta_info is None:\n # This algorithm instance is not a subroutine of another algorithm\n pyrado.save(self.qfcn_targ, \"qfcn_target.pt\", self.save_dir, use_state_dict=True)\n\n else:\n # This algorithm instance is a subroutine of another algorithm\n pyrado.save(\n self.qfcn_targ,\n \"qfcn_target.pt\",\n self.save_dir,\n prefix=meta_info.get(\"prefix\", \"\"),\n suffix=meta_info.get(\"suffix\", \"\"),\n use_state_dict=True,\n )\n\n def load_snapshot(self, parsed_args) -> Tuple[Env, Policy, dict]:\n env, policy, extra = super().load_snapshot(parsed_args)\n\n # Algorithm specific\n ex_dir = self._save_dir or getattr(parsed_args, \"dir\", None)\n if self.name == \"dql\":\n extra[\"qfcn_target\"] = pyrado.load(\"qfcn_target.pt\", ex_dir, obj=self.qfcn_targ, verbose=True)\n\n return env, policy, extra\n"
] | [
[
"torch.mean",
"torch.zeros",
"torch.from_numpy",
"torch.no_grad",
"torch.nn.functional.smooth_l1_loss",
"torch.get_default_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
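The DQL update step above boils down to: build the one-step TD target from the target network, then take a Huber (smooth L1) loss against the current Q-values. A tiny numeric check of exactly that computation with synthetic tensors, no Pyrado imports required:

import torch as to
import torch.nn as nn

gamma = 0.99
rewards     = to.tensor([1.0, 0.0, 0.5])
not_done    = to.tensor([1.0, 0.0, 1.0])   # zero where the episode terminated
q_vals      = to.tensor([2.0, 1.5, 0.8])   # Q(s, a) from the current policy network
next_v_vals = to.tensor([1.8, 1.2, 0.9])   # max_a' Q_target(s', a') from the target network

expected_q_vals = rewards + not_done * gamma * next_v_vals
loss = nn.functional.smooth_l1_loss(q_vals, expected_q_vals)
print(expected_q_vals, loss.item())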
c2huc2hu/active-semi-supervised-clustering | [
"000a5b5d55ae186e547fc00cbdb440dfce91120a"
] | [
"active_semi_clustering/active/pairwise_constraints/random.py"
] | [
"import numpy as np\nfrom collections import namedtuple\n\nConstraints = namedtuple('Constraints', ['ml', 'cl'])\n\nclass Random:\n def __init__(self, n_clusters=3, **kwargs):\n self.n_clusters = n_clusters\n\n # 2-tuple containing ml and cl, which are each points (2-tuples)\n self.pairwise_constraints_ = Constraints([], [])\n\n def fit(self, X, oracle=None):\n n_elems = X.shape[0]\n\n # Quickly sample n_elems pairs of examples\n from_ = np.random.randint(n_elems, size=(oracle.max_queries_cnt))\n dist = np.random.randint(n_elems - 1, size=(oracle.max_queries_cnt)) # - 1 to avoid pairing an item with itself\n to = (from_ + dist) % n_elems\n constraints = np.vstack((from_, to)).T\n\n ml, cl = [], []\n\n for i, j in constraints:\n must_linked = oracle.query(i, j)\n if must_linked:\n ml.append((i, j))\n else:\n cl.append((i, j))\n\n self.pairwise_constraints_ = Constraints(ml, cl)\n\n return self\n"
] | [
[
"numpy.vstack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
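A short sketch of the modular-offset pair sampling used in Random.fit() above, with the offset drawn from [1, n_elems-1]; the snippet in the record draws it from [0, n_elems-2], so a zero offset can still pair an item with itself despite the comment.

import numpy as np

def sample_distinct_pairs(n_elems, n_pairs, seed=None):
    rng = np.random.default_rng(seed)
    from_ = rng.integers(n_elems, size=n_pairs)
    # offsets in [1, n_elems-1] guarantee (from_ + dist) % n_elems != from_
    dist = rng.integers(1, n_elems, size=n_pairs)
    to = (from_ + dist) % n_elems
    return np.vstack((from_, to)).T

pairs = sample_distinct_pairs(n_elems=10, n_pairs=5, seed=0)
assert not np.any(pairs[:, 0] == pairs[:, 1])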
JM-IP/DGRL | [
"e8acd43b9b68ee0be5f764774e1969fe530183d0"
] | [
"main_sub_layer_not_used.py"
] | [
"import torch\nimport sys\nimport os\nimport argparse\nimport util\nfrom data import get_dataset\nfrom preprocess import get_transform\nimport torch.nn as nn\nimport torch.optim as optim\nsys.path.insert(0, './models')\nimport nin, xnor_resnet, alexnet\nfrom torch.autograd import Variable\n\ndef save_state(model, best_acc, arch):\n print('==> Saving model ...')\n state = {\n 'best_acc': best_acc,\n 'state_dict': model.state_dict(),\n }\n # for key in state['state_dict'].keys():\n # if 'module' in key:\n # state['state_dict'][key.replace('module.', '')] = \\\n # state['state_dict'].pop(key)\n torch.save(state, 'models/' + arch + 'sublayer.pth.tar')\n\ndef train(epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(trainloader):\n # process the weights including binarization\n bin_op.binarization()\n \n # forwarding\n data, target = Variable(data.cuda()), Variable(target.cuda())\n optimizer.zero_grad()\n output = model(data)\n \n # backwarding\n loss = criterion(output, target)\n loss.backward()\n \n # restore weights\n bin_op.restore()\n bin_op.updateBinaryGradWeight()\n \n optimizer.step()\n if batch_idx % 100 == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tLR: {}'.format(\n epoch, batch_idx * len(data), len(trainloader.dataset),\n 100. * batch_idx / len(trainloader), #loss.data.item(),\n loss.data[0],\n optimizer.param_groups[0]['lr']))\n return\n\n\ndef test(arch):\n global best_acc\n model.eval()\n test_loss = 0\n correct = 0\n bin_op.binarization()\n for data, target in testloader:\n data, target = Variable(data.cuda()), Variable(target.cuda())\n output = model(data)\n test_loss += criterion(output, target).data[0]\n # criterion(output, target).data.item()\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n bin_op.restore()\n acc = 100. * float(correct) / len(testloader.dataset)\n\n if acc > best_acc:\n best_acc = acc\n save_state(model, best_acc, arch)\n \n test_loss /= len(testloader.dataset)\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(\n test_loss * 128., correct, len(testloader.dataset),\n 100. 
* float(correct) / len(testloader.dataset)))\n print('Best Accuracy: {:.2f}%\\n'.format(best_acc))\n return\n\n\ndef adjust_learning_rate(optimizer, epoch):\n update_list = [80, 200, 240, 280]\n if epoch in update_list:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.3\n return\n\n\nif __name__=='__main__':\n # prepare the options\n parser = argparse.ArgumentParser()\n parser.add_argument('--cpu', action='store_true',\n help='set if only CPU is available')\n parser.add_argument('--data', action='store', default='./data/',\n help='dataset path')\n parser.add_argument('--dataset', action='store', default='cifar10',\n help='dataset path')\n parser.add_argument('--arch', action='store', default='resnet',\n help='the architecture for the network: nin')\n parser.add_argument('--gpus', default='2',\n help='gpus used for training - e.g 0,1,3')\n parser.add_argument('--lr', action='store', default='0.01',\n help='the intial learning rate')\n parser.add_argument('--pretrained', action='store', default=None,\n help='the path to the pretrained model')\n parser.add_argument('--batch_size', action='store', default='32', type=int,\n help='batch_size')\n parser.add_argument('--workers', action='store', default='8', type=int,\n help='workers')\n\n parser.add_argument('--evaluate', action='store_true',\n help='evaluate the model')\n args = parser.parse_args()\n args.gpus = [int(i) for i in args.gpus.split(',')]\n torch.cuda.set_device(args.gpus[0])\n print(\"using gpu \", torch.cuda.current_device())\n print('==> Options:', args)\n\n # set the seed\n torch.manual_seed(1)\n torch.cuda.manual_seed(1)\n\n # prepare the data\n if not os.path.isfile(args.data+'/train_data'):\n # check the data path\n raise Exception\\\n ('Please assign the correct data path with --data <DATA_PATH>')\n\n # trainset = data.dataset(root=args.data, train=True)\n #\n # trainloader = torch.utils.data.DataLoader(trainset, batch_size=128,\n # shuffle=True, num_workers=8)\n #\n # testset = data.dataset(root=args.data, train=False)\n # testloader = torch.utils.data.DataLoader(testset, batch_size=100,\n # shuffle=False, num_workers=8)\n\n # define classes\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n # define the model\n print('==> building model', args.arch, '...')\n if args.arch == 'nin':\n model = nin.Net()\n elif args.arch == 'resnet':\n model = xnor_resnet.resnet(**{'dataset': 'cifar10', 'num_classes': 10, 'depth': 18})\n elif args.arch == 'alexnet':\n model = alexnet.alexnet_sublayer()\n default_transform = {\n 'train': get_transform('cifar10',\n input_size=32, augment=True),\n 'eval': get_transform('cifar10',\n input_size=32, augment=False)\n }\n transform = getattr(model, 'input_transform', default_transform)\n regime = getattr(model, 'regime', {0: {'optimizer': 'SGD',\n 'lr': 0.01,\n 'momentum': 0.9,\n 'weight_decay': 0}})\n # define loss function (criterion) and optimizer\n criterion = getattr(model, 'criterion', nn.CrossEntropyLoss)()\n\n else:\n raise Exception(args.arch+' is currently not supported')\n\n val_data = get_dataset(args.dataset, 'val', transform['eval'])\n testloader = torch.utils.data.DataLoader(\n val_data,\n batch_size=args.batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n train_data = get_dataset(args.dataset, 'train', transform['train'])\n trainloader = torch.utils.data.DataLoader(\n train_data,\n batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n\n # 
initialize the model\n if not args.pretrained:\n print('==> Initializing model parameters ...')\n best_acc = 0\n for m in model.modules():\n if isinstance(m, nn.Conv2d):\n m.weight.data.normal_(0, 0.05)\n if m.bias is not None:\n m.bias.data.zero_()\n pretrained = './models/alexnet.pth.tar'\n pretrained_model = torch.load(pretrained)\n model_old = alexnet.alexnet()\n model_old.cuda()\n model_old = torch.nn.DataParallel(model_old, device_ids=args.gpus)\n model_old.load_state_dict(pretrained_model['state_dict'])\n best_acc_old = pretrained_model['best_acc']\n print(best_acc_old)\n count = 0\n\n new_model_state = model.state_dict()\n new_model_list = list(new_model_state.items())\n for name, params in list(model_old.state_dict().items()):\n # print(name)\n # if isinstance(params, nn.Parameter):\n # params = params.data\n\n new_model_state[new_model_list[count][0]] = model_old.state_dict().pop(name)\n # print(new_model_list[count][0])\n # print(new_model_state[new_model_list[count][0]])\n count = count + 1\n model.load_state_dict(new_model_state)\n else:\n print('==> Load pretrained model form', args.pretrained, '...')\n pretrained_model = torch.load(args.pretrained)\n best_acc = pretrained_model['best_acc']\n model.load_state_dict(pretrained_model['state_dict'])\n\n if not args.cpu:\n model.cuda()\n model = torch.nn.DataParallel(model, device_ids=args.gpus)\n\n print(model)\n\n # define solver and criterion\n base_lr = float(args.lr)\n param_dict = dict(model.named_parameters())\n params = []\n\n for key, value in param_dict.items():\n params += [{'params':[value], 'lr': base_lr,\n 'weight_decay':0.00001}]\n\n # optimizer = optim.Adam(params, lr=0.10,weight_decay=0.00001)\n optimizer = optim.SGD(params, lr=base_lr, momentum=0.9, weight_decay=0)\n criterion = nn.CrossEntropyLoss()\n\n # define the binarization operator\n bin_op = util.BinOp(model)\n\n # do the evaluation if specified\n if args.evaluate:\n test()\n exit(0)\n\n # start training\n for epoch in range(1, 250):\n adjust_learning_rate(optimizer, epoch)\n train(epoch)\n test(args.arch)\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.manual_seed",
"torch.cuda.set_device",
"torch.cuda.current_device",
"torch.manual_seed",
"torch.load",
"torch.utils.data.DataLoader",
"torch.optim.SGD",
"torch.nn.DataParallel",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
btrzcinski/AdventOfCode | [
"46012e81ba8a56cde811ad481ab14b43ce73f09f"
] | [
"AdventOfCode/Day15.py"
] | [
"from numpy import matrix\nfrom operator import itemgetter\n\ndef moves_from_x(x):\n for i in range(len(x)):\n for j in range(len(x)):\n if i < j:\n yield x[:i] + [x[i] + 1] + x[i+1:j] + [x[j] - 1] + x[j+1:]\n elif i > j:\n yield x[:j] + [x[j] - 1] + x[j+1:i] + [x[i] + 1] + x[i+1:]\n\ndef solve_for_x(fun, starting_x):\n x = starting_x.copy()\n solved = False\n while not solved:\n best_move = max([(move, fun(move)) for move in moves_from_x(x)], key=itemgetter(1))\n if best_move[1] <= fun(x):\n solved = True\n else:\n x = best_move[0]\n return x\n\n# props = [[cap, dur, flav, text], [cap, dur, flav, text], ...]\ndef generate_score_objective_function_from_properties(props):\n return lambda x: (matrix(x) * matrix(props)).clip(min=0).prod()\n\ndef generate_calorie_objective_function_from_properties(props, cals, cal_ceiling):\n return lambda x: 0.000001*generate_score_objective_function_from_properties(props)(x) - abs(calories_for_x(x, cals) - cal_ceiling)\n\ndef evaluate_x_with_props(x, props):\n return generate_score_objective_function_from_properties(props)(x)\n\ndef solve_for_x_from_props(props):\n fun = generate_score_objective_function_from_properties(props)\n starting_x = [100 // len(props)] * len(props)\n starting_x[-1] += 100 % len(props)\n return solve_for_x(fun, starting_x)\n\ndef calories_for_x(x, cals):\n return (matrix(x)*matrix(cals).T)[0,0]\n\ndef solve_for_x_with_cal_penalty(props, cals, cal_ceiling):\n starting_x = solve_for_x_from_props(props)\n fun = generate_calorie_objective_function_from_properties(props, cals, cal_ceiling)\n return solve_for_x(fun, starting_x)\n\ndef main():\n ingredients = []\n cals = []\n with open(\"Day15.txt\") as f:\n for line in f:\n # Sprinkles: capacity 2, durability 0, flavor -2, texture 0, calories 3\n _, _, cap, _, dur, _, flav, _, text, _, cal = [x.strip(\",\") for x in line.strip().split()]\n props = [int(x) for x in (cap, dur, flav, text)]\n ingredients.append(props)\n cals.append(int(cal))\n print(\"ingredients =\", ingredients)\n print(\"cals =\", cals)\n solution = solve_for_x_from_props(ingredients)\n score = evaluate_x_with_props(solution, ingredients)\n print(\"Solution:\", solution)\n print(\"Score:\", score)\n cal_solution = solve_for_x_with_cal_penalty(ingredients, cals, 500)\n cal_score = evaluate_x_with_props(cal_solution, ingredients)\n cal_solution_cals = calories_for_x(cal_solution, cals)\n print(\"Cals solution:\", cal_solution)\n print(\"Cals score:\", cal_score)\n print(\"Cals cals:\", cal_solution_cals)\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.matrix"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
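A sketch of the ingredient-score objective from the record above, written with plain numpy arrays instead of numpy.matrix; the property values and amounts here are made up for illustration.

import numpy as np

# one row per ingredient: (capacity, durability, flavor, texture) -- hypothetical values
props = np.array([[ 2,  0, -2,  0],
                  [ 0,  5, -3,  0],
                  [ 0, -1,  0,  5],
                  [ 0,  0,  5, -1]])
x = np.array([24, 29, 31, 16])            # teaspoons per ingredient, summing to 100

totals = np.clip(x @ props, 0, None)      # per-property totals, negatives clipped to 0
score = totals.prod()                     # same value as (matrix(x) * matrix(props)).clip(min=0).prod()
print(score)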
ess-dmsc/nexus-constructor | [
"ae0026c48f8f2d4d88d3ff00e45cb6591983853b"
] | [
"nexus_constructor/pixel_options.py"
] | [
"from typing import List, Optional, Tuple\n\nimport numpy as np\nfrom PySide2.QtCore import QObject, Signal\nfrom PySide2.QtWidgets import QDoubleSpinBox, QListWidgetItem, QSpinBox\n\nfrom nexus_constructor.geometry.geometry_loader import load_geometry\nfrom nexus_constructor.geometry.pixel_data import (\n Corner,\n CountDirection,\n PixelData,\n PixelGrid,\n PixelMapping,\n)\nfrom nexus_constructor.model.component import Component\nfrom nexus_constructor.model.geometry import (\n DETECTOR_NUMBER,\n X_PIXEL_OFFSET,\n Y_PIXEL_OFFSET,\n OFFGeometryNexus,\n)\nfrom nexus_constructor.pixel_mapping_widget import PixelMappingWidget\nfrom nexus_constructor.validators import PixelValidator\nfrom ui.pixel_options import Ui_PixelOptionsWidget\n\nBOTTOM_LEFT_TEXT = \"Bottom Left\"\nBOTTOM_RIGHT_TEXT = \"Bottom Right\"\nTOP_LEFT_TEXT = \"Top Left\"\nTOP_RIGHT_TEXT = \"Top Right\"\n\nPIXEL_GRID_STACK_INDEX = 0\nPIXEL_MAPPING_STACK_INDEX = 1\n\nINITIAL_COUNT_CORNER = {\n BOTTOM_LEFT_TEXT: Corner.BOTTOM_LEFT,\n BOTTOM_RIGHT_TEXT: Corner.BOTTOM_RIGHT,\n TOP_LEFT_TEXT: Corner.TOP_LEFT,\n TOP_RIGHT_TEXT: Corner.TOP_RIGHT,\n}\n\nROWS_TEXT = \"Rows\"\nCOLUMNS_TEXT = \"Columns\"\n\nCOUNT_DIRECTION = {ROWS_TEXT: CountDirection.ROW, COLUMNS_TEXT: CountDirection.COLUMN}\n\n\ndef data_is_an_array_with_more_than_one_element(data) -> bool:\n \"\"\"\n At the moment it appears as though a scalar can still be returned as an array when using `get_field_value` (though\n it could just be me doing the wrong thing). This function checks if an array contains more than one value so that\n Pixel Data can be edited in the case of a single Shape.\n :param data: The data value from the NeXus file.\n :return: True if the data is a scalar or an array containing a single value, False otherwise.\n \"\"\"\n if np.isscalar(data):\n return False\n\n return data.size > 1\n\n\nclass PixelOptions(Ui_PixelOptionsWidget, QObject):\n def __init__(self):\n\n QObject.__init__(self)\n\n self.pixel_mapping_widgets = []\n\n self._pixel_validator = None\n self.current_mapping_filename = None\n\n def setupUi(self, parent_widget):\n\n super().setupUi(parent_widget)\n\n self._pixel_validator = PixelValidator(\n parent_widget,\n self.single_pixel_radio_button,\n self.entire_shape_radio_button,\n )\n\n # Have the radio buttons change the visibility of the pixel options\n self.setup_visibility_signals()\n\n # Have the pixel mapping button populate the list widget if necessary\n self.entire_shape_radio_button.clicked.connect(\n self.generate_pixel_mapping_if_required\n )\n\n # Setup the pixel grid behaviour\n self.setup_pixel_grid_options()\n\n # Cause the overall Pixel Options validity to change when a different type of Pixel Layout has been selected\n self.single_pixel_radio_button.clicked.connect(self.update_pixel_input_validity)\n self.entire_shape_radio_button.clicked.connect(self.update_pixel_input_validity)\n self.no_pixels_button.clicked.connect(self.update_pixel_input_validity)\n\n # Update the validity\n self.update_pixel_input_validity()\n\n def fill_existing_entries(self, component_to_edit: Component):\n \"\"\"\n Populate the pixel fields based on what is already stored in the NeXus file.\n \"\"\"\n self.reset_pixel_mapping_list()\n\n try:\n component_to_edit.get_field_value(X_PIXEL_OFFSET)\n self.single_pixel_radio_button.setChecked(True)\n self.update_pixel_layout_visibility(True, False)\n self._fill_single_pixel_fields(component_to_edit)\n return\n except AttributeError:\n pass\n\n try:\n component_to_edit.get_field_value(DETECTOR_NUMBER)\n 
self.entire_shape_radio_button.setChecked(True)\n self.update_pixel_layout_visibility(False, True)\n self._fill_entire_shape_fields(component_to_edit)\n return\n except AttributeError:\n pass\n\n self.no_pixels_button.setChecked(True)\n self.pixel_options_stack.setVisible(False)\n\n def _fill_single_pixel_fields(self, component_to_edit: Component):\n \"\"\"\n Fill the \"single pixel\" fields of a component that's being edited and contains pixel information.\n :param component_to_edit: The component that's being edited.\n \"\"\"\n # Retrieve the pixel offsets and detector number from the component\n x_pixel_offset = component_to_edit.get_field_value(X_PIXEL_OFFSET)\n y_pixel_offset = component_to_edit.get_field_value(Y_PIXEL_OFFSET)\n detector_numbers = component_to_edit.get_field_value(DETECTOR_NUMBER).tolist()\n\n # Check that x offset is more than one value\n if data_is_an_array_with_more_than_one_element(x_pixel_offset):\n\n # Set the number of rows and the row height\n n_rows, row_height = self._get_row_information(y_pixel_offset)\n self.row_count_spin_box.setValue(n_rows)\n self.row_height_spin_box.setValue(row_height)\n\n # Set the number of columns and the column width\n n_cols, col_width = self._get_column_information(x_pixel_offset)\n self.column_count_spin_box.setValue(n_cols)\n self.column_width_spin_box.setValue(col_width)\n\n # Set the first ID, start counting option, and the count direction option\n (\n first_id,\n start_counting_text,\n count_along_text,\n ) = self._get_detector_number_information(detector_numbers)\n self.first_id_spin_box.setValue(first_id)\n self.start_counting_combo_box.setCurrentText(start_counting_text)\n self.count_first_combo_box.setCurrentText(count_along_text)\n\n else:\n # If the pixel offset information represents a single pixel\n pass\n\n @staticmethod\n def _get_row_information(y_pixel_offset: np.ndarray) -> Tuple[int, Optional[float]]:\n \"\"\"\n Determine the number of rows and the row height from a component that's being edited.\n :param y_pixel_offset: The array of y pixel offsets from the component.\n :return: The number of rows and the row height. If there is only one row, the row height is treated as None.\n \"\"\"\n n_rows = y_pixel_offset.shape[0]\n\n if n_rows > 1:\n return n_rows, np.abs(y_pixel_offset[0][0] - y_pixel_offset[1][0])\n\n return n_rows, None\n\n @staticmethod\n def _get_column_information(\n x_pixel_offset: np.ndarray,\n ) -> Tuple[int, Optional[float]]:\n \"\"\"\n Determine the number of columns and the column width from a component that's being edited.\n :param x_pixel_offset: The array of x pixel offsets from the component.\n :return: The number of columns and the column width. 
if there is only one column, column width is treated as\n None.\n \"\"\"\n n_cols = x_pixel_offset.shape[1]\n\n if n_cols > 1:\n return n_cols, np.abs(x_pixel_offset[0][1] - x_pixel_offset[0][0])\n\n return n_cols, None\n\n @staticmethod\n def _get_detector_number_information(\n detector_numbers: List[List[int]],\n ) -> Tuple[int, str, str]:\n \"\"\"\n Determine the first pixel ID, the count direction, and the location of the first pixel from a component that's\n being edited.\n :param detector_numbers: The array of detector numbers from the component.\n :return: The first ID, start counting text, and count along text that should be placed in the Edit Component\n Window.\n \"\"\"\n # Find the first pixel and its index in the detector number array\n first_id = np.amin(detector_numbers)\n first_id_index = np.where(detector_numbers == first_id)\n first_id_index = (first_id_index[0][0], first_id_index[1][0])\n\n # Find the indices that are right and left of the first ID. A pixel will not always exist in these places, so\n # a check is done before accessing the values at these indices.\n right_of_first_id = (first_id_index[0], first_id_index[1] + 1)\n left_of_first_id = (first_id_index[0], first_id_index[1] - 1)\n\n start_counting = []\n\n # Use the index from the first pixel to determine if the first ID is at the top or bottom of the grid\n if first_id_index[0] == 0:\n start_counting.append(\"Top\")\n else:\n start_counting.append(\"Bottom\")\n\n # Set the count along text to columns\n count_along_text = COLUMNS_TEXT\n\n # Find the value after the first ID\n first_id_plus_one = first_id + 1\n\n # Determine if the first ID is on the right or left of the pixel grid\n if first_id_index[1] == 0:\n start_counting.append(\"Left\")\n # If the first pixel is on the left of the grid, check if its neighbour on the right is the second pixel\n # If it is the second pixel, the count along value is Rows, otherwise it will remain as Columns\n if (\n detector_numbers[right_of_first_id[0]][right_of_first_id[1]]\n == first_id_plus_one\n ):\n count_along_text = ROWS_TEXT\n\n else:\n start_counting.append(\"Right\")\n # If the first pixel is on the right of the grid, check if its neighbour on the left is the second pixel\n # If it is the second pixel, the count along value is Rows, otherwise it will remain as columns\n if (\n detector_numbers[left_of_first_id[0]][left_of_first_id[1]]\n == first_id_plus_one\n ):\n count_along_text = ROWS_TEXT\n\n start_counting_text = \" \".join(start_counting)\n\n return first_id, start_counting_text, count_along_text\n\n def _fill_entire_shape_fields(self, component_to_edit: Component):\n \"\"\"\n Fill the \"entire shape\" fields a component that is being edited and contains pixel data.\n :param component_to_edit: The component being edited.\n \"\"\"\n shape = component_to_edit.shape[0]\n\n if isinstance(shape, OFFGeometryNexus):\n self._fill_off_geometry_pixel_mapping(shape)\n\n else:\n detector_number = shape.detector_number\n n_cylinders = shape.cylinders.size // 3\n\n if n_cylinders > 1:\n self.create_pixel_mapping_list(n_cylinders, \"cylinder\")\n # TODO: Restore pixel mapping in the case of multiple cylinders\n\n else:\n self.create_pixel_mapping_list(n_cylinders, \"cylinder\")\n self.pixel_mapping_widgets[0].id = detector_number[0]\n\n def _fill_off_geometry_pixel_mapping(self, shape: OFFGeometryNexus):\n \"\"\"\n Fill in the pixel mapping information from an OFFGeometry component.\n :param shape: The shape data from the NeXus file.\n \"\"\"\n # Retrieve the detector face 
information from the shape and use this to create the required number of pixel\n # mapping widgets\n n_faces, detector_faces = self._get_detector_face_information(shape)\n self.create_pixel_mapping_list(n_faces, \"face\")\n\n # Populate the pixel mapping widgets based on the contents of the detector_faces array\n for detector_face in detector_faces:\n self.pixel_mapping_widgets[detector_face[0]].id = detector_face[1]\n\n @staticmethod\n def _get_detector_face_information(\n shape: OFFGeometryNexus,\n ) -> Tuple[int, List[Tuple[int, int]]]:\n return len(shape.faces), shape.detector_faces\n\n def get_current_mapping_filename(self) -> str:\n \"\"\"\n Retrieves the filename of the mesh that has been used to generate the list of pixel mapping widgets. Used in\n order to prevent creating the same list twice should the same file be selected twice with the file dialog.\n :return: The filename of the mesh.\n \"\"\"\n return self.current_mapping_filename\n\n def setup_visibility_signals(self):\n \"\"\"\n Instructs the Single Pixel/Entire Shape/No Pixels buttons to alter the visibility of items in the\n PixelOptionsWidget.\n \"\"\"\n self.single_pixel_radio_button.clicked.connect(\n lambda: self.update_pixel_layout_visibility(True, False)\n )\n self.entire_shape_radio_button.clicked.connect(\n lambda: self.update_pixel_layout_visibility(False, True)\n )\n self.no_pixels_button.clicked.connect(self.hide_pixel_options_stack)\n\n def setup_pixel_grid_options(self):\n \"\"\"\n Deals with connecting the pixel grid's signals to methods that check for validity or enforce certain rules about\n the input.\n \"\"\"\n\n # Make the column and row count spin boxes in the Pixel Grid trigger a validity update\n self.row_count_spin_box.valueChanged.connect(self.update_pixel_grid_validity)\n self.column_count_spin_box.valueChanged.connect(self.update_pixel_grid_validity)\n\n # Make the row/column count being set to zero cause its matching distance field to become disabled\n self.column_count_spin_box.valueChanged.connect(\n lambda: self.disable_or_enable_distance_field(\n self.column_count_spin_box, self.column_width_spin_box\n )\n )\n self.row_count_spin_box.valueChanged.connect(\n lambda: self.disable_or_enable_distance_field(\n self.row_count_spin_box, self.row_height_spin_box\n )\n )\n\n # Manually add options to the \"Count first along\" combo box. This is done here because inserting these options\n # through Qt Designer doesn't work.\n self.count_first_combo_box.addItems(list(COUNT_DIRECTION.keys()))\n\n @property\n def validator(self):\n \"\"\"\n :return: The PixelOptions' PixelValidator. This is needed in the AddComponentDialog so that it has knowledge\n of the PixelOptions' validity status.\n \"\"\"\n return self._pixel_validator\n\n def generate_pixel_mapping_if_required(self):\n \"\"\"\n Informs the AddComponentDialog that the \"Entire Shape\" button has been pressed. This then causes the\n AddComponentDialog to check if a new and valid file has been given. If these conditions are met then the\n AddComponentDialog will call the method for populating the pixel mapping list. 
If these conditions are not meant\n then the list will remain empty.\n \"\"\"\n if self.pixel_mapping_list_widget.count() == 0:\n self.pixel_mapping_button_pressed.emit()\n\n @staticmethod\n def disable_or_enable_distance_field(\n count_spin_box: QSpinBox, distance_spin_box: QDoubleSpinBox\n ):\n \"\"\"\n Disables or enabled the matching distance field of the row/column count spin box in the pixel grid options\n depending on if the number of rows/columns has been set to zero.\n :param count_spin_box: The row/column count spin box.\n :param distance_spin_box: The matching row height/column width spin box.\n \"\"\"\n distance_spin_box.setEnabled(count_spin_box.value() != 0)\n\n def update_pixel_grid_validity(self):\n \"\"\"\n Update the OK Validator to reflect the validity of the current Pixel Grid input. A PixelGrid is valid provided\n that the rows or columns have a non-zero value. It is invalid if both are zero. The Spin Boxes enforce\n everything else so this is the only check required.\n \"\"\"\n self._pixel_validator.set_pixel_grid_valid(\n not (\n self.row_count_spin_box.value() == 0\n and self.column_count_spin_box.value() == 0\n )\n )\n\n def update_pixel_layout_visibility(self, pixel_grid: bool, pixel_mapping: bool):\n \"\"\"\n Changes the visibility of the Pixel Options stack. This displays either the Pixel Grid or the Pixel Mapping.\n :param pixel_grid: Bool indicating whether or not to show the pixel grid options.\n :param pixel_mapping: Bool indicating whether or not to show the pixel mapping options.\n \"\"\"\n self.pixel_options_stack.setVisible(True)\n\n if pixel_grid:\n self.pixel_options_stack.setCurrentIndex(PIXEL_GRID_STACK_INDEX)\n elif pixel_mapping:\n self.pixel_options_stack.setCurrentIndex(PIXEL_MAPPING_STACK_INDEX)\n\n def populate_pixel_mapping_list_with_mesh(self, filename: str):\n \"\"\"\n Populates the Pixel Mapping list with widgets depending on the number of faces in the current geometry file for\n an NXoff_geometry.\n \"\"\"\n if self.pixel_mapping_not_visible():\n return\n\n n_faces = self.get_number_of_faces_from_mesh_file(filename)\n\n self.reset_pixel_mapping_list()\n\n # Use the faces information from the geometry file to add fields to the pixel mapping list\n self.create_pixel_mapping_list(n_faces, \"face\")\n\n # Record the filename of the current mapping to prevent the widgets from being created twice\n self.current_mapping_filename = filename\n\n def populate_pixel_mapping_list_with_cylinder_number(self, cylinder_number: int):\n \"\"\"\n Populates the pixel mapping list based on a number of cylinders for the NXcylindrical_geometry.\n :param cylinder_number: The number of cylinders.\n \"\"\"\n if self.pixel_mapping_not_visible():\n return\n\n # Set the mapping filename to None as cylinder mappings are not based on a mesh file.\n self.reset_pixel_mapping_list()\n self.create_pixel_mapping_list(cylinder_number, \"cylinder\")\n\n @staticmethod\n def get_number_of_faces_from_mesh_file(filename: str) -> int:\n \"\"\"\n Creates a temporary geometry and uses this is order to determine the number of faces in the file.\n :param filename: The filename for the mesh.\n :return: The number of faces in the mesh.\n \"\"\"\n temp_geometry = load_geometry(filename, \"m\")\n return len(temp_geometry.faces)\n\n def hide_pixel_options_stack(self):\n \"\"\"\n Conceals the Pixel Options stack (containing the Pixel Grid and Pixel Mapping options). 
This takes place when\n the No Pixels button has been pressed.\n \"\"\"\n self.pixel_options_stack.setVisible(False)\n\n def get_pixel_mapping_ids(self) -> List[int]:\n \"\"\"\n :return: A list of the IDs for the current PixelMappingWidgets.\n \"\"\"\n return [\n pixel_mapping_widget.id\n for pixel_mapping_widget in self.pixel_mapping_widgets\n ]\n\n def update_pixel_mapping_validity(self):\n \"\"\"\n Checks that at least one ID has been given in the Pixel Mapping and then updates the PixelValidator.\n \"\"\"\n nonempty_ids = [widget.id is not None for widget in self.pixel_mapping_widgets]\n self._pixel_validator.set_pixel_mapping_valid(any(nonempty_ids))\n\n def generate_pixel_data(self) -> PixelData:\n \"\"\"\n Creates the appropriate PixelData object depending on user selection then gives it the information that the\n user entered in the relevant fields. If the \"No Pixel\" button has been pressed then the method returns None.\n In the case of a PixelGrid where either rows/columns has been set to zero, this also causes the matching\n distance value to be recorded as zero.\n :return: A PixelData object or None.\n \"\"\"\n if self.single_pixel_radio_button.isChecked():\n\n return PixelGrid(\n rows=self.row_count_spin_box.value(),\n columns=self.column_count_spin_box.value(),\n row_height=self.row_height_spin_box.value(),\n col_width=self.column_width_spin_box.value(),\n first_id=self.first_id_spin_box.value(),\n count_direction=COUNT_DIRECTION[\n self.count_first_combo_box.currentText()\n ],\n initial_count_corner=INITIAL_COUNT_CORNER[\n self.start_counting_combo_box.currentText()\n ],\n )\n\n if self.entire_shape_radio_button.isChecked():\n return PixelMapping(self.get_pixel_mapping_ids())\n\n return None\n\n def update_pixel_input_validity(self):\n \"\"\"\n Changes the state of the OK Validator depending on whether or not the pixel input is valid. If The No Pixel\n option has been selected then there is nothing to do outside of calling `validate_pixels` again.\n \"\"\"\n if self.single_pixel_radio_button.isChecked():\n self.update_pixel_grid_validity()\n elif self.entire_shape_radio_button.isChecked():\n self.update_pixel_mapping_validity()\n else:\n self._pixel_validator.inform_ok_validator()\n\n def pixel_mapping_not_visible(self) -> bool:\n \"\"\"\n Checks if the pixel mapping options are visible. This is used to determine if it is necessary to generate a\n pixel mapping list.\n :return: A bool indicating the current index of the PixelOptions stack.\n \"\"\"\n return self.pixel_options_stack.currentIndex() != PIXEL_MAPPING_STACK_INDEX\n\n def reset_pixel_mapping_list(self):\n \"\"\"\n Clear the current pixel mapping list and widget. Used when the mesh file changes in the case of NXoff_geometry,\n when the number of cylinders change in the case of NXcylindrical_geometry, or when the user switches between\n mesh and cylinder.\n \"\"\"\n self.pixel_mapping_widgets = []\n self.pixel_mapping_list_widget.clear()\n self.current_mapping_filename = None\n\n def create_pixel_mapping_list(self, n_items: int, text: str):\n \"\"\"\n Creates a list of pixel mapping widgets.\n :param n_items: The number of widgets to create.\n :param text: The label to be displayed next to the line edit. 
This is either faces or cylinders.\n \"\"\"\n self.reset_pixel_mapping_list()\n\n for i in range(n_items):\n pixel_mapping_widget = PixelMappingWidget(\n self.pixel_mapping_list_widget, i, text\n )\n pixel_mapping_widget.pixelIDLineEdit.textEdited.connect(\n self.update_pixel_mapping_validity\n )\n\n # Make sure the list item is as large as the widget\n list_item = QListWidgetItem()\n list_item.setSizeHint(pixel_mapping_widget.sizeHint())\n\n self.pixel_mapping_list_widget.addItem(list_item)\n self.pixel_mapping_list_widget.setItemWidget(\n list_item, pixel_mapping_widget\n )\n\n # Keep the PixelMappingWidget so that its ID can be retrieved easily when making a PixelMapping object.\n self.pixel_mapping_widgets.append(pixel_mapping_widget)\n\n pixel_mapping_button_pressed = Signal()\n"
] | [
[
"numpy.amin",
"numpy.where",
"numpy.abs",
"numpy.isscalar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
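A tiny sketch of the numpy.amin / numpy.where lookup that _get_detector_number_information above uses to locate the first pixel ID in the detector-number grid; the 2x2 grid is hypothetical.

import numpy as np

detector_numbers = np.array([[4, 3],
                             [2, 1]])               # hypothetical 2x2 pixel grid
first_id = np.amin(detector_numbers)                # -> 1
rows, cols = np.where(detector_numbers == first_id)
first_id_index = (rows[0], cols[0])                 # -> (1, 1): bottom-right corner
print(first_id, first_id_index)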
nden/mirage | [
"eaa39c4c872673f8509c89597d89fa3e07853f3c"
] | [
"tests/test_niriss_imaging_modes.py"
] | [
"\"\"\"System test of mirage/NIRISS for regular and NRM imaging.\n\nAuthors\n-------\n - Kevin Volk\n\nUse\n---\n >>> pytest -s test_niriss_imaging_modes.py\n\nDescription of the test:\n------------------------\n\nThe test runs mirage for the same NIRISs imaging scene in regular imaging and\nin the NRM imaging mode. There is only one star in the scene of the same\nmagnitude in both instances. The code reads the two pointsource output list\nfiles to get the count rates for the two cases and verifies that the proper\nscaling has been used. The count rate ratio should be exactly 0.15/0.84\nbetween the NRM case and the regular imaging case. Due to limitations on the\nprecision of the output format, the values differ by a fraction amount of\nabout 1.e-08 in my test. Here the threhsold for agreement is a deviaiton of\nless than 1.e-06.\n\n\"\"\"\n\nimport os\nimport pytest\nimport numpy\n\nfrom mirage import imaging_simulator\n\nos.environ['TEST_DATA'] = os.path.join(os.path.dirname(__file__), 'test_data/NIRISS')\n\n# Determine if tests are being run on Travis\nON_TRAVIS = 'travis' in os.path.expanduser('~')\n\n\[email protected](ON_TRAVIS,\n reason=\"Cannot access mirage data in the central storage directory from Travis CI.\")\ndef test_niriss_imaging():\n nis = imaging_simulator.ImgSim(offline=True)\n nis.paramfile = os.path.join(os.path.dirname(__file__), 'test_data/NIRISS/niriss_imaging_test.yaml')\n nis.create()\n nis.paramfile = os.path.join(os.path.dirname(__file__), 'test_data/NIRISS/niriss_nrm_test.yaml')\n nis.create()\n value1 = numpy.loadtxt('V88888024002P000000000112o_NIS_F480M_uncal_pointsources.list',usecols=[8,])\n value2 = numpy.loadtxt('V88888024002P000000000112o_NIS_NRM_F480M_uncal_pointsources.list',usecols=[8,])\n fluxratio = value2 / value1\n\n # The 0.15 factor for the NRM and the 0.84 factor from the CLEARP element\n # are now baked into the PSF from WebbPSF.\n #targetratio = 0.15 / 0.84\n targetratio = 1.0\n deviation = abs(fluxratio/targetratio - 1.)\n assert deviation < 1.e-06\n # clean up the output files in the test directory from this test.\n os.system('/bin/rm V88888024002P000000000112o*')\n"
] | [
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LeoYoung1996/Experiment | [
"e3e875e0fd9b0367b761c51d9862b9da5e448576"
] | [
"test.py"
] | [
"\"\"\"\n@Time : 2021/12/23 6:47 下午\n@Author : LeoYoung\n@FileName: test.py\n@Software: PyCharm\n@description: 任何程序的测试\n\"\"\"\n\nimport torch\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '0,1,2,3'\nprint(torch.__version__)\nprint(torch.cuda.is_available())"
] | [
[
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rlworkgroup/metaworlds | [
"be03cfed9890a37b84283c597209b849e8a086cc"
] | [
"src/metaworlds/envs/mujoco/hopper_env.py"
] | [
"import numpy as np\n\nfrom metaworlds.core import Serializable\nfrom metaworlds.envs import Step\nfrom metaworlds.envs.mujoco import MujocoEnv\nfrom metaworlds.misc import autoargs\nfrom metaworlds.misc import logger\nfrom metaworlds.misc.overrides import overrides\n\n# states: [\n# 0: z-coord,\n# 1: x-coord (forward distance),\n# 2: forward pitch along y-axis,\n# 6: z-vel (up = +),\n# 7: xvel (forward = +)\n\n\nclass HopperEnv(MujocoEnv, Serializable):\n\n FILE = 'hopper.xml'\n\n @autoargs.arg(\n 'alive_coeff', type=float, help='reward coefficient for being alive')\n @autoargs.arg(\n 'ctrl_cost_coeff', type=float, help='cost coefficient for controls')\n def __init__(self, alive_coeff=1, ctrl_cost_coeff=0.01, *args, **kwargs):\n self.alive_coeff = alive_coeff\n self.ctrl_cost_coeff = ctrl_cost_coeff\n\n super().__init__(*args, **kwargs)\n\n # Always call Serializable constructor last\n Serializable.quick_init(self, locals())\n\n @overrides\n def get_current_obs(self):\n return np.concatenate([\n self.sim.data.qpos[0:1].flat,\n self.sim.data.qpos[2:].flat,\n np.clip(self.sim.data.qvel, -10, 10).flat,\n np.clip(self.sim.data.qfrc_constraint, -10, 10).flat,\n self.get_body_com(\"torso\").flat,\n ])\n\n @overrides\n def step(self, action):\n self.forward_dynamics(action)\n next_obs = self.get_current_obs()\n lb, ub = self.action_bounds\n scaling = (ub - lb) * 0.5\n vel = self.get_body_comvel(\"torso\")[0]\n reward = vel + self.alive_coeff - \\\n 0.5 * self.ctrl_cost_coeff * np.sum(np.square(action / scaling))\n state = self._state\n notdone = np.isfinite(state).all() and \\\n (np.abs(state[3:]) < 100).all() and (state[0] > .7) and \\\n (abs(state[2]) < .2)\n done = not notdone\n return Step(next_obs, reward, done)\n\n @overrides\n def log_diagnostics(self, paths):\n progs = [\n path[\"observations\"][-1][-3] - path[\"observations\"][0][-3]\n for path in paths\n ]\n logger.record_tabular('AverageForwardProgress', np.mean(progs))\n logger.record_tabular('MaxForwardProgress', np.max(progs))\n logger.record_tabular('MinForwardProgress', np.min(progs))\n logger.record_tabular('StdForwardProgress', np.std(progs))\n"
] | [
[
"numpy.square",
"numpy.abs",
"numpy.isfinite",
"numpy.clip",
"numpy.min",
"numpy.max",
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
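A sketch of the reward expression in HopperEnv.step() above, evaluated on made-up numbers; the action bounds, action, and forward velocity are hypothetical.

import numpy as np

lb, ub = np.array([-1.0, -1.0, -1.0]), np.array([1.0, 1.0, 1.0])
scaling = (ub - lb) * 0.5
action = np.array([0.2, -0.5, 0.9])
vel, alive_coeff, ctrl_cost_coeff = 1.3, 1.0, 0.01

# forward velocity + alive bonus - quadratic control cost on the scaled action
reward = vel + alive_coeff - 0.5 * ctrl_cost_coeff * np.sum(np.square(action / scaling))
print(reward)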
on-merrit/ON-MERRIT | [
"a21324a54a6365f2f769b5952b0cf5347a97d480"
] | [
"WP3/Task3.1/notebooks/Q3_Gender_Distribution.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # This will create plots for institutions of universities in THE WUR univs only and for the period of 2007-2017. The input dataset contains info of THE WUR univs only but for any period of time.\n\n# #### This is to be compatible with other analysis questions which used dataset from the period of 2007 to 2017\n\n# #### Note: \n# \n# #### The gender in the csv are i)unknown (name not found), ii) andy (androgynous), iii) male, iv) female, v) mostly_male and vi) mostly_female following the schema used by the external library we used to determine the gender -- https://pypi.org/project/gender-guesser/ . The difference between andy and unknown is that the former is found to have the same probability to be male than to be female, while the later means that the name wasn’t found in the database.\n# \n# #### For our purposes, i)unknow/andy --> unknown ii)male/mostly_male --> male and iii)female/mostly_female --> female\n\n# ## Question : What is the gender distribution in authorship of papers published by the universities?\n\n# In[4]:\n\n\n# standard path wrangling to be able to import project config and sources\nimport os\nimport sys\nfrom os.path import join\nroot = os.path.dirname(os.getcwd())\nsys.path.append(root)\nprint('Project root: {}'.format(root))\n\n\n# In[5]:\n\n\nsys.path.append(join(root,\"spark/shared/\"))\nfrom MAG_utils import *\n\n\n# In[ ]:\n\n\n\n\n\n# In[6]:\n\n\n# Built-in\nimport json\n\n# Installed\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom matplotlib import rc,rcParams\nfrom matplotlib.patches import Rectangle\nimport swifter\n\nimport unicodedata\nimport re\nfrom statistics import mean\n\nimport ast\n\n\n# In[7]:\n\n\ncfg = None\nwith open(join(root,\"spark/config.json\")) as fp:\n cfg = json.load(fp)\n\n\n# In[8]:\n\n\n# cfg\n\n\n# In[9]:\n\n\ncnames_for_plot = {\n \"austria\" : \"Austria\",\n \"brazil\" : \"Brazil\",\n \"germany\" : \"Germany\",\n \"india\" : \"India\",\n \"portugal\" : \"Portugal\",\n \"russia\" : \"Russia\",\n \"uk\" : \"UK\",\n \"usa\" : \"USA\"\n}\n\n\n# In[10]:\n\n\noutput_dir = join(root,\"documents/analysis/dataset_selection_question3\")\n\n\n# In[8]:\n\n\n# Create a new directory to save results\nos.makedirs(output_dir)\n\n\n# In[11]:\n\n\nstudy_years = [2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017]\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# # Extraction of count of papers in each of the three gender categories for publications coming from each university.\n\n# In[10]:\n\n\ndef get_univ_authors_gender_counts(country_papers_fos_df, univs_name): \n '''\n Get the plot of count of papers in each discipline from each university in the input country.\n '''\n univs_info = {}\n \n univs_not_found = []\n univs_found = []\n \n for org_univ_name in set(univs_name): # remove duplicate univ names in the THE list, if any\n# print(org_univ_name)\n\n THE_univ_name_normalised = mag_normalisation_institution_names(org_univ_name)\n \n '''\n The dataframe that will be selected for the current univ is either :\n 1. When the MAG normalizedname column matches to THE_univ_name_normalised\n or\n 2. 
When the MAG normalised(wikiname) matches to THE_univ_name_normalised -- this matches English names (in MAG wiki links as well as THE) of non English name (in MAG normalisedname or displayname) universities.\n '''\n univ_papers_df_set1 = country_papers_fos_df[country_papers_fos_df['normalizedname']==THE_univ_name_normalised]\n \n univ_papers_df_set2 = country_papers_fos_df[country_papers_fos_df['normalizedwikiname']==THE_univ_name_normalised]\n \n # The records in two sets can be the exactly the same \n # Concat and remove exact duplicates -- https://stackoverflow.com/a/21317570/530399\n univ_papers_df = pd.concat([univ_papers_df_set1, univ_papers_df_set2]).drop_duplicates().reset_index(drop=True)\n\n# Put additional criteria that these papers are from 2007 till 2017\n univ_papers_df = univ_papers_df[univ_papers_df['year'].isin(study_years)]\n \n \n # For those I couldn't match/find their name, it is not fair to say that their count of papers in any discipline was 0. Should be excluded from the graph.\n if len(univ_papers_df)==0:\n univs_not_found.append(org_univ_name+\" @ \"+THE_univ_name_normalised)\n else:\n univs_found.append(org_univ_name)\n \n # here, we are going to count gender against the total records count -- because each paper could have multiple male (or female) authors. This dataset is about authorship, not about paper.\n count_total_authors = len(univ_papers_df)\n \n count_male_authors = len(univ_papers_df[univ_papers_df['gender']==\"male\"])+len(univ_papers_df[univ_papers_df['gender']==\"mostly_male \"])\n count_female_authors = len(univ_papers_df[univ_papers_df['gender']==\"female\"])+len(univ_papers_df[univ_papers_df['gender']==\"mostly_female\"])\n count_unknown_authors = len(univ_papers_df[univ_papers_df['gender']==\"unknown\"])+len(univ_papers_df[univ_papers_df['gender']==\"andy\"])\n \n \n univs_info[org_univ_name] = {}\n \n \n \n \n univ_male_percent = (count_male_authors*100.00)/count_total_authors\n univ_female_percent = (count_female_authors*100.00)/count_total_authors\n univ_unknown_percent = (count_unknown_authors*100.00)/count_total_authors\n \n \n \n \n univs_info[org_univ_name][\"count_male_authors\"] = count_male_authors\n univs_info[org_univ_name][\"percent_male_authors\"] = univ_male_percent\n \n univs_info[org_univ_name][\"count_female_authors\"] = count_female_authors\n univs_info[org_univ_name][\"percent_female_authors\"] = univ_female_percent\n \n univs_info[org_univ_name][\"count_unknown_authors\"] = count_unknown_authors\n univs_info[org_univ_name][\"percent_unknown_authors\"] = univ_unknown_percent\n \n univs_info[org_univ_name][\"count_total_authors\"] = count_total_authors\n \n return univs_info, univs_not_found, univs_found\n\n\n# In[12]:\n\n\nall_countries_all_univs_gender_info = {}\nall_countries_univs_found_not_found = {}\n\nfor country_name,univs_name in cfg['data']['all_THE_WUR_institutions_by_country'].items():\n print(\"\\nProcesing for dataset of univs in \"+country_name+\"\\n\")\n all_countries_univs_found_not_found[country_name] = {}\n \n # CSV has repeated header from multiple partitions of the merge on pyspark csv output. 
Hence need to treat as string.\n country_papers_authors_df = pd.read_csv(join(root,\"data/processed/author_gender_\"+country_name+\"_papers.csv\"), header=0, sep=\",\", dtype={\"year\": object}) # object means string\n \n \n # Then eliminate problematic lines\n # temp fix until spark csv merge header issue is resolved -- the header line is present in each re-partition's output csv\n country_papers_authors_df.drop(country_papers_authors_df[country_papers_authors_df.paperid == \"paperid\"].index, inplace=True)\n # Then reset dtypes as needed.\n country_papers_authors_df = country_papers_authors_df.astype({'year':int})\n \n \n univs_info, univs_not_found, univs_found = get_univ_authors_gender_counts(country_papers_authors_df, univs_name)\n \n all_countries_all_univs_gender_info[country_name] = univs_info\n \n count_total_univs = len(univs_not_found) + len(univs_found)\n \n not_found_details = {}\n not_found_details['univ_names'] = univs_not_found\n not_found_details['count_univs'] = len(univs_not_found)\n not_found_details['percent_univs'] = (len(univs_not_found)*100.00)/count_total_univs\n \n found_details = {}\n found_details['univ_names'] = univs_found\n found_details['count_univs'] = len(univs_found)\n found_details['percent_univs'] = (len(univs_found)*100.00)/count_total_univs\n \n \n all_details = {}\n all_details['count_univs'] = count_total_univs\n \n all_countries_univs_found_not_found[country_name]['not_found'] = not_found_details\n all_countries_univs_found_not_found[country_name]['found'] = found_details\n all_countries_univs_found_not_found[country_name]['all'] = all_details\n \n \n \n print(\"Computed gender distribution of authors in all univs in \"+country_name+\"\\n\")\n\n\n# In[13]:\n\n\n# Write text files with the infos\n\nwith open(join(output_dir,'all_countries_univs_found_not_found.txt'), 'w') as file:\n file.write(json.dumps(all_countries_univs_found_not_found, sort_keys=True, indent=4, ensure_ascii=False))\n \nwith open(join(output_dir,'all_countries_all_univs_gender_info.txt'), 'w') as file:\n file.write(json.dumps(all_countries_all_univs_gender_info, sort_keys=True, indent=4, ensure_ascii=False)) \n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# # Load data from previously saved files\n\n# In[12]:\n\n\nwith open(join(output_dir,'all_countries_all_univs_gender_info.txt')) as file:\n all_countries_all_univs_gender_info = json.load(file)\n \nprint(all_countries_all_univs_gender_info)\n\n\n# # Create bar plot for each of the countries\n\n# In[24]:\n\n\ndef label_bar_with_value(ax, rects, value_labels):\n \"\"\"\n Attach a text label above each bar displaying its height\n \"\"\"\n for i in range(len(rects)):\n rect = rects[i]\n label_value = value_labels[i]\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*rect.get_height(),\n '%s' % label_value,\n ha='center', va='bottom')\n\ndef create_sorted_plot(input_df, sorting_field_count, sorting_field_percent, other_fields_count, colors2plot, ylabel, xlabel, legend_text, save_fname, save_file):\n \n second_field_count = other_fields_count[0]\n third_field_count = other_fields_count[1]\n \n # sort the df based on the sorting_field_percent\n df = input_df.sort_values(sorting_field_percent, ascending=False)[['univs_name', sorting_field_count, second_field_count, third_field_count, sorting_field_percent]]\n\n\n # Setting the positions and width for the bars\n pos = list(range(len(df['univs_name']))) \n width = 0.25 \n\n # Plotting the bars\n fig, ax = plt.subplots(figsize=(25,10))\n\n # Create a bar with sorting_field_count data,\n # 
in position pos,\n sorting_field_bar = ax.bar(pos, \n #using df['proportion_univs_agriculture'] data,\n df[sorting_field_count], \n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color= colors2plot[sorting_field_count], \n )\n # Create labels with percentage values\n sorting_field_proportion_value_labels = [str(int(x))+\"%\" for x in df[sorting_field_percent].values.tolist()]\n\n\n # Create a bar with second_field_count data,\n # in position pos + some width buffer,\n plt.bar([p + width for p in pos], \n #using df['univs_climatology_counts'] data,\n df[second_field_count],\n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color=colors2plot[second_field_count],\n )\n \n # Create a bar with third_field_count data,\n # in position pos + 2*some width buffer,\n plt.bar([p + 2*width for p in pos], \n #using df['univs_medicine_counts'] data,\n df[third_field_count],\n # of width\n width, \n # with alpha 0.5\n alpha=0.5, \n # with color\n color=colors2plot[third_field_count],\n )\n \n \n \n\n # Set the y axis label\n ax.set_ylabel(ylabel)\n\n # Set the x axis label\n ax.set_xlabel(xlabel)\n\n # Set the position of the x ticks\n ax.set_xticks([p + 0.5 * width for p in pos])\n\n # Set the labels for the x ticks\n ax.set_xticklabels(df['univs_name'], rotation='vertical')\n\n # Setting the x-axis and y-axis limits\n plt.xlim(min(pos)-width, max(pos)+width*4)\n plt.ylim([0, max(df[sorting_field_count] + df[second_field_count] + df[third_field_count])] )\n\n # Adding the legend and showing the plot\n plt.legend(legend_text, loc='upper left')\n plt.grid()\n \n label_bar_with_value(ax, sorting_field_bar, sorting_field_proportion_value_labels)\n \n if save_file:\n plt.savefig(save_fname+\".png\", bbox_inches='tight', dpi=300)\n plt.savefig(save_fname+\".pdf\", bbox_inches='tight', dpi=900)\n \n plt.close()\n return fig\n\ndef create_gender_count_distribution_bar_chart(univs_details, save_fname, x_label, save_file=True):\n # https://chrisalbon.com/python/data_visualization/matplotlib_grouped_bar_plot/\n # https://stackoverflow.com/a/42498711/530399\n \n univs_name = [x for x in univs_details.keys()]\n univs_data = univs_details.values()\n univs_male_counts = [x['count_male_authors'] for x in univs_data]\n univs_female_counts = [x['count_female_authors'] for x in univs_data]\n univs_unknown_counts = [x['count_unknown_authors'] for x in univs_data]\n percent_male_authors = [x['percent_male_authors'] for x in univs_data]\n percent_female_authors = [x['percent_female_authors'] for x in univs_data]\n percent_unknown_authors = [x['percent_unknown_authors'] for x in univs_data]\n \n \n raw_data = {'univs_name': univs_name,\n 'univs_male_counts': univs_male_counts,\n 'univs_female_counts' : univs_female_counts,\n 'univs_unknown_counts': univs_unknown_counts,\n 'percent_male_authors': percent_male_authors,\n 'percent_female_authors': percent_female_authors,\n 'percent_unknown_authors': percent_unknown_authors\n }\n df = pd.DataFrame(raw_data, columns = ['univs_name', 'univs_male_counts', 'univs_female_counts', 'univs_unknown_counts', 'percent_male_authors', 'percent_female_authors', 'percent_unknown_authors']) \n\n # print(df)\n\n colors2plot={'univs_male_counts':'green', 'univs_female_counts':'red', 'univs_unknown_counts':'blue'}\n xlabel = x_label + \" -- Ranked by \"\n ylabel = \"Authorship Counts\"\n \n \n \n sorted_plot1 = create_sorted_plot(df, 'univs_male_counts', 'percent_male_authors', ['univs_female_counts', 'univs_unknown_counts'], colors2plot, ylabel, xlabel 
+ \"Male\"+\" Authorship Counts\", legend_text=['Male Authorship Counts', 'Female Authorship Counts', 'Unknown Authorship Counts'], save_fname = save_fname+\"_sorted_Male\", save_file=save_file)\n \n sorted_plot2 = create_sorted_plot(df, 'univs_female_counts', 'percent_female_authors', ['univs_unknown_counts', 'univs_male_counts'], colors2plot, ylabel, xlabel + \"Female\"+\" Authorship Counts\", legend_text=['Female Authorship Counts', 'Unknown Authorship Counts', 'Male Authorship Counts'], save_fname = save_fname+\"_sorted_Female\", save_file=save_file)\n \n sorted_plot3 = create_sorted_plot(df, 'univs_unknown_counts', 'percent_unknown_authors', ['univs_male_counts', 'univs_female_counts'], colors2plot, ylabel, xlabel + \"Unknown\"+\" Authorship Counts\", legend_text=['Unknown Authorship Counts', 'Male Authorship Counts', 'Female Authorship Counts'], save_fname = save_fname+\"_sorted_Unknown\", save_file=save_file)\n \n return sorted_plot1, sorted_plot2, sorted_plot3\n\n\n# In[25]:\n\n\ncountry_name = 'austria'\nunivs_details = all_countries_all_univs_gender_info[country_name]\n\n\nsorted_plot1, sorted_plot2, sorted_plot3 = create_gender_count_distribution_bar_chart(univs_details, save_fname = join(output_dir,country_name+\"_\"+'gender_count_distribution'), x_label = (\"Universities in \"+cnames_for_plot[country_name]), save_file=False)\n\n\n# In[26]:\n\n\nsorted_plot1\n\n\n# In[27]:\n\n\nsorted_plot2\n\n\n# In[28]:\n\n\nsorted_plot3\n\n\n# In[29]:\n\n\nfor country_name, univs_details in all_countries_all_univs_gender_info.items():\n create_gender_count_distribution_bar_chart(univs_details, save_fname = join(output_dir,country_name+\"_\"+'gender_count_distribution'), x_label = (\"Universities in \"+cnames_for_plot[country_name]), save_file=True)\n\n\n# In[ ]:\n\n\n\n\n\n# In[30]:\n\n\nprint(\"\\n\\n\\nCompleted!!!\")\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.concat",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.bar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
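A condensed sketch of the grouped-bar layout that create_sorted_plot above builds (three bars per university, offset by a fixed width); the university names and counts here are hypothetical.

import matplotlib.pyplot as plt

univs = ["Univ A", "Univ B", "Univ C"]
male, female, unknown = [120, 80, 60], [90, 70, 40], [30, 20, 10]   # hypothetical counts
pos, width = range(len(univs)), 0.25

fig, ax = plt.subplots(figsize=(8, 4))
ax.bar(pos, male, width, alpha=0.5, label="Male")
ax.bar([p + width for p in pos], female, width, alpha=0.5, label="Female")
ax.bar([p + 2 * width for p in pos], unknown, width, alpha=0.5, label="Unknown")
ax.set_xticks([p + width for p in pos])
ax.set_xticklabels(univs, rotation="vertical")
ax.legend(loc="upper left")
fig.savefig("gender_counts.png", bbox_inches="tight", dpi=150)
plt.close(fig)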
Blind-Aid/sentiment-discovery | [
"081c7c855e00864b52e97cac0b0e097cc86d9731"
] | [
"reparameterization/weight_norm.py"
] | [
"import torch\r\nfrom torch.nn.parameter import Parameter\r\n#from ..utils import FusedNorm\r\nimport time\r\n\r\nfrom .reparameterization import Reparameterization\r\n\r\ndef _norm(p, dim):\r\n \"\"\"Computes the norm over all dimensions except dim\"\"\"\r\n if dim is None:\r\n return p.norm()\r\n elif dim == 0:\r\n output_size = (p.size(0),) + (1,) * (p.dim() - 1)\r\n return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size)\r\n elif dim == p.dim() - 1:\r\n output_size = (1,) * (p.dim() - 1) + (p.size(-1),)\r\n return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size)\r\n return _norm(p.transpose(0, dim), 0).transpose(0, dim)\r\n\r\nHALF_TYPES = (torch.cuda.HalfTensor, torch.HalfTensor)\r\n\r\nclass WeightNorm(Reparameterization):\r\n \"\"\"\r\n Weight normalization is a reparameterization that decouples the magnitude\r\n of a weight tensor from its direction. This replaces the parameter specified\r\n by `name` (e.g. \"weight\") with two parameters: one specifying the magnitude\r\n (e.g. \"weight_g\") and one specifying the direction (e.g. \"weight_v\").\r\n Weight normalization is implemented via a hook that recomputes the weight\r\n tensor from the magnitude and direction before every :meth:`~Module.forward`\r\n call.\r\n\r\n .. math::\r\n \\mathbf{w} = g \\dfrac{\\mathbf{v}}{\\|\\mathbf{v}\\|}\r\n\r\n By default, with `dim=0`, the norm is computed independently per output\r\n channel/plane. To compute a norm over the entire weight tensor, use\r\n `dim=None`.\r\n \"\"\"\r\n def compute_weight(self, module=None, name=None):\r\n \"\"\"\r\n Computes weight normalized weight value to assign value to module attribute\r\n with name `name`.\r\n Arguments:\r\n module (nn.Module): module with weight we'd like to reparameterize\r\n Returns:\r\n w (Tensor): Tensor object containing value of reparameterized weight\r\n \"\"\"\r\n if module is None:\r\n module = self.module\r\n if name is None:\r\n name = self.name\r\n module, name = Reparameterization.get_module_and_name(module, name)\r\n g = getattr(module, name + '_g')\r\n v = getattr(module, name + '_v')\r\n\r\n w = (v * (g / _norm(v, self.dim)))\r\n\r\n #fused_norm = FusedNorm.apply\r\n #v = v.contiguous()\r\n #w = g*fused_norm(v)\r\n\r\n\r\n return w\r\n\r\n def reparameterize(self, name, weight, dim):\r\n \"\"\"\r\n Creates Parameters v and gto be used for weight normalization\r\n and creates names that for attributes for the module these Parameters\r\n will correspond to. The parameters will be registered according to the names\r\n provided.\r\n Arguments:\r\n module (nn.Module): module with weight we'd like to reparameterize\r\n name (str, optional): name of weight parameter\r\n dim (int, optional): dimension over which to compute parameterization\r\n Returns:\r\n names (list, str): names of Parameters to be used for reparameterization\r\n params (list, Parameter): Parameters to be used for reparameterization\r\n \"\"\"\r\n names = [name + '_g', name + '_v']\r\n params = [Parameter(_norm(weight, dim).data), Parameter(weight.data)]\r\n return names, params\r\n"
] | [
[
"torch.nn.parameter.Parameter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
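A minimal sketch of the w = g * v / ||v|| recomputation performed by WeightNorm.compute_weight above for the default per-output-channel case (dim=0); the Linear layer is only a stand-in used to check that choosing g = ||v|| reproduces the original weight.

import torch

def recompute_weight(v: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
    # norm over all dimensions except dim 0, reshaped for broadcasting
    out_shape = (v.size(0),) + (1,) * (v.dim() - 1)
    v_norm = v.contiguous().view(v.size(0), -1).norm(dim=1).view(*out_shape)
    return v * (g / v_norm)

lin = torch.nn.Linear(4, 3)
w = lin.weight.detach()
g = w.view(w.size(0), -1).norm(dim=1).view(-1, 1)   # g = ||w|| per output row
assert torch.allclose(recompute_weight(w, g), w, atol=1e-6)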
DucNguyenAnh/Web-Attack-Detection | [
"e799055efb1976d70d0c51916be97793b72b2954"
] | [
"utils/reader.py"
] | [
"from __future__ import print_function\nimport random\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\n\nfrom vocab import Vocabulary\nfrom utils import get_requests_from_file, batch_generator, one_by_one_generator\n\n\nclass Reader(object):\n\n def __init__(self, data_path, vocab=Vocabulary()):\n self.vocab = vocab\n\n data = get_requests_from_file(data_path)\n\n print(\"Downloaded {} samples\".format(len(data)))\n map_result = map(self._process_request, data)\n self.data = [x[0] for x in map_result]\n self.lengths = [x[0] for x in map_result]\n for i in self.data:\n self.lengths.append(len(i))\n assert len(self.data) == len(self.lengths)\n\n def _process_request(self, req):\n \"\"\"\n Splits a request into lines and convert a string into ints.\n \"\"\"\n seq = self.vocab.string_to_int(req)\n l = len(seq)\n return seq, l\n\n\nclass Data(Reader):\n\n def __init__(self, data_path, vocab=Vocabulary(), predict=False):\n \"\"\"\n Creates an object that gets data from a file.\n \"\"\"\n super(Data, self).__init__(data_path, vocab)\n\n if not predict:\n self._train_test_split()\n\n def _train_test_split(self):\n \"\"\"\n Train/val/test split for anomaly detection problem.\n \"\"\"\n # Shuffle requests\n data, lengths = self._shuffle(self.data, self.lengths)\n\n # Split into train/val/test\n X_train, X_test, l_train, l_test = train_test_split(data, lengths, test_size=0.1)\n X_train, X_val, l_train, l_val = train_test_split(X_train, l_train, test_size=0.2)\n\n self.X_train, self.l_train = X_train, l_train\n self.X_val, self.l_val = X_val, l_val\n self.X_test, self.l_test = X_test, l_test\n\n self.train_size = len(X_train)\n self.val_size = len(X_val)\n self.test_size = len(X_test)\n\n def _shuffle(self, data, lengths):\n temp = list(zip(data, lengths))\n random.shuffle(temp)\n data, lengths = zip(*temp)\n \n return data, lengths\n\n def train_generator(self, batch_size, num_epochs):\n return batch_generator(\n self.X_train,\n self.l_train,\n num_epochs,\n batch_size,\n self.vocab)\n\n def val_generator(self):\n return one_by_one_generator(\n self.X_val,\n self.l_val,\n self.vocab)\n\n def test_generator(self):\n return one_by_one_generator(\n self.X_test,\n self.l_test,\n self.vocab)\n\n def predict_generator(self):\n return one_by_one_generator(\n self.data,\n self.lengths,\n self.vocab)\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
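One detail worth flagging in the Reader row above: in Python 3, `map()` returns a single-pass iterator, so the second comprehension over `map_result` runs over an already exhausted iterator (and it indexes `x[0]`, the sequence, rather than `x[1]`, the length); the lengths then come only from the follow-up append loop. Below is a hedged, standalone sketch of the intended bookkeeping, with `string_to_int` standing in for `Vocabulary.string_to_int` from the original file.

```python
def split_sequences_and_lengths(requests, string_to_int):
    # convert each raw request once, then derive the lengths from the results
    sequences = [string_to_int(req) for req in requests]
    lengths = [len(seq) for seq in sequences]
    return sequences, lengths

# toy check with a character-level "vocabulary"
data, lengths = split_sequences_and_lengths(["abc", "de"], lambda s: [ord(c) for c in s])
assert data == [[97, 98, 99], [100, 101]] and lengths == [3, 2]
```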
joevin-slq/fma | [
"162b76915c3bc905203cd533a1d30d20f0b7ec05"
] | [
"features.py"
] | [
"#!/usr/bin/env python3\n\n# FMA: A Dataset For Music Analysis\n# Michaël Defferrard, Kirell Benzi, Pierre Vandergheynst, Xavier Bresson, EPFL LTS2.\n\n# All features are extracted using [librosa](https://github.com/librosa/librosa).\n# Alternatives:\n# * [MARSYAS](https://github.com/marsyas/marsyas) (C++ with Python bindings)\n# * [RP extract](http://www.ifs.tuwien.ac.at/mir/downloads.html) (Matlab, Java, Python)\n# * [jMIR jAudio](http://jmir.sourceforge.net) (Java)\n# * [MIRtoolbox](https://www.jyu.fi/hum/laitokset/musiikki/en/research/coe/materials/mirtoolbox) (Matlab)\n\nimport os\nimport multiprocessing\nimport warnings\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nimport librosa\nfrom tqdm import tqdm\nimport utils\n\n\ndef columns():\n feature_sizes = dict(chroma_stft=12, chroma_cqt=12, chroma_cens=12,\n tonnetz=6, mfcc=20, rmse=1, zcr=1,\n spectral_centroid=1, spectral_bandwidth=1,\n spectral_contrast=7, spectral_rolloff=1)\n moments = ('mean', 'std', 'skew', 'kurtosis', 'median', 'min', 'max')\n\n columns = []\n for name, size in feature_sizes.items():\n for moment in moments:\n it = ((name, moment, '{:02d}'.format(i+1)) for i in range(size))\n columns.extend(it)\n\n names = ('feature', 'statistics', 'number')\n columns = pd.MultiIndex.from_tuples(columns, names=names)\n\n # More efficient to slice if indexes are sorted.\n return columns.sort_values()\n\n\ndef compute_features(tid):\n\n features = pd.Series(index=columns(), dtype=np.float32, name=tid)\n\n # Catch warnings as exceptions (audioread leaks file descriptors).\n warnings.filterwarnings('error', module='librosa')\n\n def feature_stats(name, values):\n features[name, 'mean'] = np.mean(values, axis=1)\n features[name, 'std'] = np.std(values, axis=1)\n features[name, 'skew'] = stats.skew(values, axis=1)\n features[name, 'kurtosis'] = stats.kurtosis(values, axis=1)\n features[name, 'median'] = np.median(values, axis=1)\n features[name, 'min'] = np.min(values, axis=1)\n features[name, 'max'] = np.max(values, axis=1)\n\n try:\n filepath = utils.get_audio_path(os.environ.get('AUDIO_DIR'), tid)\n x, sr = librosa.load(filepath, sr=None, mono=True) # kaiser_fast\n\n f = librosa.feature.zero_crossing_rate(x, frame_length=2048, hop_length=512)\n feature_stats('zcr', f)\n\n cqt = np.abs(librosa.cqt(x, sr=sr, hop_length=512, bins_per_octave=12,\n n_bins=7*12, tuning=None))\n assert cqt.shape[0] == 7 * 12\n assert np.ceil(len(x)/512) <= cqt.shape[1] <= np.ceil(len(x)/512)+1\n\n f = librosa.feature.chroma_cqt(C=cqt, n_chroma=12, n_octaves=7)\n feature_stats('chroma_cqt', f)\n f = librosa.feature.chroma_cens(C=cqt, n_chroma=12, n_octaves=7)\n feature_stats('chroma_cens', f)\n f = librosa.feature.tonnetz(chroma=f)\n feature_stats('tonnetz', f)\n\n del cqt\n stft = np.abs(librosa.stft(x, n_fft=2048, hop_length=512))\n assert stft.shape[0] == 1 + 2048 // 2\n assert np.ceil(len(x)/512) <= stft.shape[1] <= np.ceil(len(x)/512)+1\n del x\n\n f = librosa.feature.chroma_stft(S=stft**2, n_chroma=12)\n feature_stats('chroma_stft', f)\n\n f = librosa.feature.rmse(S=stft)\n feature_stats('rmse', f)\n\n f = librosa.feature.spectral_centroid(S=stft)\n feature_stats('spectral_centroid', f)\n f = librosa.feature.spectral_bandwidth(S=stft)\n feature_stats('spectral_bandwidth', f)\n f = librosa.feature.spectral_contrast(S=stft, n_bands=6)\n feature_stats('spectral_contrast', f)\n f = librosa.feature.spectral_rolloff(S=stft)\n feature_stats('spectral_rolloff', f)\n\n mel = librosa.feature.melspectrogram(sr=sr, S=stft**2)\n del stft\n f 
= librosa.feature.mfcc(S=librosa.power_to_db(mel), n_mfcc=20)\n feature_stats('mfcc', f)\n\n except Exception as e:\n print('{}: {}'.format(tid, repr(e)))\n\n return features\n\n\ndef main():\n tracks = utils.load('tracks.csv')\n features = pd.DataFrame(index=tracks.index,\n columns=columns(), dtype=np.float32)\n\n # More than usable CPUs to be CPU bound, not I/O bound. Beware memory.\n nb_workers = int(1.5 * len(os.sched_getaffinity(0)))\n\n # Longest is ~11,000 seconds. Limit processes to avoid memory errors.\n table = ((5000, 1), (3000, 3), (2000, 5), (1000, 10), (0, nb_workers))\n for duration, nb_workers in table:\n print('Working with {} processes.'.format(nb_workers))\n\n tids = tracks[tracks['track', 'duration'] >= duration].index\n tracks.drop(tids, axis=0, inplace=True)\n\n pool = multiprocessing.Pool(nb_workers)\n it = pool.imap_unordered(compute_features, tids)\n\n for i, row in enumerate(tqdm(it, total=len(tids))):\n features.loc[row.name] = row\n\n if i % 1000 == 0:\n save(features, 10)\n\n save(features, 10)\n test(features, 10)\n\n\ndef save(features, ndigits):\n\n # Should be done already, just to be sure.\n features.sort_index(axis=0, inplace=True)\n features.sort_index(axis=1, inplace=True)\n\n features.to_csv('features.csv', float_format='%.{}e'.format(ndigits))\n\n\ndef test(features, ndigits):\n\n indices = features[features.isnull().any(axis=1)].index\n if len(indices) > 0:\n print('Failed tracks: {}'.format(', '.join(str(i) for i in indices)))\n\n tmp = utils.load('features.csv')\n np.testing.assert_allclose(tmp.values, features.values, rtol=10**-ndigits)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.min",
"numpy.median",
"pandas.MultiIndex.from_tuples",
"numpy.max",
"numpy.std",
"numpy.mean",
"numpy.testing.assert_allclose",
"scipy.stats.kurtosis",
"scipy.stats.skew"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
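The fma features row stores seven summary statistics per feature, taken over the time axis. The toy snippet below (not from the repository, random data only) reproduces that `feature_stats` pattern with plain NumPy/SciPy so the shapes are easy to see.

```python
import numpy as np
from scipy import stats

values = np.random.rand(12, 100)   # e.g. 12 chroma bins x 100 time frames
summary = {
    'mean': np.mean(values, axis=1),
    'std': np.std(values, axis=1),
    'skew': stats.skew(values, axis=1),
    'kurtosis': stats.kurtosis(values, axis=1),
    'median': np.median(values, axis=1),
    'min': np.min(values, axis=1),
    'max': np.max(values, axis=1),
}
assert all(v.shape == (12,) for v in summary.values())
```

Note also that `librosa.feature.rmse`, called in the row, is an older API; recent librosa releases expose the same computation as `librosa.feature.rms`.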
seungjaeryanlee/playing-hard-exploration-games-by-watching-youtube | [
"93eeec7647784b2c92206b6279dfe3fab8e23088"
] | [
"networks/tdc.py"
] | [
"\"\"\"TDC network for embedding video.\"\"\"\nimport torch\nimport torch.nn as nn\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"\n A convolutional block with residual connections.\n\n Parameters\n ----------\n in_channels : int\n Number of channels in the input.\n out_channels : int\n Number of features in the output.\n\n \"\"\"\n\n def __init__(self, in_channels: int, out_channels: int) -> None:\n super().__init__()\n self.layers = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, 3, padding=1),\n nn.BatchNorm2d(out_channels),\n )\n self.final_relu = nn.ReLU(inplace=True)\n\n def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: # noqa: D102\n out = self.layers(x) + x\n return self.final_relu(out)\n\n\nclass TDC(nn.Module):\n \"\"\"\n Embedding network for video frames.\n\n Parameters\n ----------\n in_channels : int\n Number of channels in the input framestack. Defaults to 12.\n out_channels : int\n Number of features in the output layer. Defaults to 1024.\n\n \"\"\"\n\n def __init__(self, in_channels: int = 12, out_channels: int = 1024) -> None:\n # Below is a paragraph from the original paper:\n #\n # The visual embedding function, φ, is composed of three spatial,\n # padded, 3x3 convolutional layers with (32, 64, 64) channels and 2x2\n # max-pooling, followed by three residual-connected blocks with 64\n # channels and no down-sampling. Each layer is ReLU-activated and batch\n # -normalized, and the output fed into a 2-layer 1024-wide MLP. The\n # network input is a 128x128x3x4 tensor constructed by random spatial\n # cropping of a stack of four consecutive 140x140 RGB images, sampled\n # from our dataset. The final embedding vector is l2-normalized.\n #\n # From Section 5: Implementation Details\n # TODO Stride instead of max pooling?\n super().__init__()\n self.conv_layers = nn.Sequential(\n nn.Conv2d(in_channels, 32, 3, padding=1),\n nn.BatchNorm2d(32),\n nn.MaxPool2d(2),\n nn.ReLU(),\n nn.Conv2d(32, 64, 3, padding=1),\n nn.BatchNorm2d(64),\n nn.MaxPool2d(2),\n nn.ReLU(),\n nn.Conv2d(64, 64, 3, padding=1),\n nn.BatchNorm2d(64),\n nn.MaxPool2d(2),\n nn.ReLU(),\n ResidualBlock(64, 64),\n ResidualBlock(64, 64),\n ResidualBlock(64, 64),\n )\n self.fc_layers = nn.Sequential(\n nn.Linear(16384, 1024), nn.Linear(1024, out_channels)\n )\n\n def forward(self, x: torch.FloatTensor) -> torch.FloatTensor: # noqa: D102\n out = self.conv_layers(x)\n out = out.view(out.size(0), -1)\n out = self.fc_layers(out)\n return out\n"
] | [
[
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
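For the TDC row, the 16384-wide first linear layer follows directly from the architecture: three 2x2 max-pools take a 128x128 crop to 16x16 at 64 channels, and 64 * 16 * 16 = 16384. The shape check below is illustrative only; BatchNorm and the residual blocks are omitted because they do not change spatial dimensions.

```python
import torch
import torch.nn as nn

# 3x3 convolutions with padding=1 preserve spatial size; each MaxPool2d(2) halves it
conv = nn.Sequential(
    nn.Conv2d(12, 32, 3, padding=1), nn.MaxPool2d(2), nn.ReLU(),
    nn.Conv2d(32, 64, 3, padding=1), nn.MaxPool2d(2), nn.ReLU(),
    nn.Conv2d(64, 64, 3, padding=1), nn.MaxPool2d(2), nn.ReLU(),
)
out = conv(torch.randn(1, 12, 128, 128))
assert out.reshape(1, -1).shape[1] == 64 * 16 * 16 == 16384
```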
TommasoBendinelli/spatial_relations_experiments | [
"cd165437835a37c947ccf13a77531a5a42d4c925"
] | [
"learning_experiments/src/data_generator_robot_data.py"
] | [
"\"\"\"\ntitle :data_generator_robot_data.py\ndescription :Loads the spatial dataset contained in numpy arrays under train,unseen,ulabelled \n :folders under learning_experiments/data/.\nauthor :Yordan Hristov <[email protected]\ndate :10/2018\npython_version :2.7.6\n==============================================================================\n\"\"\"\n\nimport os\nimport os.path as osp\nimport cv2\nimport numpy as np\nimport json\nimport shutil\nimport glob\nimport yaml\nimport pprint\nfrom tqdm import tqdm\n\nimport chainer\n# import chainer_mask_rcnn as cmr\n\n# remove the following imports\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom chainer.datasets import TupleDataset\n\nclass DataGenerator(object):\n def __init__(self, label_mode=None, augment_flag=True, folder_names=[\"yordan_experiments\"], data_split=0.8, include_eef=False):\n self.label_mode = label_mode\n self.augment_flag = augment_flag\n self.folder_names = folder_names\n self.data_split = data_split\n self.mask_mode = 'loading'\n self.include_eef = include_eef\n\n self.mask_rcnn = None\n\n\n def segment_mask(self, bgr, expected_labels):\n \n batch = np.array(np.transpose(bgr, (2,0,1)))\n batch = batch[np.newaxis, :]\n bboxes, masks, labels, scores = self.mask_rcnn.predict(batch)\n\n indecies = scores[0] >= self.score_threshold\n bboxes = bboxes[0][indecies]\n masks = masks[0][indecies]\n labels = labels[0][indecies]\n labels = self.class_names[labels]\n scores = scores[0][indecies]\n\n if len(masks != 0):\n tuples = [(i, label, score) for i, (label, score) in enumerate(zip(labels, scores))]\n\n good_indecies = []\n for expected_label in expected_labels:\n filtered = [(i, label, score) for (i, label, score) in tuples if label == expected_label]\n try:\n good_indecies.append(sorted(filtered, key=lambda x: x[2], reverse=True)[0][0])\n except:\n print(\"BAD; a mask for {0} is missing\".format(expected_label))\n\n masks = np.take(masks, good_indecies, axis=0)\n labels = np.take(labels, good_indecies)\n scores = np.take(scores, good_indecies)\n\n for i in range(len(masks)):\n if labels[i] in expected_labels:\n masks[i] = self.fill_holes_get_max_cnt(masks[i])\n\n return masks, labels, scores\n\n\n def fill_holes_get_max_cnt(self, mask, fill=-1):\n \"\"\"Given a binary mask, fills in any holes in the contours, selects the contour with max area\n and returns a mask with only it drawn + its bounding box\n \"\"\"\n\n # cv2.imshow(\"mask orig\", (mask*255).astype(np.uint8))\n\n # kernel = np.ones((5, 5), np.uint8)\n # mask = cv2.erode(mask.astype(np.uint8), kernel, iterations=1)\n\n canvas = np.zeros(mask.shape, dtype=np.uint8)\n cnts, hier = cv2.findContours(mask.astype(np.uint8),cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)\n areas = [cv2.contourArea(cnt) for cnt in cnts]\n\n (cnt, area) = sorted(zip(cnts, areas), key=lambda x: x[1], reverse=True)[0]\n cv2.drawContours(canvas,[cnt],0,1,fill)\n\n mask = canvas.astype(np.uint8)\n\n # cv2.imshow(\"mask\", mask*255)\n # cv2.waitKey()\n\n return mask.astype(np.uint8)\n\n\n def load_model(self, folder_name=\"./maskrcnn_model\", gpu_id=0):\n\n # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n # param\n params = yaml.load(open(osp.join(folder_name, 'params.yaml')))\n # print('Training config:')\n # print('# ' + '-' * 77)\n # pprint.pprint(params)\n # print('# ' + '-' * 77)\n\n # dataset\n if 'class_names' in params:\n class_names = params['class_names']\n else:\n raise ValueError\n\n # model\n\n if params['dataset'] == 'voc':\n if 
'min_size' not in params:\n params['min_size'] = 600\n if 'max_size' not in params:\n params['max_size'] = 1000\n if 'anchor_scales' not in params:\n params['anchor_scales'] = (1, 2, 4, 8, 16, 32)\n elif params['dataset'] == 'coco':\n if 'min_size' not in params:\n params['min_size'] = 800\n if 'max_size' not in params:\n params['max_size'] = 1333\n if 'anchor_scales' not in params:\n params['anchor_scales'] = (1, 2, 4, 8, 16, 32)\n else:\n assert 'min_size' in params\n assert 'max_size' in params\n assert 'anchor_scales' in params\n\n if params['pooling_func'] == 'align':\n pooling_func = cmr.functions.roi_align_2d\n elif params['pooling_func'] == 'pooling':\n pooling_func = cmr.functions.roi_pooling_2d\n elif params['pooling_func'] == 'resize':\n pooling_func = cmr.functions.crop_and_resize\n else:\n raise ValueError(\n 'Unsupported pooling_func: {}'.format(params['pooling_func'])\n )\n\n model_name = [x for x in os.listdir(folder_name) if \".npz\" in x][0]\n pretrained_model = osp.join(folder_name, model_name)\n print('Using pretrained_model: %s' % pretrained_model)\n\n model = params['model']\n self.mask_rcnn = cmr.models.MaskRCNNResNet(\n n_layers=int(model.lstrip('resnet')),\n n_fg_class=len(class_names),\n pretrained_model=pretrained_model,\n pooling_func=pooling_func,\n anchor_scales=params['anchor_scales'],\n mean=params.get('mean', (123.152, 115.903, 103.063)),\n min_size=params['min_size'],\n max_size=params['max_size'],\n roi_size=params.get('roi_size', 7),\n )\n \n self.class_names = np.array(class_names)\n self.score_threshold = 0.05\n\n # self.mask_rcnn.to_cpu()\n chainer.cuda.get_device_from_id(gpu_id).use()\n self.mask_rcnn.to_gpu()\n\n\n def quaternion_to_euler(self, quat):\n\n (x,y,z,w) = quat / np.linalg.norm(quat)\n \n import math\n t0 = +2.0 * (w * x + y * z)\n t1 = +1.0 - 2.0 * (x * x + y * y)\n X = math.degrees(math.atan2(t0, t1)) + 180\n X /= (360.)\n\n t2 = +2.0 * (w * y - z * x)\n t2 = +1.0 if t2 > +1.0 else t2\n t2 = -1.0 if t2 < -1.0 else t2\n Y = math.degrees(math.asin(t2)) + 180\n Y /= (360.)\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n Z = math.degrees(math.atan2(t3, t4)) + 180\n Z /= (360.)\n\n return X, Y, Z\n\n\n def generate_dataset(self, ignore=[\"unlabelled\"], args=None):\n \n crop_size = 128\n data_dimensions = [crop_size, crop_size, 7]\n\n seed = 0\n np.random.seed(seed)\n \n possible_groups = [['off', 'on'], \n ['nonfacing', 'facing'],\n ['out', 'in']]\n\n object_colors = ['red', 'blue', 'yellow', 'purple']\n object_shapes = ['cube', 'cup', 'bowl']\n\n # self.groups_rel = {0 : possible_groups[0]}\n # 1 : possible_groups[1],\n # 2 : possible_groups[2]}\n self.groups_rel = {i : possible_groups[i] for i in range(len(possible_groups))}\n\n self.groups_obj = {0 : object_colors,\n 1 : object_shapes}\n\n self.cutoff_for_labels = 30\n \n print(self.groups_rel)\n print(self.groups_obj)\n\n expected_labels_list = [['purple_cup', 'red_cube'], ['purple_cup', 'blue_cup'], ['purple_bowl', 'yellow_cube']]\n # expected_labels_list = [['purple_cup', 'blue_cup'], ['purple_bowl', 'yellow_cube']]\n relationships_start = []\n relationships = {'off':[[1],[]], \n 'on':[[],[]], \n 'nonfacing':[[],[]], \n 'facing':[[],[]], \n 'out':[[],[]], \n 'in':[[],[]]}\n relationships_start.append(relationships)\n\n relationships = {'off':[[],[]], \n 'on':[[],[]], \n 'nonfacing':[[1],[]], \n 'facing':[[],[]], \n 'out':[[],[]], \n 'in':[[],[]]}\n relationships_start.append(relationships)\n\n relationships = {'off':[[],[]], \n 'on':[[],[]], \n 
'nonfacing':[[],[]], \n 'facing':[[],[]], \n 'out':[[1],[]], \n 'in':[[],[]]}\n relationships_start.append(relationships)\n\n relationships_end = []\n relationships = {'off':[[],[]], \n 'on':[[1],[]], \n 'nonfacing':[[],[]], \n 'facing':[[],[]], \n 'out':[[],[]], \n 'in':[[],[]]}\n relationships_end.append(relationships)\n\n relationships = {'off':[[],[]], \n 'on':[[],[]], \n 'nonfacing':[[],[]], \n 'facing':[[1],[]], \n 'out':[[],[]], \n 'in':[[],[]]}\n relationships_end.append(relationships)\n\n relationships = {'off':[[],[]], \n 'on':[[],[]], \n 'nonfacing':[[],[]], \n 'facing':[[],[]], \n 'out':[[],[]], \n 'in':[[1],[]]}\n relationships_end.append(relationships)\n\n scene_objs_all = []\n scene_objs_all.append([{'color' : 'purple', 'shape' : 'cup'},\n {'color' : 'red', 'shape' : 'cube'}])\n scene_objs_all.append([{'color' : 'purple', 'shape' : 'cup'},\n {'color' : 'blue', 'shape' : 'cup'}])\n scene_objs_all.append([{'color' : 'purple', 'shape' : 'bowl'},\n {'color' : 'yellow', 'shape' : 'cube'}])\n\n train = []\n train_labels = []\n train_vectors = []\n train_masks = []\n train_object_vectors = []\n train_object_vector_masks = []\n train_eefs = []\n \n test = []\n test_labels = []\n test_vectors = []\n test_masks = []\n test_object_vectors = []\n test_object_vector_masks = []\n test_eefs = []\n\n unseen = []\n unseen_labels = []\n unseen_vectors = []\n unseen_masks = []\n unseen_object_vectors = []\n unseen_object_vector_masks = []\n\n for folder_name in self.folder_names[:]:\n\n demonstrations = sorted(os.listdir(folder_name))\n\n # if \"_2\" not in folder_name and \"_3\" not in folder_name:\n # demonstrations = demonstrations[:]\n # else:\n # demonstrations = demonstrations[:][:-5]\n\n for demonstration in demonstrations[:]:\n # for demonstration in demonstrations[:10]:\n # print(len(train))\n # print(len(test))\n files = glob.glob(osp.join(folder_name, demonstration, \"kinect2_qhd_image_color*.jpg\"))\n files = sorted([x.split('/')[-1].replace('kinect2_qhd_image_color_rect_', '').replace('.jpg', '') for x in files])\n\n if osp.exists(osp.join(folder_name, demonstration, 'segmented_masks.npy')):\n self.mask_mode = 'loading'\n masks_loaded_array = np.load(osp.join(folder_name, demonstration, 'segmented_masks.npy'))\n else:\n self.mask_mode = 'segmenting'\n masks_output_array = {}\n\n if self.mask_rcnn == None:\n # import chainer_mask_rcnn as cmr\n self.load_model(gpu_id=0)\n\n # file_list_train = files[:10] + files[-20:]\n file_list_train = files[:]\n\n number_of_files = len(file_list_train)\n train_n = int(self.data_split * number_of_files)\n test_n = number_of_files - train_n\n \n train_indecies = np.random.choice(range(number_of_files), train_n, replace=False)\n test_indecies = np.array(filter(lambda x : x not in train_indecies, range(number_of_files)))\n\n train_files = np.take(file_list_train, train_indecies)\n test_files = np.take(file_list_train, test_indecies)\n\n if \"train\" not in ignore:\n for file_idx in tqdm(range(len(file_list_train))):\n\n if file_idx > self.cutoff_for_labels and len(file_list_train) - file_idx > self.cutoff_for_labels:\n if file_idx % 3 != 0:\n continue\n\n file_name = file_list_train[file_idx]\n\n if (file_list_train.index(file_name)) == 0:\n print(\"Processing FOLDER {0}, {1}/{2}\".format(folder_name, \n demonstrations.index(demonstration) + 1, \n len(demonstrations)))\n\n orig_dims = [540, 960]\n\n if \"_2\" not in folder_name and \"_3\" not in folder_name:\n desired_dim = 384\n else:\n desired_dim = 512\n\n if \"_3\" in folder_name and 'facing' in 
folder_name:\n desired_dim = 384\n\n crop_window = [[], []]\n crop_window[0].append(orig_dims[0]/2 - desired_dim/2)\n crop_window[0].append(orig_dims[0]/2 + desired_dim/2)\n crop_window[1].append(orig_dims[1]/2 - desired_dim/2)\n crop_window[1].append(orig_dims[1]/2 + desired_dim/2)\n\n bgr = cv2.imread(osp.join(folder_name, demonstration, \n \"kinect2_qhd_image_color_rect_\" + file_name + \".jpg\"))\n bgr = bgr[crop_window[0][0] : crop_window[0][1], \n crop_window[1][0] : crop_window[1][1], \n :]\n bgr = bgr / 255.\n\n\n # if file_idx < self.cutoff_for_labels or len(file_list_train) - file_idx < self.cutoff_for_labels:\n # cv2.imshow(\"bgr\", (bgr * 255).astype(np.uint8))\n # cv2.waitKey(50)\n # continue\n\n depth_orig = cv2.imread(osp.join(folder_name, demonstration, \n \"kinect2_qhd_image_depth_rect_\" + file_name + \".jpg\"))\n depth_orig = depth_orig[crop_window[0][0] : crop_window[0][1], \n crop_window[1][0] : crop_window[1][1], \n :]\n # depth_orig = depth_orig / 255.\n depth_orig = depth_orig / float(np.max(depth_orig))\n \n if self.include_eef:\n eef_pose = []\n\n if file_idx < self.cutoff_for_labels:\n eef_file_name = file_list_train[-file_idx]\n elif len(file_list_train) - file_idx < self.cutoff_for_labels:\n eef_file_name = file_list_train[len(file_list_train) - file_idx]\n else:\n eef_file_name = file_name\n\n f = open(osp.join(folder_name, demonstration, \n \"r_wrist_roll_link_\" + file_name + '.txt'))\n for line in f: \n eef_pose.append(float(line))\n\n r, p, yaw = self.quaternion_to_euler(eef_pose[-4:])\n \n x = eef_pose[0]\n y = eef_pose[1] + 0.5\n z = eef_pose[2]\n\n if self.augment_flag:\n\n dist_range = 0\n r += np.random.uniform(-dist_range, dist_range, size=1)\n p += np.random.uniform(-dist_range, dist_range, size=1)\n yaw += np.random.uniform(-dist_range, dist_range, size=1)\n\n dist_range = 0\n x += np.random.uniform(-dist_range, dist_range, size=1)\n y += np.random.uniform(-dist_range, dist_range, size=1)\n z += np.random.uniform(-dist_range, dist_range, size=1)\n\n eef_pose = [x[0], y[0], z[0], r[0], p[0], yaw[0]]\n\n else:\n eef_pose = [x, y, z, r, p, yaw]\n\n # print(eef_pose)\n\n if self.mask_mode == 'loading':\n dict_entry = masks_loaded_array.item().get(file_name)\n\n if dict_entry == None:\n print(\"BAD\")\n continue\n\n masks = dict_entry.values()\n labels = dict_entry.keys()\n scores = np.zeros(len(masks))\n\n # if file_idx < self.cutoff_for_labels or len(file_list_train) - file_idx < self.cutoff_for_labels:\n # cv2.imshow(\"bgr\", (bgr * 255).astype(np.uint8))\n # cv2.imshow(\"depth\", (depth_orig * 255).astype(np.uint8))\n\n # for mask_idx, mask in enumerate(masks):\n # cv2.imshow(\"mask \" + labels[mask_idx], (mask * 255).astype(np.uint8))\n\n # cv2.waitKey(50)\n\n elif self.mask_mode == 'segmenting':\n expected_labels = expected_labels_list[self.folder_names.index(folder_name)]\n masks, labels, scores = self.segment_mask((bgr * 255).astype(np.uint8), expected_labels)\n \n if len(masks) != 2:\n continue\n\n masks_output_array[file_name] = {label : mask for label,mask in zip(labels, masks)}\n\n continue\n\n\n\n n_obj = 2\n init_rel = np.array(['unlabelled' for _ in range(3)])\n rel_index = {x : {y : init_rel.copy() for y in np.delete(np.arange(n_obj), x)} for x in np.arange(n_obj)}\n\n if file_idx < self.cutoff_for_labels:\n rels = relationships_end[self.folder_names.index(folder_name)]\n elif len(file_list_train) - file_idx < self.cutoff_for_labels:\n rels = relationships_start[self.folder_names.index(folder_name)]\n else:\n rels = {'off':[[],[]], \n 
'on':[[],[]], \n 'nonfacing':[[],[]], \n 'facing':[[],[]], \n 'out':[[],[]], \n 'in':[[],[]]}\n\n scene_objs = scene_objs_all[self.folder_names.index(folder_name)]\n\n for rel_name, obj_list in rels.items():\n for i in self.groups_rel:\n if rel_name in self.groups_rel[i]:\n group_idx = i\n for ref_idx, target_list in enumerate(obj_list):\n for target_idx in target_list:\n rel_index[ref_idx][target_idx][group_idx] = rel_name\n\n\n for (ref_idx, target_list) in rel_index.items():\n for (target_idx, rel_list) in target_list.items():\n \n # scale = 0.125\n # scale = 1\n if \"_2\" not in folder_name and \"_3\" not in folder_name:\n scale = 0.3333333333333\n else:\n scale = 0.25\n\n if \"_3\" in folder_name and 'facing' in folder_name:\n scale = 0.3333333333333\n\n color = cv2.resize(bgr.copy(), (0,0), fx=scale, fy=scale)\n depth = cv2.resize(depth_orig.copy(), (0,0), fx=scale, fy=scale)\n bg = np.zeros((depth.shape[0], depth.shape[1]))\n ref = cv2.resize(masks[ref_idx].copy().astype(np.uint8), (0,0), fx=scale, fy=scale)\n ref = ref.astype(np.float32)\n tar = cv2.resize(masks[target_idx].copy().astype(np.uint8), (0,0), fx=scale, fy=scale)\n tar = tar.astype(np.float32)\n\n # print(color.shape)\n # print(depth.shape)\n # print(ref.shape)\n # print(tar.shape)\n\n if np.sum(tar) == 0 or np.sum(ref) == 0 or (ref == tar).all():\n continue\n\n pixels = np.concatenate((color, depth[...,0,None], bg[...,None], ref[...,None], tar[...,None]), axis=2)\n\n if file_name in train_files:\n \n train += [pixels]\n train_labels.append(rel_list)\n mask = []\n vector = []\n\n for i, rel_name in enumerate(rel_list):\n\n if rel_name != \"unlabelled\":\n vector.append(self.groups_rel[i].index(rel_name))\n mask.append(1)\n else:\n vector.append(0)\n mask.append(0)\n\n train_masks.append(mask)\n train_vectors.append(vector)\n\n train_object_vectors.append([])\n train_object_vector_masks.append([])\n\n for idx in [ref_idx, target_idx]:\n color = scene_objs[idx]['color']\n shape = scene_objs[idx]['shape']\n train_object_vectors[-1].append([object_colors.index(color),\\\n object_shapes.index(shape)])\n train_object_vector_masks[-1].append([1, 1])\n \n if self.include_eef:\n train_eefs.append(eef_pose)\n \n elif file_name in test_files:\n \n test += [pixels]\n test_labels.append(rel_list)\n mask = []\n vector = []\n\n for i, rel_name in enumerate(rel_list):\n\n if rel_name != \"unlabelled\":\n vector.append(self.groups_rel[i].index(rel_name))\n mask.append(1)\n else:\n vector.append(0)\n mask.append(0)\n \n test_masks.append(mask)\n test_vectors.append(vector)\n\n test_object_vectors.append([])\n test_object_vector_masks.append([])\n\n for idx in [ref_idx, target_idx]:\n color = scene_objs[idx]['color']\n shape = scene_objs[idx]['shape']\n test_object_vectors[-1].append([object_colors.index(color),\\\n object_shapes.index(shape)])\n test_object_vector_masks[-1].append([1, 1])\n\n if self.include_eef: \n test_eefs.append(eef_pose)\n\n if self.mask_mode == 'segmenting':\n path = osp.join(folder_name, demonstration, 'segmented_masks.npy')\n np.save(path, masks_output_array)\n \n train = np.array(train, dtype=np.float32)\n train_labels = np.array(train_labels)\n train_vectors = np.array(train_vectors)\n train_masks = np.array(train_masks)\n train_object_vectors = np.array(train_object_vectors)\n train_object_vector_masks = np.array(train_object_vector_masks)\n train_eefs = np.array(train_eefs)\n\n test = np.array(test, dtype=np.float32)\n test_labels = np.array(test_labels)\n test_vectors = np.array(test_vectors)\n test_masks = 
np.array(test_masks)\n test_object_vectors = np.array(test_object_vectors)\n test_object_vector_masks = np.array(test_object_vector_masks)\n test_eefs = np.array(test_eefs)\n\n print(train.shape)\n print(test.shape)\n \n unseen = np.array(unseen, dtype=np.float32)\n unseen_labels = np.array(unseen_labels)\n unseen_vectors = np.array(unseen_vectors)\n unseen_masks = np.array(unseen_masks)\n unseen_object_vectors = np.array(unseen_object_vectors)\n unseen_object_vector_masks = np.array(unseen_object_vector_masks)\n\n train = train.reshape([len(train)] + data_dimensions)\n test = test.reshape([len(test)] + data_dimensions)\n # unseen = unseen.reshape([len(unseen)] + data_dimensions)\n train = np.swapaxes(train, 1 ,3)\n test = np.swapaxes(test, 1 ,3)\n if unseen != []:\n unseen = np.swapaxes(unseen, 2 ,4)\n\n if self.include_eef:\n train_concat = TupleDataset(train, train_vectors, train_masks, \\\n train_object_vectors, train_object_vector_masks, train_eefs)\n else: \n train_concat = TupleDataset(train, train_vectors, train_masks, \\\n train_object_vectors, train_object_vector_masks)\n if self.include_eef: \n test_concat = TupleDataset(test, test_vectors, test_masks, \\\n test_object_vectors, test_object_vector_masks, test_eefs)\n else: \n test_concat = TupleDataset(test, test_vectors, test_masks, \\\n test_object_vectors, test_object_vector_masks)\n unseen_concat = TupleDataset(unseen, unseen_vectors, unseen_masks, \\\n unseen_object_vectors, unseen_object_vector_masks)\n\n result = []\n result.append(train)\n result.append(train_labels)\n result.append(train_concat)\n result.append(train_vectors)\n\n result.append(test)\n result.append(test_labels)\n result.append(test_concat)\n result.append(test_vectors)\n\n result.append(unseen)\n result.append(unseen_labels)\n result.append(unseen_concat)\n result.append(unseen_vectors)\n\n result.append(self.groups_obj)\n result.append(self.groups_rel)\n\n\n # for i,x in enumerate(test_concat[:]):\n\n # image = x[0]\n # image = np.swapaxes(image, 0 ,2)\n\n # bgr = image[...,:3]\n # bg = image[...,4]\n # mask_obj_ref = image[...,5]\n # mask_obj_tar = image[...,6]\n\n # mask = x[2]\n # vector = x[1]\n\n # object_vectors = x[3]\n # object_vector_masks = x[4]\n\n # print(\"Labels\", list(test_labels[i]))\n\n # # print(\"Masks\", mask)\n # # print(\"Vectors\", vector)\n\n # print(\"Object vectors\", object_vectors)\n # # print(\"Object vector masks\", object_vector_masks)\n\n # # cv2.imshow(\"bg\", (bg*255).astype(np.uint8))\n\n # cv2.imshow(\"ref\", (mask_obj_ref*255).astype(np.uint8))\n # cv2.imshow(\"tar\", (mask_obj_tar*255).astype(np.uint8))\n # cv2.imshow(\"bgr\", (bgr*255).astype(np.uint8))\n \n # # if (mask_obj_ref == mask_obj_tar).all():\n # # cv2.imshow(\"diff\", (mask_obj_ref != mask_obj_tar).astype(np.uint8) * 255)\n # cv2.waitKey()\n\n\n return result\n\ndef plot_xyz(branch_0, branch_1, labels, vectors):\n\n xs = xyz_points[:,:,0][::5]\n ys = xyz_points[:,:,1][::5]\n zs = xyz_points[:,:,2][::5]\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n\n ax.scatter(xs, ys, zs, c='c')\n \n ax.set_xlabel('X', fontsize='20', fontweight=\"bold\")\n ax.set_xlim(-1, 1)\n ax.set_ylabel('Y', fontsize='20', fontweight=\"bold\")\n ax.set_ylim(-1, 1)\n ax.set_zlabel('Z', fontsize='20', fontweight=\"bold\")\n ax.set_zlim(0, 1)\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n BASE_DIR = \"yordan_experiments_3\"\n # folder_names = [osp.join(BASE_DIR, 'off-on'), \n # osp.join(BASE_DIR, 'nonfacing-facing'), \n # osp.join(BASE_DIR, 'out-in')]\n folder_names = 
[osp.join(BASE_DIR, 'off-on'), \n osp.join(BASE_DIR, 'out-in')]\n # folder_names = ['yordan_experiments/nonfacing-facing', 'yordan_experiments/out-in']\n data_generator = DataGenerator(folder_names=folder_names)\n result = data_generator.generate_dataset()\n\n # folder_names = ['outputs_test/left-right_no_no/' + str(i) for i in range(0,5)]\n # data_generator = DataGenerator(folder_names=folder_names)\n # result = data_generator.generate_dataset(ignore=['train'])"
] | [
[
"numpy.swapaxes",
"numpy.take",
"numpy.random.seed",
"numpy.arange",
"numpy.linalg.norm",
"numpy.save",
"numpy.concatenate",
"numpy.max",
"numpy.transpose",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
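The robot-data row targets Python 2.7 (per its header), so constructs such as `np.array(filter(...))` for the test indices do not carry over to Python 3, where `filter` is a lazy iterator. Below is a Python 3-safe sketch of the same train/test index split, using a hypothetical file count and the `default_rng` interface of recent NumPy; it is not the repository's code.

```python
import numpy as np

number_of_files = 100                     # hypothetical demo value
train_n = int(0.8 * number_of_files)      # matches the data_split=0.8 default

rng = np.random.default_rng(0)
train_indices = rng.choice(number_of_files, size=train_n, replace=False)
test_indices = np.setdiff1d(np.arange(number_of_files), train_indices)

assert len(train_indices) == train_n
assert len(train_indices) + len(test_indices) == number_of_files
```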
Mendes1302/Data_Science | [
"1d364ab2bf1f68f62bb2d9453d2897fef3f03484"
] | [
"View_data/graph_of_bar.py"
] | [
"from matplotlib import pyplot as plt\n\nmovies = [\"Annie Hall\", \"Ben-Hur\", \"casablanca\", \"Ganndhi\", \"West Side Story\"]\nnum_oscars = [5, 11, 3, 8, 10]\n\n\nxl = list()\nfor i, _ in enumerate(movies):\n xl.append(i+0.1)\n\nplt.bar(xl, num_oscars)\nplt.ylabel(\"# de Premiações\")\nplt.title(\"Meus Filmes Favoritos\")\nplt.xticks([i + 0.1 for i, _ in enumerate(movies)], movies)\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.title",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
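The bar-chart row offsets both the bars and the tick labels by 0.1; since `plt.bar` centres bars on the given x positions by default in matplotlib 2 and later, the same positions can simply be reused as tick locations. An equivalent, slightly more compact rendering is sketched below (same data as the row, string literals kept as in the source).

```python
from matplotlib import pyplot as plt

movies = ["Annie Hall", "Ben-Hur", "casablanca", "Ganndhi", "West Side Story"]
num_oscars = [5, 11, 3, 8, 10]

positions = range(len(movies))          # bar centres double as tick positions
plt.bar(positions, num_oscars)
plt.ylabel("# de Premiações")
plt.title("Meus Filmes Favoritos")
plt.xticks(positions, movies)
plt.show()
```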
tmatsuzawa/tflow | [
"f47a8cad097f37d4bc7f90bd010ff91f6365ec68"
] | [
"graph.py"
] | [
"'''\nModule for plotting and saving figures\n'''\nimport os, copy\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.pylab as pylab\nimport matplotlib.ticker as ticker\nimport mpl_toolkits.axes_grid as axes_grid\nfrom matplotlib.lines import Line2D\nimport matplotlib.lines as mlines\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.collections import LineCollection\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\nimport mpl_toolkits\nimport seaborn as sns\nfrom cycler import cycler\nfrom skimage import measure\n\n\nimport itertools\nfrom scipy.optimize import curve_fit\nfrom scipy import interpolate\nimport numpy as np\nfrom fractions import Fraction\nfrom math import modf\nimport pickle\nfrom scipy.stats import binned_statistic\nfrom numpy import ma\nimport scipy, seaborn, h5py\n\n# import ilpm.vector as vec\n# comment this and plot_fit_curve if it breaks\nimport tflow.std_func as std_func\n# Suppress warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n\n#Global variables\n#Default color cycle: iterator which gets repeated if all elements were exhausted\n#__color_cycle__ = itertools.cycle(iter(plt.rcParams['axes.prop_cycle'].by_key()['color']))\n__def_colors__ = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']\n__color_cycle__ = itertools.cycle(['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']) #matplotliv v2.0\n__old_color_cycle__ = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k']) #matplotliv classic\n__fontsize__ = 16\n__figsize__ = (8, 8)\ncmap = 'magma'\n\n# See all available arguments in matplotlibrc\nparams = {'figure.figsize': __figsize__,\n 'font.size': __fontsize__, #text\n 'legend.fontsize': 12, # legend\n 'axes.labelsize': __fontsize__, # axes\n 'axes.titlesize': __fontsize__,\n 'xtick.labelsize': __fontsize__, # tick\n 'ytick.labelsize': __fontsize__,\n 'lines.linewidth': 5}\n\n\n## Save a figure\ndef save(path, ext='pdf', close=False, verbose=True, fignum=None, dpi=None, overwrite=True, tight_layout=False,\n savedata=True, transparent=True, bkgcolor='w', **kwargs):\n \"\"\"\n Save a figure from pyplot\n\n Parameters\n ----------\n path: string\n The path (and filename, without the extension) to save the\n figure to.\n ext: string (default='png')\n The file extension. This must be supported by the active\n matplotlib backend (see matplotlib.backends module). Most\n backends support 'png', 'pdf', 'ps', 'eps', and 'svg'.\n ext : string (default='png')\n The file extension. This must be supported by the active\n matplotlib backend (see matplotlib.backends module). Most\n backends support 'png', 'pdf', 'ps', 'eps', and 'svg'.\n close : boolean (default=True)\n Whether to close the figure after saving. 
If you want to save\n the figure multiple times (e.g., to multiple formats), you\n should NOT close it in between saves or you will have to\n re-plot it.\n verbose : boolean (default=True)\n Whether to print information about when and where the image\n has been saved.\n fignum\n dpi\n overwrite\n tight_layout\n savedata\n transparent\n bkgcolor\n kwargs\n\n Returns\n -------\n\n \"\"\"\n if fignum == None:\n fig = plt.gcf()\n else:\n fig = plt.figure(fignum)\n if dpi is None:\n dpi = fig.dpi\n\n if tight_layout:\n fig.tight_layout()\n\n # Separate a directory and a filename from the given path\n directory = os.path.split(path)[0]\n filename = \"%s.%s\" % (os.path.split(path)[1], ext)\n if directory == '':\n directory = '.'\n # If the directory does not exist, create it\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # path where the figure is saved\n savepath = os.path.join(directory, filename)\n # if a figure already exists AND you'd like to overwrite, name a figure differently\n ver_no = 0\n while os.path.exists(savepath) and not overwrite:\n # this needs to be fixed. right now, it keeps saving to _000.png\n savepath = directory + '/' + os.path.split(path)[1]+ '_%03d.' % ver_no + ext\n ver_no += 1\n\n if verbose:\n print((\"Saving figure to '%s'...\" % savepath))\n\n # Save the figure\n # if transparent: bkgcolor=None\n plt.savefig(savepath, dpi=dpi, facecolor=bkgcolor,transparent=transparent, **kwargs)\n\n # Save fig instance... This may fail for python2\n if savedata:\n try:\n pickle.dump(fig, open(savepath[:-len(ext)-1] + '_fig.pkl', 'wb'))\n except:\n print('... Could not save a fig instance')\n\n # Close it\n if close:\n plt.close(fignum)\n\n if verbose:\n print(\"... Done\")\n\n\n## Create a figure and axes\ndefault_custom_cycler = {'color': ['r', 'b', 'g', 'y'],\n 'linestyle': ['-', '-', '-', '-'],\n 'linewidth': [3, 3, 3, 3],\n 'marker': ['o', 'o', 'o', 'o'],\n 's': [0,0,0,0]}\n\ndef set_fig(fignum, subplot=111, dpi=100, figsize=None,\n custom_cycler=False, custom_cycler_dict=default_custom_cycler, # advanced features to change a plotting style\n **kwargs):\n \"\"\"\n Returns Figure and Axes instances\n ... a short sniplet for\n plt.figure(fignum, dpi=dpi, figsize=figsize)\n plt.subplot(subplot, **kwargs)\n\n Parameters\n ----------\n fignum: int, figure number\n subplot: int, A 3-digit integer. The digits are interpreted as if given separately as three single-digit integers, i.e. fig.add_subplot(235) is the same as fig.add_subplot(2, 3, 5). Note that this can only be used if there are no more than 9 subplots.\n dpi: int,\n figsize: tuple, figure size\n custom_cycler: bool, If True, it enables users to customize a plot style (color cycle, marker cycle, linewidth cycle etc.)\n ... The customized cycler could be passed to custom_cycler_dict.\n custom_cycler_dict: dict, A summary of a plotting style.\n ... E.g.- default_custom_cycler = {'color': ['r', 'b', 'g', 'y'],\n 'linestyle': ['-', '-', '-', '-'],\n 'linewidth': [3, 3, 3, 3],\n 'marker': ['o', 'o', 'o', 'o'],\n 's': [0,0,0,0]}\n ... 
The dictionary is turned into a list of cyclers, and passed to ax.set_prop_cycle(custom_cycler).\n\n kwargs: Visit plt.subplot(**kwargs) for available kwargs\n\n Returns\n -------\n fig: Figure instance\n ax: Axes instance\n \"\"\"\n if fignum == -1:\n if figsize is not None:\n fig = plt.figure(dpi=dpi, figsize=figsize)\n else:\n fig = plt.figure(dpi=dpi)\n if fignum == 0:\n fig = plt.cla() #clear axis\n if fignum > 0:\n if figsize is not None:\n fig = plt.figure(num=fignum, dpi=dpi, figsize=figsize)\n fig.set_size_inches(figsize[0], figsize[1])\n else:\n fig = plt.figure(num=fignum, dpi=dpi)\n fig.set_dpi(dpi)\n if subplot is None:\n subplot = 111\n # >=matplotlib 3.4: fig.add_subplot() ALWAYS creates a new axes instance\n # <matplotlib 3.4: fig.add_subplot() returns an existing Axes instance if it existed\n # ax = fig.add_subplot(subplot, **kwargs, )\n # >matplotlib 3.4 plt.suplot() continues to reuse an existing Axes with a matching subplot spec and equal kwargs.\n ax = plt.subplot(subplot, **kwargs, )\n\n if custom_cycler:\n apply_custom_cyclers(ax, **custom_cycler_dict)\n\n return fig, ax\n\n\ndef plotfunc(func, x, param, fignum=1, subplot=111, ax = None, label=None, color=None, linestyle='-', legend=False, figsize=None, **kwargs):\n \"\"\"\n plot a graph using the function fun\n fignum can be specified\n any kwargs from plot can be passed\n Use the homemade function refresh() to draw and plot the figure, no matter the way python is called (terminal, script, notebook)\n \"\"\"\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n\n # y = func(x, a, b)\n if len(param)==1:\n a=param[0]\n y = func(x, a)\n if len(param) == 2:\n a, b = param[0], param[1]\n y = func(x, a, b)\n if len(param) == 3:\n a, b, c = param[0], param[1], param[2]\n y = func(x, a, b, c)\n if len(param) == 4:\n a, b, c, d = param[0], param[1], param[2], param[3]\n y = func(x, a, b, c, d)\n if not color==None:\n ax.plot(x, y, color=color, linestyle=linestyle, label=label, **kwargs)\n else:\n ax.plot(x, y, label=label, linestyle=linestyle, **kwargs)\n if legend:\n ax.legend()\n return fig, ax\n\ndef plot(x, y=None, fmt='-', fignum=1, figsize=None, label='', color=None, subplot=None, legend=False,\n fig=None, ax=None, maskon=False, thd=1, xmin=None, xmax=None,\n set_bottom_zero=False, symmetric=False, #y-axis\n set_left_zero=False,\n smooth=False, smoothlog=False, window_len=5, window='hanning',\n custom_cycler=None, custom_cycler_dict=default_custom_cycler,\n return_xy=False, **kwargs):\n \"\"\"\n plot a graph using given x,y\n fignum can be specified\n any kwargs from plot can be passed\n \"\"\"\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n if custom_cycler:\n apply_custom_cyclers(ax, **custom_cycler_dict)\n\n if y is None:\n y = copy.deepcopy(x)\n x = np.arange(len(x))\n # Make sure x and y are np.array\n x, y = np.asarray(x), np.asarray(y)\n\n if len(x) > len(y):\n print(\"Warning : x and y data do not have the same length\")\n x = x[:len(y)]\n elif len(y) > len(x):\n print(\"Warning : x and y data do not have the same length\")\n y = y[:len(x)]\n\n # remove nans\n keep = ~np.isnan(x) * ~np.isnan(y)\n x, y = x[keep], y[keep]\n\n if maskon:\n keep = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n keep = [True] * len(x)\n if xmax is not None:\n keep *= x < xmax\n if xmin is not None:\n keep *= x >= xmin\n\n if smooth:\n x2plot = x[keep]\n y2plot = 
smooth1d(y[keep], window_len=window_len, window=window)\n elif smoothlog:\n x2plot = x[keep]\n try:\n logy2plot = smooth1d(np.log10(y[keep]), window_len=window_len, window=window)\n y2plot = 10**logy2plot\n except:\n y2plot = y[keep]\n else:\n x2plot, y2plot = x[keep], y[keep]\n if color is None:\n ax.plot(x2plot, y2plot, fmt, label=label, **kwargs)\n else:\n ax.plot(x2plot, y2plot, fmt, color=color, label=label, **kwargs)\n\n if legend:\n ax.legend()\n\n if set_bottom_zero:\n ax.set_ylim(bottom=0)\n if set_left_zero:\n ax.set_xlim(left=0)\n if symmetric:\n ymin, ymax = ax.get_ylim()\n yabs = np.abs(max(-ymin, ymax))\n ax.set_ylim(-yabs, yabs)\n if return_xy:\n return fig, ax, x2plot, y2plot\n else:\n return fig, ax\n\n\ndef plot_multicolor(x, y=None, colored_by=None, cmap='viridis',\n fignum=1, figsize=None,\n subplot=None,\n fig=None, ax=None, maskon=False, thd=1,\n linewidth=2, vmin=None, vmax=None, **kwargs):\n \"\"\"\n plot a graph using given x,y\n fignum can be specified\n any kwargs from plot can be passed\n\n org source: https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/multicolored_line.html\n \"\"\"\n\n if vmin is None:\n vmin = np.nanmin(colored_by)\n if vmax is None:\n vmax = np.nanmax(colored_by)\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n if y is None:\n y = copy.deepcopy(x)\n # x = np.arange(len(x))\n # Make sure x and y are np.array\n x, y = np.asarray(x), np.asarray(y)\n\n if colored_by is None:\n print('... colored_by is None. Pass a list/array by which line segments are colored. Using x instead...')\n colored_by = x\n\n if len(x) > len(y):\n print(\"Warning : x and y data do not have the same length\")\n x = x[:len(y)]\n elif len(y) > len(x):\n print(\"Warning : x and y data do not have the same length\")\n y = y[:len(x)]\n if maskon:\n mask = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n mask = [True] * len(x)\n\n x, y, colored_by = x[mask], y[mask], colored_by[mask]\n # Create a set of line segments so that we can color them individually\n # This creates the points as a N x 1 x 2 array so that we can stack points\n # together easily to get the segments. The segments array for line collection\n # needs to be (numlines) x (points per line) x 2 (for x and y)\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n norm = plt.Normalize(vmin, vmax)\n lc = LineCollection(segments, cmap=cmap, norm=norm)\n\n # Set the values used for colormapping\n lc.set_array(colored_by)\n lc.set_linewidth(linewidth)\n line = ax.add_collection(lc)\n\n # autoscale does not work for collection => manually set x/y limits\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.min(), y.max())\n\n return fig, ax\n\n\n\ndef plot_with_varying_alphas(x, y=None, color=next(__color_cycle__), alphas=None,\n fignum=1, figsize=None,\n subplot=None,\n fig=None, ax=None,\n xmin=None, xmax=None,\n maskon=False, thd=1, # Filter out erroneous data by threasholding\n linewidth=2, **kwargs):\n \"\"\"\n Plots a curve with varying alphas (e.g. fading curves)\n ... plt.plot(x, y, alpha=alpha) does not allow varying alpha.\n ... A workaround for this is to use LineCollection. i.e. create lines for each segment, then assign different alpha values\n\n Parameters\n ----------\n x\n y\n color: color of the line\n alphas: list/array with the same length as x and y\n ... 
default: alphas = 1 - np.linspace(0, 1, len(x)) (linearly fade)\n\n Parameters\n ----------\n x\n y\n color\n alphas\n fignum\n figsize\n subplot\n fig\n ax\n xmin\n xmax\n maskon\n thd\n linewidth\n kwargs\n\n Returns\n -------\n\n \"\"\"\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n if y is None:\n y = copy.deepcopy(x)\n # x = np.arange(len(x))\n if alphas is None:\n alphas = 1 - np.linspace(0, 1, len(x)) # default alphas\n alphas[alphas < 0] = 0\n alphas[alphas > 1] = 1\n alphas[np.isnan(alphas)] = 0\n\n # Make sure x and y are np.array\n x, y, alphas = np.asarray(x), np.asarray(y), np.asarray(alphas)\n\n if len(x) > len(y):\n print(\"Warning : x and y data do not have the same length\")\n x = x[:len(y)]\n elif len(y) > len(x):\n print(\"Warning : x and y data do not have the same length\")\n y = y[:len(x)]\n if maskon:\n mask = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n mask = [True] * len(x)\n if xmin is not None:\n cond = x > xmin\n mask = mask * cond\n if xmax is not None:\n cond = x < xmax\n mask = mask * cond\n\n\n x, y, alphas= x[mask], y[mask], alphas[mask]\n # Create a set of line segments so that we can color them individually\n # This creates the points as a N x 1 x 2 array so that we can stack points\n # together easily to get the segments. The segments array for line collection\n # needs to be (numlines) x (points per line) x 2 (for x and y)\n points = np.array([x, y]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n # Get RGBA values of the specified color\n if type(color) == str:\n try:\n rgb = hex2rgb(cname2hex(color))\n except:\n rgb = hex2rgb(color) # Returned values are [0-255, 0-255, 0-255]\n rgba = np.append(rgb/255, 1).astype(float) # RGBA values must be between 0-1\n # Prepare an array to specify a color for each segment\n colors = np.tile(rgba, (len(x), 1))\n elif type(color) in [tuple, list, np.array]:\n if len(color) == 3:\n colors = np.tile(np.append(color, 1), (len(x), 1))\n elif len(color) == 4:\n colors = np.tile(color, (len(x), 1))\n else:\n raise ValueError('plot_with_varying_alphas: color must be a tuple/list/1d array with 3 or 4 elements (rgb or rgba)')\n # Insert the alphas specified by users\n colors[:, -1] = alphas\n # Create a line collection instead of a single line\n lc = LineCollection(segments, color=colors)\n\n lc.set_linewidth(linewidth)\n lines = ax.add_collection(lc)\n\n # autoscale does not work for collection => manually set x/y limits\n ax.set_xlim(x.min(), x.max())\n ax.set_ylim(y.min(), y.max())\n\n return fig, ax\n\ndef plot_with_arrows(x, y=None, fignum=1, figsize=None, label='', color=None, subplot=None, legend=False, fig=None, ax=None, maskon=False, thd=1, **kwargs):\n \"\"\"\n Add doc later\n Parameters\n ----------\n x\n y\n fignum\n figsize\n label\n color\n subplot\n legend\n fig\n ax\n maskon\n thd\n kwargs\n\n Returns\n -------\n\n \"\"\"\n fig, ax = plot(x, **kwargs)\n lines = ax.get_lines()\n for i, line in enumerate(lines):\n add_arrow_to_line(ax, line)\n return fig, ax\n\n\ndef plot3d(x, y, z, fignum=1, figsize=None, label='', color=None, subplot=None, fig=None,\n ax=None, labelaxes=True, **kwargs):\n \"\"\"\n plot a 3D graph using given x, y, z\n \"\"\"\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize, projection='3d')\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n # Make sure x and y are 
np.array\n x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)\n\n if not len(x)==len(y)==len(z):\n raise ValueError('... x, y, z do not have the same length.')\n\n # # color=color,\n # color=color,\n # )\n if color is None:\n line, = ax.plot(x, y, z, label=label, **kwargs)\n else:\n line, = ax.plot(x, y, z, color=color, label=label, **kwargs)\n if labelaxes:\n ax.set_xlabel('x (mm)')\n ax.set_ylabel('y (mm)')\n ax.set_zlabel('z (mm)')\n\n set_axes_equal(ax)\n return fig, ax\n\n\n\ndef plot_surface(x, y, z, shade=True, fig=None, ax=None, fignum=1, subplot=None, figsize=None,\n azdeg=0, altdeg=65):\n \"\"\"\n plot_surface for the graph module\n ... By default, it enables the shading feature\n\n Source: https://stackoverflow.com/questions/28232879/phong-shading-for-shiny-python-3d-surface-plots/31754643\n\n Parameters\n ----------\n x\n y\n z\n shade\n fignum\n subplot\n figsize\n\n Returns\n -------\n\n \"\"\"\n # example\n # x, y = np.mgrid[-3:3:100j,-3:3:100j]\n # z = 3*(1 - x)**2 * np.exp(-x**2 - (y + 1)**2) - 10*(x/5 - x**3 - y**5)*np.exp(-x**2 - y**2) - 1./3*np.exp(-(x + 1)**2 - y**2)\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize, projection='3d')\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n # Create light source object.\n ls = mpl.colors.LightSource(azdeg=azdeg, altdeg=altdeg)\n if shade:\n # Shade data, creating an rgb array.\n rgb = ls.shade(z, plt.cm.RdYlBu)\n else:\n rgb = None\n surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, linewidth=0,\n antialiased=False, facecolors=rgb)\n return fig, ax, surf\n\n\ndef plot_isosurface(qty, isovalue, xxx, yyy, zzz, cmap='Spectral',\n r=None, xc=0, yc=0, zc=0, fill_value=0,\n fignum=1, subplot=None, lw=1,\n figsize=(8, 8), labelaxes=True, **kwargs):\n \"\"\"\n Plots a isosurface given a 3D data, value at which isosurface is defined, 3D grids in Cartesian coordinates)\n ... the isosurface is extracted by the marching cube algorithm\n\n Parameters\n ----------\n qty: 3D array, a scalar field\n isovalue: float, the value at which the isosurface is extracted\n xxx: 3D array, x component of the positional grid\n yyy: 3D array, y component of the positional grid\n zzz: 3D array, z component of the positional grid\n cmap: str, name of the color map used to plot the isosurface\n r: float,\n ... If provided, it treats the qty[R > r] = fill_value where R = np.sqrt(xx**2 + yy**2 + zz**2)\n ... This is used for a simple fitering the problematic outliers near the boundaries\n xc: float/int, x-coordinate of the origin in case r is not None\n yc: float/int, y-coordinate of the origin in case r is not None\n zc: float/int, z-coordinate of the origin in case r is not None\n fill_value: float/int\n ... If r is not None, it sets qty[R > r] = fill_value where R = np.sqrt(xx**2 + yy**2 + zz**2)\n fignum: int, figure number (>=1)\n subplot: int, e.g. 121, 111, 331, default=None\n ... the three digit notaton for the matplotlib\n figsize: tuple, figure size in inches e.g.- (8, 8)\n labelaxes: bool, default True\n ... 
If True, it labels each axis as x(mm), y(mm), z(mm)\n\n Returns\n -------\n fig, ax: matplotlib.figure.Figure instance, matplotlib.axes.Axes instance,\n \"\"\"\n def get_grid_spacing(xx, yy, zz=None):\n dim = len(xx.shape)\n if dim == 2:\n dx = np.abs(xx[0, 1] - xx[0, 0])\n dy = np.abs(yy[1, 0] - yy[0, 0])\n return dx, dy\n elif dim == 3:\n dx = np.abs(xx[0, 1, 0] - xx[0, 0, 0])\n dy = np.abs(yy[1, 0, 0] - yy[0, 0, 0])\n dz = np.abs(zz[0, 0, 1] - zz[0, 0, 0])\n return dx, dy, dz\n\n def cart2sph(x, y, z):\n \"\"\"\n Transformation: cartesian to spherical\n z = r cos theta\n y = r sin theta sin phi\n x = r sin theta cos phi\n\n Parameters\n ----------\n x: array / float /int\n y: array / float /int\n z: array / float /int\n\n Returns\n -------\n r: radial distance\n theta: polar angle [-pi/2, pi/2] (angle from the z-axis)\n phi: azimuthal angle [-pi, pi] (angle on the x-y plane)\n\n \"\"\"\n # hxy = np.hypot(x, y)\n # r = np.hypot(hxy, z)\n # theta = np.arctan2(z, hxy)\n # phi = np.arctan2(y, x)\n r = np.sqrt(x ** 2 + y ** 2 + z ** 2)\n theta = np.arccos(z / r)\n phi = np.arctan2(y, x)\n return r, theta, phi\n\n if np.sum(np.isnan(qty)) > 0:\n raise ValueError(\n 'plot_isosurface: qty contains np.nan. skimage.measure.marching_cubes_lewiner does not work with nans.')\n dx, dy, dz = get_grid_spacing(xxx, yyy, zzz)\n\n qty_ = copy.deepcopy(qty)\n\n if r is not None:\n rrr, tttheta, ppphi = cart2sph(xxx - xc, yyy - yc, zzz - zc)\n qty_[rrr > r] = fill_value\n verts, faces, normals, vals = measure.marching_cubes_lewiner(qty_, isovalue, spacing=(dy, dx, dz))\n\n verts[:, 0] += np.min(yyy)\n verts[:, 1] += np.min(xxx)\n verts[:, 2] += np.min(zzz)\n\n fig, ax = set_fig(fignum, subplot, figsize=figsize, projection='3d')\n ax.plot_trisurf(verts[:, 0], verts[:, 1], faces, verts[:, 2],\n cmap=cmap, lw=lw, **kwargs)\n set_axes_equal(ax)\n if labelaxes:\n ax.set_xlabel('$x~(mm)$')\n ax.set_ylabel('$y~(mm)$')\n ax.set_zlabel('$z~(mm)$')\n return fig, ax\n\ndef plot_spline(x_, y_, order=3,\n fignum=1, figsize=None, subplot=None,\n fig=None, ax=None, log=False,\n label='', color=None, legend=False,\n maskon=False, thd=1., **kwargs):\n \"\"\"\n Plots a spline representation of a curve (x against y)\n\n Parameters\n ----------\n x: 1d array-like\n y: 1d array-like\n order: int, order of spline interpolation\n fignum: int, figure number, default=1\n figsize: tuple, figure size e.g. (8, 8) in inch\n subplot# int, e.g.- 121- matplotlib shorthand notation\n fig: matplotlib.figure.Figure instance, default: None\n ax: matplotlib.axes.Axes instance, default: None\n ... If passed, this function plots a curve on the given ax.\n label: str, label of the curve\n color: str, color e.g.- 'r' for red, 'b' for blue. Consult mpl website for the full color code.\n legend: bool, If True, ax.legend() is called.\n maskon: bool, If True, it uses get_mask4erroneous_pts() to spot potentially erroneous values, and hides them.\n thd: float, This argument is only relevant if maskon=True. This is a parameter which controls the tolerance of the jumpiness of hte plot.\n ... 
The higher thd is, the less inputs gets hide.\n kwargs: dict, The other keyword arguments gets passed to ax.plot()\n\n Returns\n -------\n fig, ax: matplotlib.figure.Figure instance, matplotlib.axes.Axes instance\n \"\"\"\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n if log:\n x = np.log10(copy.deepcopy(x_))\n y = np.log10(copy.deepcopy(y_))\n else:\n x, y = x_, y_\n\n if y is None:\n y = copy.deepcopy(x)\n x = np.arange(len(x))\n # Make sure x and y are np.array\n x, y = np.asarray(x), np.asarray(y)\n\n if len(x) > len(y):\n print(\"Warning : x and y data do not have the same length\")\n x = x[:len(y)]\n elif len(y) > len(x):\n print(\"Warning : x and y data do not have the same length\")\n y = y[:len(x)]\n cond1, cond2 = ~np.isnan(x), ~np.isnan(y)\n keep = cond1 * cond2\n x, y = x[keep], y[keep]\n\n\n try:\n if maskon:\n mask = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n mask1 = ~np.isnan(x)\n mask2 = ~np.isnan(y)\n mask = mask1 * mask2\n spl_func = interpolate.UnivariateSpline(x[mask], y[mask], k=order)\n except:\n x, y, yerr = get_binned_stats(x, y, n_bins=len(x))\n if maskon:\n mask = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n mask = [True] * len(x)\n spl_func = interpolate.UnivariateSpline(x[mask], y[mask], k=order)\n x2plot = np.linspace(np.nanmin(x), np.nanmax(x), 1000)\n y2plot = spl_func(x2plot)\n\n if log:\n x2plot, y2plot = 10**x2plot, 10**y2plot\n\n if color is None:\n ax.plot(x2plot, y2plot, label=label, **kwargs)\n else:\n ax.plot(x2plot, y2plot, color=color, label=label, **kwargs)\n\n if legend:\n ax.legend()\n return fig, ax\n\ndef plot_date(dates, y,\n fignum=1, figsize=None, label='', color=None, subplot=None, legend=False,\n fig=None, ax=None, set_bottom_zero=False, **kwargs):\n \"\"\"\n A function to plot values against dates with format \"2020-01-01\"\n\n Parameters\n ----------\n dates: 1d array-like of dates- each entry must be in the format \"YYYY-MM-DD\"\n y: 1d array-like\n fignum: int, fignure number\n figsize: tuple, figure size e.g.- (8, 8)\n label: label kwarg in plt.plot_date\n color: str, color kwarg in plt.plot_date\n subplot: int, 3-digit notation to specify a subplot\n legend: bool\n fig: mpl.figure.Figure instance- if given, it will just return this instance at the end\n ax: mpl.axes.Axes instance- if given, it plots the given inputs on this subplot.\n set_bottom_zero: bool, if True, it sets the ymin=0\n kwargs: the keyword arguments will be passed to plt.plot_date()\n\n Returns\n -------\n fig, ax: matplotlib.figure.Figure instance, matplotlib.axes.Axes instance\n\n \"\"\"\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n # Make sure x and y are np.array\n if len(dates) > len(y):\n print(\"Warning : x and y data do not have the same length\")\n dates = dates[:len(y)]\n elif len(y) > len(dates):\n print(\"Warning : x and y data do not have the same length\")\n y = y[:len(dates)]\n\n # remove nans\n keep = ~np.isnan(dates) * ~np.isnan(y)\n dates, y = dates[keep], y[keep]\n\n ax.plot_date(dates, y, label=label, color=color, **kwargs)\n if legend:\n ax.legend()\n\n if set_bottom_zero:\n ax.set_ylim(bottom=0)\n return fig, ax\n\n\ndef pie(sizes, labels=None, explode=None, autopct='%1.1f%%', startangle=90, shadow=False, sort=True,\n fignum=1, figsize=None, subplot=None,\n fig=None, ax=None, **kwargs):\n \"\"\"\n A 
wrapper for plt.pie\n ... a main difference from the original plt.plot is the sorting feature. It automatically sorts the portions from the largest to smallest.\n ... If one\n \"\"\"\n\n def sort_n_arrays_using_order_of_first_array(list_of_arrays, element_dtype=tuple):\n \"\"\"\n Sort a list of N arrays by the order of the first array in the list\n e.g. a=[2,1,3], b=[1,9,8], c=['a', 'b', 'c']\n [a, b, c] -> [(1, 2, 3), (9, 1, 8), ('b', 'a', 'c')]\n\n Parameters\n ----------\n list_of_arrays: a list of lists/1D-arrays\n element_dtype: data type, default: tuple\n ... This argument specifies the data type of the elements in the returned list\n ... The default data type of the element is tuple because this functon utilizes sorted(zip(...))\n ... E.g. element_dtype=np.ndarray\n -> [a, b, c] -> [np.array([1, 2, 3]),\n np.array([9, 1, 8],\n np.array(['b', 'a', 'c'], dtype='<U1']\n\n Returns\n -------\n list_of_sorted_arrays: list of sorted lists/1D arrays\n\n \"\"\"\n\n list_of_sorted_arrays = list(zip(*sorted(zip(*list_of_arrays))))\n if element_dtype == list:\n list_of_sorted_arrays = [list(a) for a in list_of_sorted_arrays]\n elif element_dtype == np.ndarray:\n list_of_sorted_arrays = [np.asarray(a) for a in list_of_sorted_arrays]\n\n return list_of_sorted_arrays\n\n if sort:\n if explode is None:\n explode = [0] * len(sizes)\n if labels is None:\n labels_dummy = [''] * len(sizes)\n sizes, labels_dummy, explode = sort_n_arrays_using_order_of_first_array([sizes, labels_dummy, explode])\n else:\n sizes, labels, explode = sort_n_arrays_using_order_of_first_array([sizes, labels, explode])\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n ax.pie(sizes, explode=explode, labels=labels, autopct=autopct,\n shadow=shadow, startangle=startangle, **kwargs)\n ax.axis('equal')\n\n return fig, ax\n\n\ndef plot_saddoughi(fignum=1, fig=None, ax=None, figsize=None,\n # label='Re$_{\\lambda} \\\\approx 600 $ \\n Saddoughi and Veeravalli, 1994',\n label='Re$_{\\lambda} \\\\approx 600 $ \\n SV, 1994',\n color='k', alpha=0.6, subplot=None, cc=1, legend=False, **kwargs):\n \"\"\"\n plot universal 1d energy spectrum (Saddoughi, 1992)\n\n E(k)=C epsilon^(2/3)k^(-5/3), E11(k)=C1 epsilon^(2/3)k^(-5/3)\n # # In iso, homo, turbulence, C1 = 18/55 C. (Pope 6.242)\n # c = 1.6\n # c1 = 18. / 55. * c\n\n \"\"\"\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n x = np.asarray([1.27151, 0.554731, 0.21884, 0.139643, 0.0648844, 0.0198547, 0.00558913, 0.00128828, 0.000676395, 0.000254346])\n y = np.asarray([0.00095661, 0.0581971, 2.84666, 11.283, 59.4552, 381.78, 2695.48, 30341.9, 122983, 728530])\n y *= cc\n ax.plot(x, y, color=color, label=label, alpha=alpha,**kwargs)\n if legend:\n ax.legend()\n tologlog(ax)\n labelaxes(ax, '$\\kappa \\eta$', '$E_{11} / (\\epsilon\\\\nu^5)^{1/4}$')\n return fig, ax\n\ndef plot_saddoughi_struc_func(fignum=1, fig=None, ax=None, figsize=None,\n label='Re$_{\\lambda} \\approx 600 $ \\n Saddoughi and Veeravalli, 1994',\n color='k', alpha=0.6, subplot=None,\n legend=False, **kwargs):\n \"\"\"\n Plots the second order structure function on Saddoughi & Veeravalli, 1994\n\n Parameters\n ----------\n fignum\n fig\n ax\n figsize\n label\n color: str, array-like (1d)\n alpha\n subplot\n legend\n marker: str or list\n ... 
Unlike the plt.scatter(), this accepts a list for markers.\n A useful feature if one wants to plot with different markers\n kwargs\n\n Returns\n -------\n fig, ax\n \"\"\"\n tflow_dir = os.path.split(os.path.realpath(__file__))[0]\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n datapath = tflow_dir + '/reference_data/sv_struc_func.h5'\n # datapath = tflow_dir + '/velocity_ref/sv_struc_func.txt'\n # data = np.loadtxt(datapath, skiprows=1, delimiter=',')\n # r_scaled, dll = data[:, 0], data[:, 1]\n with h5py.File(datapath, 'r') as ff:\n r_scaled = np.asarray(ff['r_s'])\n dll = np.asarray(ff['dll'])\n ax.plot(r_scaled, dll, alpha=alpha, color=color, label=label, **kwargs)\n if legend:\n ax.legend()\n tosemilogx(ax)\n labelaxes(ax, '$r / \\eta$', '$D_{LL} / (\\epsilon r)^{2/3}$')\n\ndef scatter(x, y, ax=None, fig=None, fignum=1, figsize=None,\n marker='o', fillstyle='full', label=None, subplot=None, legend=False,\n maskon=False, thd=1,\n xmin=None, xmax=None, alpha=1.,\n set_bottom_zero=False, symmetric=False,\n set_left_zero=False,\n **kwargs):\n \"\"\"\n plot a graph using given x,y\n fignum can be specified\n any kwargs from plot can be passed\n Use the homemade function refresh() to draw and plot the figure, no matter the way python is called (terminal, script, notebook)\n \"\"\"\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n if figsize is not None:\n fig.set_size_inches(figsize)\n\n x, y = np.array(x), np.array(y)\n if len(x.flatten()) > len(y.flatten()):\n print(\"Warning : x and y data do not have the same length\")\n x = x[:len(y)]\n\n if maskon:\n keep = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n keep = [True] * len(x)\n if xmax is not None:\n keep *= x < xmax\n if xmin is not None:\n keep *= x >= xmin\n\n if type(marker) == list:\n marker_list = [m for i, m in enumerate(marker) if mask[i]]\n marker = None\n else:\n marker_list = None\n\n if fillstyle =='none':\n # Scatter plot with open markers\n facecolors = 'none'\n if type(alpha) == float or type(alpha) == int:\n # ax.scatter(x, y, color=color, label=label, marker=marker, facecolors=facecolors, edgecolors=edgecolors, **kwargs)\n sc = ax.scatter(x[keep], y[keep], label=label, marker=marker, facecolors=facecolors, alpha=alpha, **kwargs)\n else:\n for i, alpha_ in enumerate(alpha[keep]):\n if i != 0:\n label=None\n sc = ax.scatter(x[keep][i], y[keep][i], label=label, marker=marker, facecolors=facecolors, alpha=alpha_, **kwargs)\n else:\n if type(alpha) == float or type(alpha) == int:\n sc = ax.scatter(x[keep], y[keep], label=label, marker=marker, alpha=alpha, **kwargs)\n else:\n for i, alpha_ in enumerate(alpha[keep]):\n if i != 0:\n label=None\n sc = ax.scatter(x[keep][i], y[keep][i], label=label, marker=marker, alpha=alpha_, **kwargs)\n if legend:\n plt.legend()\n\n if type(marker_list) == list:\n paths = []\n for marker in marker_list:\n if isinstance(marker, mpl.markers.MarkerStyle):\n marker_obj = marker\n else:\n marker_obj = mpl.markers.MarkerStyle(marker)\n path = marker_obj.get_path().transformed(\n marker_obj.get_transform())\n paths.append(path)\n sc.set_paths(paths)\n\n if set_bottom_zero:\n ax.set_ylim(bottom=0)\n if set_left_zero:\n ax.set_xlim(left=0)\n if symmetric:\n xmin, xmax = ax.get_xlim()\n xabs = np.abs(max(-xmin, xmax))\n ymin, ymax = ax.get_ylim()\n yabs = 
np.abs(max(-ymin, ymax))\n ax.set_xlim(-xabs, xabs)\n ax.set_ylim(-yabs, yabs)\n return fig, ax\n\ndef scatter3d(x, y, z, ax=None, fig=None, fignum=1, figsize=None, marker='o',\n fillstyle='full', label=None, subplot=None, legend=False,\n labelaxes=True, **kwargs):\n \"\"\"\n plot a graph using given x,y\n fignum can be specified\n any kwargs from plot can be passed\n Use the homemade function refresh() to draw and plot the figure, no matter the way python is called (terminal, script, notebook)\n \"\"\"\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize, projection='3d')\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n x, y, z = np.array(x), np.array(y), np.asarray(z)\n\n if fillstyle =='none':\n # Scatter plot with open markers\n facecolors = 'none'\n # ax.scatter(x, y, color=color, label=label, marker=marker, facecolors=facecolors, edgecolors=edgecolors, **kwargs)\n ax.scatter(x, y, z, label=label, marker=marker, facecolors=facecolors, **kwargs)\n else:\n ax.scatter(x, y, z, label=label, marker=marker, **kwargs)\n if legend:\n plt.legend()\n\n if labelaxes:\n ax.set_xlabel('x (mm)')\n ax.set_ylabel('y (mm)')\n ax.set_zlabel('z (mm)')\n\n set_axes_equal(ax)\n return fig, ax\n\ndef pdf(data, nbins=100, return_data=False, vmax=None, vmin=None,\n fignum=1, figsize=None, subplot=None, density=True, analyze=False, **kwargs):\n \"\"\"\n Plots a probability distribution function of ND data\n ... a wrapper for np.histogram and matplotlib\n ... Returns fig, ax, (optional: bins, hist)\n\n Parameters\n ----------\n data: nd-array, list, or tuple, data used to get a histogram/pdf\n nbins: int, umber of bins\n return_data: bool, If True, it returns fig, ax, bins (centers of the bins), hist (counts or probability density values)\n vmax: float, data[data>vmax] will be ignored during counting.\n vmin: float, data[data<vmin] will be ignored during counting.\n fignum: int, figure number (the argument called \"num\" in matplotlib)\n figsize: tuple, figure size in inch (width x height)\n subplot: int, matplotlib subplot notation. 
default: 111\n density: bool, If True, it plots the probability density instead of counts.\n analyze: bool If True, it adds mean, mode, variane to the plot.\n kwargs: other kwargs passed to plot() of the velocity module\n\n Returns\n -------\n fig: matplotlib.Figure instance\n ax: matplotlib.axes.Axes instance\n (Optional)\n bins: 1d array, bin centers\n hist: 1d array, probability density vales or counts\n \"\"\"\n def compute_pdf(data, nbins=10, density=density):\n # Get a normalized histogram\n # exclude nans from statistics\n hist, bins = np.histogram(data.flatten()[~np.isnan(data.flatten())], bins=nbins, density=density)\n # len(bins) = len(hist) + 1\n # Get middle points for plotting sake.\n bins1 = np.roll(bins, 1)\n bins = (bins1 + bins) / 2.\n bins = np.delete(bins, 0)\n return bins, hist\n\n data = np.asarray(data)\n\n # Use data where values are between vmin and vmax\n if vmax is not None:\n cond1 = np.asarray(data) < vmax # if nan exists in data, the condition always gives False for that data point\n else:\n cond1 = np.ones(data.shape, dtype=bool)\n if vmin is not None:\n cond2 = np.asarray(data) > vmin\n else:\n cond2 = np.ones(data.shape, dtype=bool)\n data = data[cond1 * cond2]\n\n # compute a pdf\n bins, hist = compute_pdf(data, nbins=nbins)\n fig, ax = plot(bins, hist, fignum=fignum, figsize=figsize, subplot=subplot, **kwargs)\n\n if analyze:\n bin_width = float(bins[1]-bins[0])\n mean = np.nansum(bins * hist * bin_width)\n mode = bins[np.argmax(hist)]\n var = np.nansum(bins**2 * hist * bin_width)\n text2 = 'mean: %.2f' % mean\n text1 = 'mode: %.2f' % mode\n text3 = 'variance: %.2f' % var\n addtext(ax, text=text2, option='tc2')\n addtext(ax, text=text1, option='tc')\n addtext(ax, text=text3, option='tc3')\n\n if not return_data:\n return fig, ax\n else:\n return fig, ax, bins, hist\n\n\ndef cdf(data, nbins=100, return_data=False, vmax=None, vmin=None,\n fignum=1, figsize=None, subplot=None, **kwargs):\n \"\"\"\n Plots a cummulative distribution function of ND data\n ... a wrapper for np.histogram and matplotlib\n ... Returns fig, ax, (optional: bins, hist)\n\n Parameters\n ----------\n data: nd-array, list, or tuple, data used to get a histogram/pdf\n nbins: int, umber of bins\n return_data: bool, If True, it returns fig, ax, bins (centers of the bins), hist (counts or probability density values)\n vmax: float, data[data>vmax] will be ignored during counting.\n vmin: float, data[data<vmin] will be ignored during counting.\n fignum: int, figure number (the argument called \"num\" in matplotlib)\n figsize: tuple, figure size in inch (width x height)\n subplot: int, matplotlib subplot notation. 
default: 111\n density: bool, If True, it plots the probability density instead of counts.\n analyze: bool If True, it adds mean, mode, variane to the plot.\n kwargs: other kwargs passed to plot() of the velocity module\n\n Returns\n -------\n fig: matplotlib.Figure instance\n ax: matplotlib.axes.Axes instance\n (Optional)\n bins: 1d array, bin centers\n hist: 1d array, probability density vales or counts\n \"\"\"\n def compute_pdf(data, nbins=10):\n # Get a normalized histogram\n # exclude nans from statistics\n pdf, bins = np.histogram(data.flatten()[~np.isnan(data.flatten())], bins=nbins, density=True)\n # len(bins) = len(hist) + 1\n # Get middle points for plotting sake.\n bins1 = np.roll(bins, 1)\n bins = (bins1 + bins) / 2.\n bins = np.delete(bins, 0)\n return bins, pdf\n\n def compute_cdf(data, nbins=10):\n \"\"\"compute cummulative probability distribution of data\"\"\"\n bins, pdf = compute_pdf(data, nbins=nbins)\n cdf = np.cumsum(pdf) * np.diff(bins, prepend=0)\n return bins, cdf\n\n data = np.asarray(data)\n\n # Use data where values are between vmin and vmax\n if vmax is not None:\n cond1 = np.asarray(data) < vmax # if nan exists in data, the condition always gives False for that data point\n else:\n cond1 = np.ones(data.shape, dtype=bool)\n if vmin is not None:\n cond2 = np.asarray(data) > vmin\n else:\n cond2 = np.ones(data.shape, dtype=bool)\n data = data[cond1 * cond2]\n\n # compute a cdf\n bins, cdf = compute_cdf(data, nbins=nbins)\n fig, ax = plot(bins, cdf, fignum=fignum, figsize=figsize, subplot=subplot, **kwargs)\n\n if not return_data:\n return fig, ax\n else:\n return fig, ax, bins, cdf\n\n\ndef errorbar(x, y, xerr=0., yerr=0., fignum=1, marker='o', fillstyle='full', linestyle='None', label=None, mfc='white',\n subplot=None, legend=False, legend_remove_bars=False, figsize=None, maskon=False, thd=1, capsize=10,\n xmax=None, xmin=None, ax=None, **kwargs):\n \"\"\" errorbar plot\n\n Parameters\n ----------\n x : array-like\n y : array-like\n xerr: must be a scalar or numpy array with shape (N,1) or (2, N)... [xerr_left, xerr_right]\n yerr: must be a scalar or numpy array with shape (N,) or (2, N)... [yerr_left, yerr_right]\n fignum\n label\n color\n subplot\n legend\n kwargs\n\n Returns\n -------\n fig\n ax\n\n \"\"\"\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n # Make sure that xerr and yerr are numpy arrays\n ## x, y, xerr, yerr do not have to be numpy arrays. It is just a convention. - takumi 04/01/2018\n x, y = np.array(x), np.array(y)\n # Make xerr and yerr numpy arrays if they are not scalar. 
Without this, TypeError would be raised.\n if not (isinstance(xerr, int) or isinstance(xerr, float)):\n xerr = np.array(xerr)\n else:\n xerr = np.ones_like(x) * xerr\n if not (isinstance(yerr, int) or isinstance(yerr, float)):\n yerr = np.array(yerr)\n else:\n yerr = np.ones_like(x) * yerr\n xerr[xerr==0] = np.nan\n yerr[yerr==0] = np.nan\n if maskon:\n keep = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n keep = [True] * len(x)\n if xmax is not None:\n keep *= x < xmax\n if xmin is not None:\n keep *= x >= xmin\n if fillstyle == 'none':\n ax.errorbar(x[keep], y[keep], xerr=xerr[keep], yerr=yerr[keep], marker=marker, mfc=mfc, linestyle=linestyle,\n label=label, capsize=capsize, **kwargs)\n else:\n ax.errorbar(x[keep], y[keep], xerr=xerr[keep], yerr=yerr[keep], marker=marker, fillstyle=fillstyle,\n linestyle=linestyle, label=label, capsize=capsize, **kwargs)\n\n if legend:\n ax.legend()\n\n if legend_remove_bars:\n from matplotlib import container\n handles, labels = ax.get_legend_handles_labels()\n handles = [h[0] if isinstance(h, container.ErrorbarContainer) else h for h in handles]\n return fig, ax\n\ndef errorfill(x, y, yerr, fignum=1, color=None, subplot=None, alpha_fill=0.3, ax=None, label=None,\n legend=False, figsize=None, color_cycle=__color_cycle__, maskon=False, thd=1,\n xmin=None, xmax=None, smooth=False, smoothlog=False, window_len=5, window='hanning',\n set_bottom_zero=False, set_left_zero=False, symmetric=False, return_xy=False,\n **kwargs):\n\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n\n x = np.array(x)\n y = np.array(y)\n\n #ax = ax if ax is not None else plt.gca()\n # if color is None:\n # color = color_cycle.next()\n if maskon:\n keep = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n keep = [True] * len(x)\n if xmax is not None:\n keep *= x < xmax\n if xmin is not None:\n keep *= x >= xmin\n\n mask2removeNans = ~np.isnan(x) * ~np.isnan(y)\n keep = keep * mask2removeNans\n\n if smooth:\n x2plot = x[keep]\n y2plot = smooth1d(y[keep], window_len=window_len, window=window)\n elif smoothlog:\n x2plot = x[keep]\n try:\n logy2plot = smooth1d(np.log10(y[keep]), window_len=window_len, window=window)\n y2plot = 10**logy2plot\n except:\n y2plot = y[keep]\n else:\n x2plot, y2plot = x[keep], y[keep]\n if len(yerr) == len(y):\n ymin = y2plot - yerr[keep]\n ymax = y2plot + yerr[keep]\n elif len(yerr) == 2:\n yerrdown, yerrup = yerr\n ymin = y2plot - yerrdown\n ymax = y2plot + yerrup\n else:\n ymin = y2plot - yerr\n ymax = y2plot + yerr\n\n\n p = ax.plot(x2plot, y2plot, color=color, label=label, **kwargs)\n color = p[0].get_color()\n ax.fill_between(x2plot, ymax, ymin, color=color, alpha=alpha_fill)\n\n #patch used for legend\n color_patch = mpatches.Patch(color=color, label=label)\n if legend:\n plt.legend(handles=[color_patch])\n\n if set_bottom_zero:\n ax.set_ylim(bottom=0)\n if set_left_zero:\n ax.set_xlim(left=0)\n if symmetric:\n ymin, ymax = ax.get_ylim()\n yabs = np.abs(max(-ymin, ymax))\n ax.set_ylim(-yabs, yabs)\n\n if not return_xy:\n return fig, ax, color_patch\n else:\n return fig, ax, color_patch, x2plot, y2plot\n\n\ndef bin_and_errorbar(x_, y_, xerr=None,\n n_bins=100, mode='linear', bin_center=True, return_std=False,\n fignum=1, ax=None, marker='o', fillstyle='full',\n linestyle='None', linewidth=1, label=None, mfc='white',\n subplot=None, legend=False, figsize=None, maskon=False, thd=1, capsize=5,\n set_bottom_zero=False, symmetric=False, #y-axis\n set_left_zero=False,\n return_stats=False, **kwargs):\n \"\"\"\n 
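A minimal usage sketch (illustrative only, not from the original author; `x_raw` and `y_raw` are assumed 1d numpy arrays of scattered samples):\n\n        x_raw = np.linspace(0, 10, 5000)\n        y_raw = np.sin(x_raw) + np.random.normal(scale=0.3, size=x_raw.size)\n        fig, ax = bin_and_errorbar(x_raw, y_raw, n_bins=50, label='binned mean with standard error')\n\n    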
Takes scattered data points (x, y), bin them (compute avg and std), then plots the results with error bars\n\n Parameters\n ----------\n x : array-like\n y : array-like\n xerr: must be a scalar or numpy array with shape (N,1) or (2, N)... [xerr_left, xerr_right]\n ... if xerr==0, it removes the error bars in x.\n yerr: must be a scalar or numpy array with shape (N,) or (2, N)... [yerr_left, yerr_right]\n n_bins: int, number of bins used to compute a histogram between xmin and xmax\n mode: str, default: 'linear', options are 'linear' and 'log'. Select either linear binning or logarithmic binning\n ... If \"linear\", it computes statistics using evenly separated bins between xmin and xmax.\n ... If \"log\", it uses bins evenly separted in the log space. (It assumes that xmin>0)\n i.e. The bin edges are like (10^-1.0, 10^-0.5), (10^-0.5, 10^0), (10^0, 10^0.5), and so on.\n bin_center: bool, default: True.\n ... passed to get_binned_stats()\n return_std: bool, default: False.\n ... passed to get_binned_stats()\n ... If False, it uses standard errors as error bars, instead of using standard deviations\n fignum: int, figure number\n ax: Axes object, default: None\n ... If given, this becomes the Axes on which the results are plotted\n marker: str, default: 'o', marker style\n fillstyle: str, default: 'full'. Options: 'full', 'none'. See matplotlib scatter for more details\n linestyle: str, default:'None'\n linewidth: float, linewidth of the error bars\n label: str, label for a legend\n mfc: str, default:'white', marker face color\n ... Use this with fillstyle='none' in order to change the face color of the marker.\n ... Common usage: empty circles- fillstyle='none', mfc='white'\n subplot: int, three-digit number. e.g.-111\n legend: bool, default: False. If True, ax.legend is called at the end.\n figsize: tuple, figure size in inches\n maskon: bool, default: False\n ... This hides \"suspicious\" data points / outliers.\n ... See the docstr of get_mask4erroneous_pts() for more details\n thd: float, threshold value used for get_mask4erroneous_pts() to determine the outliers\n capsize: float, width of the error bars\n return_stats: bool, default: False\n ... If True, it returns the binned results (that are being plotted): x[mask], y[mask], xerr[mask], yerr[mask]\n kwargs: passed to ax.errorbar()\n\n Returns\n -------\n If not return_stats (default),\n fig, ax: a Figure instance, an Axes instance\n If return_stats:\n fig, ax, x[mask], y[mask], xerr[mask], yerr[mask]: a Figure instance, an Axes instance, binned results (x, y, x_err, y_err)\n \"\"\"\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n\n # Make sure that xerr and yerr are numpy arrays\n ## x, y, xerr, yerr do not have to be numpy arrays. It is just a convention. 
- takumi 04/01/2018\n x_, y_ = np.array(x_), np.array(y_)\n x, y, yerr = get_binned_stats(x_, y_, n_bins = n_bins, mode = mode, bin_center=bin_center, return_std = return_std)\n if xerr is None:\n xerr = np.ones_like(x) * (x[1] - x[0])\n elif type(xerr) in [int, float]:\n xerr = np.ones_like(x) * xerr\n xerr[xerr == 0] = np.nan\n yerr[yerr == 0] = np.nan\n\n if maskon:\n mask = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n mask = [True] * len(x)\n if fillstyle == 'none':\n ax.errorbar(x[mask], y[mask], xerr=xerr[mask], yerr=yerr[mask], marker=marker, mfc=mfc, linestyle=linestyle,\n label=label, capsize=capsize, linewidth=linewidth, **kwargs)\n else:\n ax.errorbar(x[mask], y[mask], xerr=xerr[mask], yerr=yerr[mask], marker=marker, fillstyle=fillstyle,\n linestyle=linestyle, label=label, capsize=capsize, linewidth=linewidth, **kwargs)\n if legend:\n ax.legend()\n\n if set_bottom_zero:\n ax.set_ylim(bottom=0)\n if set_left_zero:\n ax.set_xlim(left=0)\n if symmetric:\n xmin, xmax = ax.get_xlim()\n xabs = np.abs(max(-xmin, xmax))\n ymin, ymax = ax.get_ylim()\n yabs = np.abs(max(-ymin, ymax))\n ax.set_xlim(-xabs, xabs)\n ax.set_ylim(-yabs, yabs)\n\n if not return_stats: # default\n return fig, ax\n else:\n return fig, ax, x[mask], y[mask], xerr[mask], yerr[mask]\n\n## Plot a fit curve\ndef plot_fit_curve(xdata, ydata, func=None, fignum=1, subplot=111, ax=None, figsize=None, linestyle='--',\n xmin=None, xmax=None, add_equation=True, eq_loc='bl', color=None, label='fit',\n show_r2=False, return_r2=False, p0=None, bounds=(-np.inf, np.inf), maskon=True, thd=1,**kwargs):\n \"\"\"\n Plots a fit curve given xdata and ydata\n Parameters\n ----------\n xdata: 1d array\n ydata: 1d array\n func : a function to be fit- e.g. lambda x, a, b: a*x+b\n fignum: int, figure number\n subplot: int, three-digit number to specify a subplot location\n ax: Axes instance- If given, it plots on the\n figsize\n linestyle\n xmin\n xmax\n add_equation\n eq_loc\n color\n label\n show_r2\n return_r2\n p0\n bounds\n maskon\n thd\n kwargs\n\n Returns\n -------\n fig, ax: A Figure object, an Axes object\n popt, pcov : fit results, a covariance matrix\n\n \"\"\"\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n xdata = np.array(xdata)\n ydata = np.array(ydata)\n\n if len(xdata) != len(ydata):\n print('x and y have different length! Data will be clipped... %d, %d' % (len(xdata), len(ydata)))\n n = min(len(xdata), len(ydata))\n xdata = xdata[:n]\n ydata = ydata[:n]\n\n if any(np.isnan(ydata)) or any(np.isnan(xdata)):\n print('Original data contains np.nans! 
Delete them for curve fitting')\n condx, condy = np.isnan(xdata), np.isnan(ydata)\n cond = (~condx * ~condy)\n print('No of deleted data points %d / %d' % (np.sum(~cond), len(xdata)))\n if np.sum(~cond) == len(xdata):\n print('No data points for fitting!')\n raise RuntimeError\n xdata, ydata = xdata[cond], ydata[cond]\n\n if xmin is None:\n xmin = np.nanmin(xdata)\n if xmax is None:\n xmax = np.nanmax(xdata)\n\n if maskon:\n mask = get_mask4erroneous_pts(xdata, ydata, thd=thd)\n if any(mask):\n xdata = xdata[mask]\n ydata = ydata[mask]\n\n\n x_for_plot = np.linspace(xmin, xmax, 1000)\n if func is None or func == 'linear':\n print('Fitting to a linear function...')\n popt, pcov = curve_fit(std_func.linear_func, xdata, ydata, p0=p0, bounds=bounds)\n if color is None:\n fig, ax = plot(x_for_plot, std_func.linear_func(x_for_plot, *popt), fignum=fignum, subplot=subplot,\n label=label, figsize=figsize, linestyle=linestyle, ax=ax, **kwargs)\n else:\n fig, ax = plot(x_for_plot, std_func.linear_func(x_for_plot, *popt), fignum=fignum, subplot=subplot,\n label=label, figsize=figsize, color=color, linestyle=linestyle, ax=ax, **kwargs)\n\n if add_equation:\n text = '$y=ax+b$: a=%.2f, b=%.2f' % (popt[0], popt[1])\n addtext(ax, text, option=eq_loc)\n y_fit = std_func.linear_func(xdata, *popt)\n elif func == 'power':\n print('Fitting to a power law...')\n\n popt, pcov = curve_fit(std_func.power_func, xdata, ydata, p0=p0, bounds=bounds)\n if color is None:\n fig, ax = plot(x_for_plot, std_func.power_func(x_for_plot, *popt), fignum=fignum, subplot=subplot,\n label=label, figsize=figsize, linestyle=linestyle, ax=ax, **kwargs)\n else:\n fig, ax = plot(x_for_plot, std_func.power_func(x_for_plot, *popt), fignum=fignum, subplot=subplot,\n label=label, figsize=figsize, color=color, linestyle=linestyle, ax=ax, **kwargs)\n\n if add_equation:\n text = '$y=ax^b$: a=%.2f, b=%.2f' % (popt[0], popt[1])\n addtext(ax, text, option=eq_loc)\n y_fit = std_func.power_func(xdata, *popt)\n elif func == 'power2':\n print('Fitting to a linear function to the log-log plot')\n xdata[xdata<10**-16], ydata[xdata<10**-16] = np.nan, np.nan\n xdata_log, ydaya_log = np.log(xdata), np.log(ydata)\n\n popt, pcov = curve_fit(std_func.linear_func, xdata_log, ydaya_log, p0=p0, bounds=bounds)\n\n y_fit = np.exp(popt[1]) * x_for_plot ** popt[0]\n\n # plot(x_for_plot, y_fit, fignum=fignum)\n if color is None:\n fig, ax = plot(x_for_plot, y_fit, fignum=fignum, subplot=subplot,\n label = label, figsize = figsize, linestyle = linestyle, ax = ax, ** kwargs)\n else:\n fig, ax = plot(x_for_plot, y_fit, fignum=fignum, subplot=subplot,\n label = label, figsize = figsize, color = color, linestyle = linestyle, ax = ax, ** kwargs)\n\n if add_equation:\n text = '$y=ax^b$: a=%.2f, b=%.2f' % (np.exp(popt[1]) , popt[0])\n addtext(ax, text, option=eq_loc)\n else:\n popt, pcov = curve_fit(func, xdata, ydata, p0=p0, bounds=bounds)\n if color is None:\n fig, ax = plot(x_for_plot, func(x_for_plot, *popt), fignum=fignum, subplot=subplot, label=label, figsize=figsize,\n linestyle=linestyle, ax=ax, **kwargs)\n else:\n fig, ax = plot(x_for_plot, func(x_for_plot, *popt), fignum=fignum, subplot=subplot, label=label, figsize=figsize,\n color=color, linestyle=linestyle, ax=ax, **kwargs)\n y_fit = func(xdata, *popt)\n\n #plot(x_for_plot, std_func.power_func(x_for_plot, *popt))\n\n if show_r2 or return_r2:\n # compute R^2\n # residual sum of squares\n ss_res = np.sum((ydata - y_fit) ** 2)\n # total sum of squares\n ss_tot = np.sum((ydata - np.mean(ydata)) ** 2)\n # 
r-squared\n r2 = 1 - (ss_res / ss_tot)\n if show_r2:\n addtext(ax, '$R^2: %.2f$' % r2, option='bl3')\n if return_r2:\n return fig, ax, popt, pcov, r2\n\n return fig, ax, popt, pcov\n\n\ndef plot_interpolated_curves(x, y, zoom=2, fignum=1, figsize=None, label='', color=None, subplot=None, legend=False,\n fig=None, ax=None, maskon=False, thd=1, return_interp_func=False, **kwargs):\n \"\"\"\n plot a graph using given x, y\n fignum can be specified\n any kwargs from plot can be passed\n \"\"\"\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n if y is None:\n y = copy.deepcopy(x)\n x = np.arange(len(x))\n # Make sure x and y are np.array\n x, y = np.asarray(x), np.asarray(y)\n\n if len(x) > len(y):\n print(\"Warning : x and y data do not have the same length\")\n x = x[:len(y)]\n elif len(y) > len(x):\n print(\"Warning : x and y data do not have the same length\")\n y = y[:len(x)]\n\n # remove nans\n keep = ~np.isnan(x) * ~np.isnan(y)\n x, y = x[keep], y[keep]\n\n if maskon:\n keep = get_mask4erroneous_pts(x, y, thd=thd)\n else:\n keep = [True] * len(x)\n # f = scipy.interpolate.interp1d(x[keep], y[keep], fill_value=\"extrapolate\")\n # FOR A SMOOTHER CURVE\n x_ = scipy.ndimage.zoom(x[keep], zoom)\n y_ = scipy.ndimage.zoom(y[keep], zoom)\n f = scipy.interpolate.interp1d(x_, y_, fill_value=\"extrapolate\")\n\n fig, ax = plot(x_, f(x_), label=label, color=color, ax=ax, legend=legend, **kwargs)\n if return_interp_func:\n return fig, ax, f\n else:\n return fig, ax\n\n## 2D plotsFor the plot you showed at group meeting of lambda converging with resolution, can you please make a version with two x axes (one at the top, one below) one pixel spacing, other PIV pixel spacing, and add a special tick on each for the highest resolution point.\n# (pcolormesh)\ndef color_plot(x, y, z, subplot=None, fignum=1, figsize=None, ax=None, vmin=None, vmax=None, log10=False, label=None,\n cbar=True, cmap='magma', symmetric=False, enforceSymmetric=True, aspect='equal', option='scientific', ntick=5, tickinc=None,\n crop=None, fontsize=None, ticklabelsize=None,\n **kwargs):\n \"\"\"\n\n Parameters\n ----------\n x: 2d array\n y: 2d array\n z: 2d array\n subplot: int, default is 111\n fignum\n figsize\n ax\n vmin\n vmax\n log10\n label\n cbar\n cmap\n symmetric\n aspect: str, 'equal' or 'auto\n option\n ntick\n tickinc\n crop\n kwargs\n\n Returns\n -------\n fig:\n ax:\n cc: QuadMesh object\n \"\"\"\n\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n # fig, ax = set_fig(fignum, subplot, figsize=figsize, aspect=aspect)\n if crop is not None:\n x = x[crop:-crop, crop:-crop]\n y = y[crop:-crop, crop:-crop]\n z = z[crop:-crop, crop:-crop]\n\n\n if log10:\n z = np.log10(z)\n\n # For Diverging colormap, ALWAYS make the color thresholds symmetric\n if cmap in ['PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu', 'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic'] \\\n and enforceSymmetric:\n symmetric = True\n\n if symmetric:\n hide = np.isinf(z)\n keep = ~hide\n if vmin is None and vmax is None:\n v = max(np.abs(np.nanmin(z[keep])), np.abs(np.nanmax(z[keep])))\n vmin, vmax = -v, v\n elif vmin is not None and vmax is not None:\n arr = np.asarray([vmin, vmax])\n v = np.nanmax(np.abs(arr))\n vmin, vmax = -v, v\n elif vmin is not None and vmax is None:\n vmax = -vmin\n else:\n vmin = -vmax\n\n\n\n\n # Note that the cc returned is a matplotlib.collections.QuadMesh\n 
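# Illustrative usage sketch (commented out; not part of the original code, grid/variable names are made up):\n    #   X, Y = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 100))\n    #   Z = np.sin(2 * np.pi * X) * np.cos(2 * np.pi * Y)\n    #   fig, ax, cc = color_plot(X, Y, Z, cmap='bwr', label='amplitude')\n    # A diverging cmap such as 'bwr' triggers the symmetric color-range branch above.\n    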
# print('np.shape(z) = ' + str(np.shape(z)))\n if vmin is None and vmax is None:\n # plt.pcolormesh returns a QuadMesh class object.\n cc = ax.pcolormesh(x, y, z, cmap=cmap, **kwargs)\n else:\n cc = ax.pcolormesh(x, y, z, cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)\n\n if cbar:\n if vmin is None and vmax is None:\n add_colorbar(cc, ax=ax, label=label, option=option, ntick=ntick, tickinc=tickinc, fontsize=fontsize, ticklabelsize=ticklabelsize)\n elif vmin is not None and vmax is None:\n add_colorbar(cc, ax=ax, label=label, option=option, vmin=vmin, ntick=ntick, tickinc=tickinc, fontsize=fontsize, ticklabelsize=ticklabelsize)\n elif vmin is None and vmax is not None:\n add_colorbar(cc, ax=ax, label=label, option=option, vmax=vmax, ntick=ntick, tickinc=tickinc, fontsize=fontsize, ticklabelsize=ticklabelsize)\n else:\n add_colorbar(cc, ax=ax, label=label, option=option, vmin=vmin, vmax=vmax, ntick=ntick, tickinc=tickinc, fontsize=fontsize, ticklabelsize=ticklabelsize)\n ax.set_aspect(aspect)\n # set edge color to face color\n cc.set_edgecolor('face')\n\n return fig, ax, cc\n\n#imshow\ndef imshow(arr, xmin=0, xmax=1, ymin=0, ymax=1, cbar=True, vmin=0, vmax=0, \\\n fignum=1, subplot=111, figsize=__figsize__, ax=None, interpolation='nearest', cmap='bwr',\n cb_kwargs={}, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n arr: array-like or PIL image\n xmin: float [0., 1.)- extent=(xmin, xmax, ymin, ymax) The bounding box in data coordinates that the image will fill.\n xmax: float (0., 1.]- extent=(xmin, xmax, ymin, ymax) The bounding box in data coordinates that the image will fill.\n ymin: float [0., 1.)- extent=(xmin, xmax, ymin, ymax) The bounding box in data coordinates that the image will fill.\n ymax: float (0., 1.]- extent=(xmin, xmax, ymin, ymax) The bounding box in data coordinates that the image will fill.\n cbar: bool, If True, fig.colorbar(ImageAxes instance, **cb_kwargs) is called.\n vmin: float, image intensity ranges from vmin to vmax\n vmax: float, image intensity ranges from vmin to vmax\n fignum: int, figure number\n subplot: int, three-digit integer to specify a subplot\n figsize: tuple, figure size- e.g. (8,8)\n interpolation: 'none', 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36',\n 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom',\n 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos'.\n cmap: str, color map used for plt.imshow\n cb_kwargs: dict, color bar keyward arguments can be passed in this dictionary like {'shrink': 0.5, 'pad':0.05}\n https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.colorbar.html\n\n Returns\n -------\n fig, ax, ima, cc: Figure, Axes, AxesImage, Colorbar instances\n \"\"\"\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n if vmin == vmax == 0:\n ima = ax.imshow(arr, extent=(xmin, xmax, ymin, ymax),\\\n interpolation=interpolation, cmap=cmap)\n else:\n ima = ax.imshow(arr, extent=(xmin, xmax, ymin, ymax),\\\n interpolation=interpolation, cmap=cmap, vmin=vmin, vmax=vmax)\n if cbar:\n cc = fig.colorbar(ima, **cb_kwargs)\n else:\n cc = None\n return fig, ax, ima, cc\n\n\ndef imgScatter(x, y, imgs, img_x=None, img_y=None, subax_size=0.06,\n cmap='viridis', vmin=None, vmax=None,\n cbar=True, cb_pad='2%', cb_size='5%', cb_option='scientific', cb_label=None,\n axLim=(None, None, None, None),\n fignum=1, figsize=__figsize__,\n **kwargs):\n \"\"\"\n Scatter plots images (2d arrays)\n ... 
This function creates additional Axes on top of the master Axes instance.\n\n To do1: one should be able to plot on the master Axes; however, this fails even with altering the zorder values.\n To do2: Should one be able to pass a list of imag_x as well?- this would enable plotting imgs with different resolutions\n Parameters\n ----------\n x: 1d array-like, x-coordinates of the image locations\n y: 1d array-like, y-coordinates of the image locations\n imgs: list, list of images (2d arrays\n img_x: 2d array (optional), x grid of the images- if given, this calls color_plot(img_x, img_y, imgs[i]).\n ... Otherwise, it calls imshow(imgs[i]).\n img_y: 2d array (optional), y grid of the images- if given, this calls color_plot(img_x, img_y, imgs[i]).\n ... Otherwise, it calls imshow(imgs[i]).\n subax_size: float (0, 1], default:0.06, the size of the subaxes (inner plots)\n cmap: str/cmap object- a color map of the images\n vmin: float, default:None- color bar range [vmin, vmax]\n vmax: float, default:None- color bar range [vmin, vmax]\n cbar: boolean, default:False- To toggle a color bar, vmin and vmax must be given. This is because each image could be drawn with a different color bar range.\n cb_pad: str, default:\"2%\" (with respect to the figure width)- Do not pass a float.\n cb_size: str, default:\"2%\" (with respect to the figure width)- Do not pass a float.\n cb_option: str, color bar notation, choose from 'normal' and 'scientific'\n cb_label: str, label of the color bar\n axLim: 1d array-like (xmin, xmax, ymin, ymax)- x- and y-limits of the master axes\n kwargs: dict, this gets passed to either imshow() or color_plot()\n\n Returns\n -------\n fig, ax, axes, cb: Figure, Axes (master), list of Axes, a color bar object\n \"\"\"\n\n def pc2float(x):\n return float(x.strip('%')) / 100\n #\n # def float2pc(x):\n # return \"{0}%\".format(x * 100)\n\n subaxes = []\n\n fig, ax = scatter(x, y, s=1, zorder=0, fignum=fignum, figsize=figsize) # dummy plot\n if cbar:\n if vmin is None and vmax is None:\n print('imgScatter: To toggle a universal color bar, provide vmin and vmax. Color range: [vmin, vmax]')\n cb = None\n cbar = False\n else:\n cb = add_colorbar_alone(ax, [vmin, vmax], cmap=cmap, option=cb_option, label=cb_label)\n else: cb = None\n\n if any([a is not None for a in axLim]):\n ax.set_xlim(axLim[:2])\n ax.set_ylim(axLim[2:])\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n\n pad, size = pc2float(cb_pad), pc2float(cb_size)\n\n for i, (x_, y_) in enumerate(zip(x, y)):\n if cbar:\n subax = add_subplot_axes(ax, [(x_ - xmin) / ((xmax - xmin) * (1. 
+ pad + size)) - subax_size / 2.,\n (y_ - ymin) / (ymax - ymin) - subax_size / 2.,\n subax_size, subax_size],\n zorder=0, )\n else:\n subax = add_subplot_axes(ax, [(x_ - xmin) / (xmax - xmin) - subax_size / 2.,\n (y_ - ymin) / (ymax - ymin) - subax_size / 2.,\n subax_size, subax_size],\n zorder=0, )\n if img_x is not None and img_y is not None:\n color_plot(img_x, img_y, imgs[i], ax=subax,\n cbar=False, cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)\n else:\n imshow(imgs[i], ax=subax, cmap=cmap, vmin=vmin, vmax=vmax, cbar=False, aspect='equal', **kwargs)\n subax.axis('off')\n subaxes.append(subax)\n\n return fig, ax, subaxes, cb\n\n\n# quiver\ndef quiver(x, y, u, v, subplot=None, fignum=1, figsize=None, ax=None,\n inc_x=1, inc_y=1, inc=None, color='k',\n vmin=None, vmax=None,\n absolute=False,\n key=True, key_loc=[0.08, 1.06], key_length=None,\n key_label=None, key_units='mm/s', key_labelpos='E',\n key_pad=25., key_fmt='.1f',\n key_kwargs={},\n aspect='equal',\n **kwargs):\n \"\"\"\n Wrapper for plt.quiver()\n\n Some tips:\n ... plt.quiver() autoscales the arrows. This may be problematic if you want to show them with absolute scales.\n ... I got a workaround for you. You can control this by toggling a boolean \"absolute\"\n ...... If \"absolute\" is False, it autoscales.\n ...... If \"absolute\" is True, you must supply \"scale\" (float)\n ......... e.g. Plot two quiver plots with the same scale\n fig1, ax1, Q1 = quiver(x1, y1, u1, v1, scale=4, fignum=1, key_length=50)\n quiver(x2, y2, u2, v2, scale=4, fignum=2, key_length=50) # scale could be Q1.scale\n ............ This ensures to plot the arrows with the same scale with the same quiver key.\n This is essential to create an animation of quiver plots to avoid distraction.\n\n Parameters\n ----------\n x\n y\n u\n v\n subplot\n fignum\n figsize\n ax\n inc_x\n inc_y\n inc\n color\n vmin\n vmax\n absolute\n u_ref\n key\n key_loc\n key_length\n key_label\n key_units\n key_labelpos\n key_pad\n key_fmt\n key_kwargs\n aspect\n kwargs\n\n Returns\n -------\n\n \"\"\"\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n if inc is not None:\n inc_x = inc_y = inc\n x_tmp, y_temp = x[::inc_y, ::inc_x], y[::inc_y, ::inc_x]\n u_tmp, v_tmp = u[::inc_y, ::inc_x], v[::inc_y, ::inc_x]\n u_norm = np.sqrt(u_tmp ** 2 + v_tmp ** 2)\n u_rms = np.sqrt(np.nanmean(u_tmp ** 2 + v_tmp ** 2))\n\n if vmin is None:\n vmin = np.nanmin(u_norm)\n if vmax is None:\n vmax = np.nanmax(u_norm)\n hide1 = u_norm < vmin\n hide2 = u_norm > vmax\n hide3 = np.isinf(u_norm)\n hide = np.logical_or(np.logical_or(hide1, hide2), hide3)\n cond = ~hide\n\n Q = ax.quiver(x_tmp[cond], y_temp[cond], u_tmp[cond], v_tmp[cond], color=color, **kwargs)\n\n if key:\n if key_length is None:\n U_rms = np.nanmean(u_norm[cond])\n U_rmedians = np.nanmedian(u_norm[cond])\n\n # key_length = 10 ** round(np.log10(U_rms))\n # key_length = 10 ** round(np.log10(U_rmedians))\n # key_length = round(U_rmedians, int(-round(np.log10(U_rmedians))) + 1) * 5\n key_length = round(u_rms, int(-round(np.log10(U_rms))) + 1)\n if key_label is None:\n key_label = '{:' + key_fmt + '} '\n key_label = key_label.format(key_length) + key_units\n title(ax, ' ') # dummy title to create space on the canvas\n ax._set_title_offset_trans(key_pad)\n # print(key_length)\n # print(Q.scale)\n ax.quiverkey(Q, key_loc[0], key_loc[1], key_length, key_label, labelpos=key_labelpos, coordinates='axes',\n color=color, **key_kwargs)\n ax.set_aspect(aspect)\n return fig, ax, Q\n\n\ndef quiver3d(udata, 
normalize=False, mag=1, inc=1, xinc=None, yinc=None, zinc=None,\n umin=0, umax=None, # data range to show quiver\n vmin=0, vmax=None, # colorbar range\n add_bounding_box=True, notebook=True,\n show=True,\n save=False, savepath='./vectorfield.png', verbose=True, **kwargs):\n \"\"\"\n 3D Quiver plot using pyvista\n\n Parameters\n ----------\n udata: 4d array with shape (3, y, x, z)\n normalize: bool, default: False. If True, it ignores the magnitude in udata. All vectors have the magnitude of 1.\n ... This is handy if you would like to assess the directions of the field.\n mag: float greater than 0, default:1. udata*mag gets plotted. Sometimes, it is necessary to multiply a scalar to see the quivers.\n inc: int, default:1. Increment of quivers to be plotted- if inc=1, it plots all vectors in udata.\n If inc=2, it plots vectors every 2 xsteps, 2ysteps, and 2zsteps. i.e. 1/8 of vectors in udata gets plotted\n xinc: int, default:1. Increment of quivers to be plotted along the x-axis (the third index of udata)\n yinc: int, default:1. Increment of quivers to be plotted along the y-axis (the second index of udata)\n zinc: int, default:1. Increment of quivers to be plotted along the z-axis (the fourth index of udata)\n vmin: float, default: 0. The color range is specified by [vmin, vmax]\n vmax: float, default: None. The default is the maximum value in udata\n add_bounding_box: bool, default: True. If True, it draws a bounding box of udata\n save: bool, default: False. If True, it saves an image (png) at savepath.\n savepath: str, a path where an image gets saved if save is True.\n verbose: bool, default: True. If False, it suppresses print outputs.\n\n Returns\n -------\n None\n\n \"\"\"\n import pyvista\n if notebook:\n pyvista.set_jupyter_backend('ipygany')\n\n def fix_udata_shape(udata):\n \"\"\"\n It is better to always have udata with shape (height, width, depth, duration) (3D) or (height, width, duration) (2D)\n This method fixes the shape of udata whose shape is (height, width, depth) or (height, width)\n\n Parameters\n ----------\n udata: nd array,\n ... with shape (height, width, depth) (3D) or (height, width, duration) (2D)\n ... 
OR shape (height, width, depth, duration) (3D) or (height, width, duration) (2D)\n\n Returns\n -------\n udata: nd array, with shape (height, width, depth, duration) (3D) or (height, width, duration) (2D)\n\n \"\"\"\n shape = udata.shape # shape=(dim, nrows, ncols, nstacks) if nstacks=0, shape=(dim, nrows, ncols)\n if shape[0] == 2:\n ux, uy = udata[0, ...], udata[1, ...]\n try:\n dim, nrows, ncols, duration = udata.shape\n return udata\n except:\n dim, nrows, ncols = udata.shape\n duration = 1\n ux = ux.reshape((ux.shape[0], ux.shape[1], duration))\n uy = uy.reshape((uy.shape[0], uy.shape[1], duration))\n return np.stack((ux, uy))\n\n elif shape[0] == 3:\n dim = 3\n ux, uy, uz = udata[0, ...], udata[1, ...], udata[2, ...]\n try:\n nrows, ncols, nstacks, duration = ux.shape\n return udata\n except:\n nrows, ncols, nstacks = ux.shape\n duration = 1\n ux = ux.reshape((ux.shape[0], ux.shape[1], ux.shape[2], duration))\n uy = uy.reshape((uy.shape[0], uy.shape[1], uy.shape[2], duration))\n uz = uz.reshape((uz.shape[0], uz.shape[1], uz.shape[2], duration))\n return np.stack((ux, uy, uz))\n def compute_direction_from_udata(udata, normalize=False, t=0):\n udata = fix_udata_shape(udata)\n dim, height, width, depth, duration = udata.shape\n ux, uy, uz = udata[0, ..., t].ravel('F'), udata[1, ..., t].ravel('F'), udata[2, ..., t].ravel('F')\n umag = np.sqrt(ux ** 2 + uy ** 2 + uz ** 2)\n direction = np.empty((len(ux), 3))\n if normalize:\n direction[:, 0] = ux / umag\n direction[:, 1] = uy / umag\n direction[:, 2] = uz / umag\n else:\n direction[:, 0] = ux\n direction[:, 1] = uy\n direction[:, 2] = uz\n direction[np.isnan(direction)] = 0\n direction[umag==0, :] = 0\n return direction\n\n def get_speed(udata):\n \"\"\"Returns speed from udata\"\"\"\n speed = np.zeros_like(udata[0, ...])\n dim = udata.shape[0]\n for d in range(dim):\n speed += udata[d, ...] ** 2\n speed = np.sqrt(speed)\n return speed\n\n udata = fix_udata_shape(udata)\n dim, height, width, depth, duration = udata.shape\n\n # set up coordinates\n if xinc is None: xinc = inc\n if yinc is None: yinc = inc\n if zinc is None: zinc = inc\n x, y, z = np.meshgrid(np.arange(0, width, xinc),\n np.arange(0, height, yinc),\n np.arange(0, depth, zinc)\n )\n udata = udata[:, ::yinc, ::xinc, ::zinc]\n\n points = np.empty((x.size, 3))\n points[:, 0] = x.ravel('F')\n points[:, 1] = y.ravel('F')\n points[:, 2] = z.ravel('F')\n\n # data range [umin, umax]\n if umin!=0 or umax is not None:\n speed = get_speed(udata)\n keep = np.logical_and(umin <= speed, speed <= umax)\n for d in range(udata.shape[0]):\n udata[d, ~keep] = 0\n # color bar range\n if vmax is None:\n vmax = np.nanmax(udata) * mag\n\n # Compute a direction for the vector field\n direction = compute_direction_from_udata(udata, normalize=normalize)\n # plot using the plotting class\n plobj = pyvista.Plotter()\n\n a = plobj.add_arrows(points, direction, mag=mag, **kwargs)\n plobj.update_scalar_bar_range([vmin, vmax])\n if add_bounding_box:\n plobj.add_bounding_box()\n if not save and show:\n plobj.show()\n else:\n savedir = os.path.split(savepath)[0]\n if not os.path.exists(savedir):\n os.makedirs(savedir)\n if show:\n plobj.show(screenshot=savepath)\n if verbose:\n print('... 
A vector field image was saved at ', savepath)\n return plobj, a\n\n# streamlines\ndef streamplot(x, y, u, v, subplot=None, fignum=1, figsize=None, ax=None, density=[1., 1.],\n aspect='equal', **kwargs):\n \"\"\"\n Plots streamlines (2D)\n\n Parameters\n ----------\n x: 2d array\n y: 2d array\n u: 2d array\n v: 2d array\n subplot: int\n fignum: int\n figsize: tuple\n ax: matplotlib.ax.Axes instance\n density: 1d array-like\n ... density of streamlines\n aspect: str, default: \"equal\"\n ... options: \"equal\", \"auto\"\n kwargs: dict\n ... passed to ax.streamplot()\n\n Returns\n -------\n\n \"\"\"\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n ax.streamplot(x, y, u, v, density=density, **kwargs)\n\n if aspect=='equal':\n ax.set_aspect('equal')\n return fig, ax\n\ndef contour(x, y, psi, levels=10,\n vmin=None, vmax=None,\n subplot=None, fignum=1, figsize=None, ax=None,\n clabel=True,\n fontsize=9, inline=True, fmt='%1.3f',\n label_kwargs={},\n **kwargs):\n \"\"\"\n Plot contours.\n\n\n Parameters\n ----------\n x: 2d array\n y: 2d array\n psi: 2d array\n levels: int or 1d array-like\n ... If int (n), it plots n contours.\n ... If array-like, it plots contours at the corresponding levels.\n vmin: int\n ... plots contours at the levels in (vmin, vmax)\n vmax: int\n ... plots contours at the levels in (vmin, vmax)\n subplot: int\n fignum: int\n figsize: tuple\n ax: matplotlib.ax.Axes instance\n fontsize\n ... passed to ax.clabel()\n inline: bool, default: True\n ... passed to ax.clabel()\n fmt: str, default: \"%.1.3f\"\n ... passed to ax.clabel()\n label_kwargs\n ... passed to ax.clabel()\n kwargs: dict\n ... passed to ax.contour()\n\n Returns\n -------\n fig, ax, ctrs\n ... ctrs: a QuadContourSet instance\n\n \"\"\"\n if ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n else:\n fig = plt.gcf()\n\n if vmin is None:\n vmin = np.nanmin(psi)\n if vmax is None:\n vmax = np.nanmax(psi)\n hide1 = psi <= vmin\n hide2 = psi > vmax\n hide = np.logical_or(hide1, hide2)\n psi2plot = copy.deepcopy(psi)\n psi2plot[hide] = np.nan\n\n ctrs = ax.contour(x, y, psi2plot, levels, **kwargs)\n if clabel:\n ax.clabel(ctrs, fontsize=fontsize, inline=inline, fmt=fmt, **label_kwargs)\n\n return fig, ax, ctrs\n\n\ndef get_contours(ctrs, close_ctr=True, thd=0.5, min_length=0,\n levels=None):\n \"\"\"\n Returns positions of contours drawn by ax.contour()\n ... each contour has a different length.\n\n Parameters\n ----------\n ctrs: QuadContourSet instance (output of ax.contour)\n close_ctr: bool\n ... If True, returned points on the contours would be closed.\n thd: float\n ... Relevant parameter if close_ctr is True\n ... Let the beginning and the end of a contour be R1 and R2.\n If |R1 - R2| < thd, it considers the contour closed.\n Returns\n -------\n verts: list\n ... verts[n] stores (x, y) of the n-th contour\n ... 
xn, yn = verts[n][:, 0], verts[n][:, 1]\n \"\"\"\n def get_path_length(p):\n \"\"\"\n Returns arclength of a matplotlib.path.Path instance\n Parameters\n ----------\n p: matplotlib.path.Path instance\n\n Returns\n -------\n length\n \"\"\"\n vert = p.vertices\n x, y = vert[:, 0], vert[:, 1]\n xdiff, ydiff = np.diff(x), np.diff(y)\n length = np.nansum(np.sqrt(xdiff ** 2 + ydiff **2))\n return length\n\n verts = []\n level_list = []\n n_levels = len(ctrs.collections)\n for i in range(n_levels):\n ps = ctrs.collections[i].get_paths()\n n_ctrs = len(ps) # number of contours at the i-th level\n\n for j in range(n_ctrs):\n vert = ps[j].vertices\n if close_ctr:\n x0, y0 = vert[0, 0], vert[0, 1]\n x1, y1 = vert[-1, 0], vert[-1, 1]\n r = np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)\n if r > thd:\n vert = np.append(vert, [[x0, y0]], axis=0)\n ps[j].vertices = vert\n arclength = get_path_length(ps[j])\n if arclength > min_length:\n verts.append(vert)\n if levels is None:\n level_list.append(i)\n else:\n level_list.append(levels[i])\n return verts, level_list\n\n## Miscellanies\ndef show():\n plt.show()\n\n\n## Lines\ndef axhline(ax, y, x0=None, x1=None, color='black', linestyle='--', linewidth=1, zorder=0, **kwargs):\n \"\"\"\n Draw a horizontal line at y=y from xmin to xmax\n Parameters\n ----------\n y\n x\n\n Returns\n -------\n\n \"\"\"\n if x0 is not None:\n xmin, xmax = ax.get_xlim()\n xmin_frac, xmax_frac = x0 / float(xmax), x1 / float(xmax)\n else:\n xmin_frac, xmax_frac= 0, 1\n ax.axhline(y, xmin_frac, xmax_frac, color=color, linestyle=linestyle, linewidth=linewidth, zorder=zorder, **kwargs)\n\ndef axvline(ax, x, y0=None, y1=None, color='black', linestyle='--', linewidth=1, zorder=0, **kwargs):\n \"\"\"\n Draw a vertical line at x=x from ymin to ymax\n Parameters\n ----------\n x\n y\n\n Returns\n -------\n\n \"\"\"\n if y0 is not None:\n ymin, ymax = ax.get_ylim()\n ymin_frac, ymax_frac = y0 / float(ymax), y1 / float(ymax)\n else:\n ymin_frac, ymax_frac= 0, 1\n ax.axvline(x, ymin_frac, ymax_frac, color=color, linestyle=linestyle, linewidth=linewidth, zorder=zorder,\n **kwargs)\n\n## Bands\ndef axhband(ax, y0, y1, x0=None, x1=None, color='C1', alpha=0.2, **kwargs):\n \"\"\"\n Make a horizontal band between y0 and y1 (highlighting effect)\n Parameters\n ----------\n ax: plt.axes.axes object\n x0: x-coordinate of the left of a band (x0 < x1). As a default, x0, x1 = ax.get_xlim()\n x1: x-coordinate of the right of a band (x0 < x1)\n y0: y-coordinate of the bottom of a band (y0 < y1)\n y1: y-coordinate of the top of a band (y0 < y1)\n color: color of a band\n alpha: alpha of a band\n kwargs: kwargs for ax.fill_between()\n\n Returns\n -------\n\n \"\"\"\n ymin, ymax = ax.get_ylim()\n if x0 is None and x1 is None:\n x0, x1 = ax.get_xlim()\n ax.fill_between(np.linspace(x0, x1, 2), y0, y1, alpha=alpha, color=color, **kwargs)\n ax.set_xlim(x0, x1)\n ax.set_ylim(ymin, ymax)\n\ndef axvband(ax, x0, x1, y0=None, y1=None, color='C1', alpha=0.2, **kwargs):\n \"\"\"\n Make a vertical band between x0 and x1 (highlighting effect)\n Parameters\n ----------\n ax: plt.axes.axes object\n x0: x-coordinate of the left of a band (x0 < x1)\n x1: x-coordinate of the right of a band (x0 < x1)\n y0: y-coordinate of the bottom of a band (y0 < y1)\n y1: y-coordinate of the top of a band (y0 < y1). 
As a default, y0, y1 = ax.get_ylim()\n color: color of a band\n alpha: alpha of a band\n kwargs: kwargs for ax.fill_between()\n\n Returns\n -------\n\n \"\"\"\n xmin, xmax = ax.get_xlim()\n if y0 is None and y1 is None:\n y0, y1 = ax.get_ylim()\n ax.fill_between(np.linspace(x0, x1, 2), y0, y1, alpha=alpha, color=color, **kwargs)\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(y0, y1)\n\n# Arrow plots\nclass Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\n FancyArrowPatch.draw(self, renderer)\n\ndef add_arrow_to_line(axes, line, arrow_locs=[0.2, 0.4, 0.6, 0.8], head_width=15, transform=None, **kwargs):\n if isinstance(line, mlines.Line2D):\n add_arrow_to_line2D(axes, line, arrow_locs=arrow_locs, head_width=head_width, transform=transform, **kwargs)\n else:\n add_arrow_to_line3D(axes, line, arrow_locs=arrow_locs, head_width=head_width, transform=transform, **kwargs)\n\n\ndef add_arrow_to_line2D(\n axes, line, arrow_locs=[0.2, 0.4, 0.6, 0.8],\n arrowstyle='-|>', head_width=15, transform=None):\n \"\"\"\n Add arrows to a matplotlib.lines.Line2D at selected locations.\n\n Parameters:\n -----------\n axes:\n line: Line2D object as returned by plot command\n arrow_locs: list of locations where to insert arrows, % of total length\n arrowstyle: style of the arrow\n arrowsize: size of the arrow\n transform: a matplotlib transform instance, default to data coordinates\n\n Returns:\n --------\n arrows: list of arrows\n \"\"\"\n if not isinstance(line, mlines.Line2D):\n raise ValueError(\"expected a matplotlib.lines.Line2D object\")\n x, y = line.get_xdata(), line.get_ydata()\n\n arrow_kw = {\n \"arrowstyle\": arrowstyle,\n \"mutation_scale\": head_width * line.get_linewidth(),\n }\n\n color = line.get_color()\n use_multicolor_lines = isinstance(color, np.ndarray)\n if use_multicolor_lines:\n raise NotImplementedError(\"multicolor lines not supported\")\n else:\n arrow_kw['color'] = color\n\n linewidth = line.get_linewidth()\n if isinstance(linewidth, np.ndarray):\n raise NotImplementedError(\"multiwidth lines not supported\")\n else:\n arrow_kw['linewidth'] = linewidth\n\n if transform is None:\n transform = axes.transData\n\n arrows = []\n for loc in arrow_locs:\n s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))\n n = np.searchsorted(s, s[-1] * loc)\n arrow_tail = (x[n], y[n])\n arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))\n p = mpatches.FancyArrowPatch(\n arrow_tail, arrow_head, transform=transform,\n **arrow_kw)\n axes.add_patch(p)\n arrows.append(p)\n return arrows\n\ndef add_arrow_to_line3D(\n axes, line, arrow_locs=[0.2, 0.4, 0.6, 0.8], head_width=15, lw=1, transform=None, **kwargs):\n \"\"\"\n Add arrows to a matplotlib.lines.Line2D at selected locations.\n\n example:\n # plotting\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n x = np.linspace(0, 10, 11)\n y = np.linspace(0, 10, 11)\n z = np.zeros(11)\n line, = ax.plot(x,y,z, alpha=1, lw=3, color='k')\n add_arrow_to_line3D(ax, line, arrow_locs=np.linspace(0., 1., 5), alpha=0.3)\n\n Parameters:\n -----------\n axes:\n line: Line2D object as returned by plot command\n arrow_locs: list of locations where to insert arrows, % of total length\n arrowstyle: style of the arrow\n arrowsize: size of the arrow\n 
transform: a matplotlib transform instance, default to data coordinates\n\n Returns:\n --------\n arrows: list of arrows\n \"\"\"\n if not isinstance(line, mpl_toolkits.mplot3d.art3d.Line3D):\n raise ValueError(\"expected a matplotlib.lines.Line3D object\")\n x, y, z = line.get_data_3d()\n\n length = len(x)\n if length < 2:\n return None\n else:\n arrow_kw = {}\n\n color = line.get_color()\n use_multicolor_lines = isinstance(color, np.ndarray)\n if use_multicolor_lines:\n raise NotImplementedError(\"multicolor lines not supported\")\n else:\n kwargs['color'] = color\n\n linewidth = line.get_linewidth()\n if isinstance(linewidth, np.ndarray):\n raise NotImplementedError(\"multiwidth lines not supported\")\n else:\n kwargs['linewidth'] = linewidth\n\n if transform is None:\n transform = axes.transData\n\n arrows = []\n for loc in arrow_locs:\n s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2 + np.diff(z) ** 2))\n n = np.searchsorted(s, s[-1] * loc)\n arrow_tail = (x[n], y[n], z[n])\n arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]), np.mean(z[n:n + 2]))\n arrow_lines = list(zip(arrow_tail, arrow_head))\n\n arrow = Arrow3D(\n arrow_lines[0], arrow_lines[1], arrow_lines[2],\n mutation_scale=head_width,\n lw=lw, **kwargs)\n ax.add_artist(arrow)\n arrows.append(arrow)\n return arrows\n\ndef arrow3D(x, y, z, dx, dy, dz, lw=3, arrowstyle='-|>', color='r', mutation_scale=20,\n ax=None, fig=None, fignum=1, subplot=111, figsize=None,\n xlabel='x (mm)', ylabel='y (mm)', zlabel='z (mm)',\n **kwargs):\n\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize, projection='3d')\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n\n\n if isinstance(x, (int, float)):\n arrow_obj = Arrow3D([x, x+dx], [y, y+dy],\n [z, z+dz], mutation_scale=mutation_scale,\n lw=lw, arrowstyle=arrowstyle, color=color, **kwargs)\n ax.add_artist(arrow_obj)\n elif isinstance(x, (np.ndarray, list)):\n if not len(x) == len(y) == len(z) == len(dx) == len(dy) == len(dz):\n raise ValueError('graph.arrow3D: x, y, z, dx, dy, dz must have the same length')\n for i, x_ in enumerate(x):\n arrow_obj = Arrow3D([x[i], x[i] + dx[i]], [y[i], y[i] + dy[i]],\n [z[i], z[i] + dz[i]], mutation_scale=mutation_scale,\n lw=lw, arrowstyle=arrowstyle, color=color, **kwargs)\n ax.add_artist(arrow_obj)\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n ax.set_zlabel(zlabel)\n return fig, ax, arrow_obj\n\ndef arrow(x, y, dx, dy,\n ax=None, fig=None, fignum=1, subplot=111, figsize=None, **kwargs):\n \"\"\"\n Adds an arrow on a canvas\n ... 
Specify an arrow by its starting point (x, y) and its direction (dx, dy)\n\n Parameters\n ----------\n x\n y\n dx\n dy\n ax\n fig\n fignum\n subplot\n figsize\n kwargs\n\n Returns\n -------\n\n \"\"\"\n if fig is None and ax is None:\n fig, ax = set_fig(fignum, subplot, figsize=figsize)\n elif fig is None:\n fig = plt.gcf()\n elif ax is None:\n ax = plt.gca()\n try:\n ax.arrow(x, y, dx, dy, **kwargs)\n except:\n n = len(x)\n for i in range(n):\n ax.arrow(x[i], y[i], dx[i], dy[i], **kwargs)\n return fig, ax\n\n## Legend\n# Legend\ndef legend(ax, remove=False, **kwargs):\n \"\"\"\n loc:\n best\t0, upper right\t1, upper left\t2, lower left\t3, lower right\t4, right\t5,\n center left\t6, center right\t7, lower center\t8, upper center\t9, center\t10\n Parameters\n ----------\n ax\n kwargs\n\n Returns\n -------\n\n \"\"\"\n leg = ax.legend(**kwargs)\n if remove:\n leg.get_frame().set_facecolor('none')\n\n\n# Colorbar\nclass FormatScalarFormatter(mpl.ticker.ScalarFormatter):\n \"\"\"\n Ad-hoc class to subclass matplotlib.ticker.ScalarFormatter\n in order to alter the number of visible digits on color bars\n \"\"\"\n def __init__(self, fformat=\"%03.1f\", offset=True, mathText=True):\n self.fformat = fformat\n mpl.ticker.ScalarFormatter.__init__(self,useOffset=offset,useMathText=mathText)\n self.set_scientific(True)\n # Scientific notation is used for data < 10^-n or data >= 10^m, where n and m are the power limits set using set_powerlimits((n,m))\n self.set_powerlimits((0, 0))\n def _set_format(self):\n \"\"\"\n Call this method to change the format of tick labels\n\n Returns\n -------\n\n \"\"\"\n\n self.format = self.fformat\n if self._useMathText:\n # self.format = '$%s$' % mpl.ticker._mathdefault(self.format) # matplotlib < 3.1\n self.format = '$%s$' % self.format\n\n\n def _update_format(self, fformat):\n self.fformat = fformat\n self._set_format()\n\ndef reset_sfmt(fformat=\"%03.1f\"):\n global sfmt\n sfmt = FormatScalarFormatter() # Default format: \"%04.1f\"\n # sfmt.fformat = fformat # update format\n # sfmt._set_format() # this updates format for scientific nota\n sfmt._update_format(fformat)\n\nreset_sfmt()\n\ndef get_sfmt():\n \"\"\n global sfmt\n reset_sfmt()\n return sfmt\n\ndef add_colorbar_old(mappable, fig=None, ax=None, fignum=None, label=None, fontsize=__fontsize__,\n vmin=None, vmax=None, cmap='jet', option='normal', **kwargs):\n \"\"\"\n Adds a color bar (Depricated. 
replaced by add_colorbar)\n Parameters\n ----------\n mappable : image like QuadMesh object to which the color bar applies (NOT a plt.figure instance)\n ax : Parent axes from which space for a new colorbar axes will be stolen\n label :\n\n Returns\n -------\n \"\"\"\n # Get a Figure instance\n if fig is None:\n fig = plt.gcf()\n if fignum is not None:\n fig = plt.figure(num=fignum)\n if ax is None:\n ax = plt.gca()\n\n # if vmin is not None and vmax is not None:\n # norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n # elif vmin is None and vmax is not None:\n # print 'vmin was not provided!'\n # elif vmin is not None and vmax is None:\n # print 'vmax was not provided!'\n\n # fig.colorbar makes a another ax object which colives with ax in the fig instance.\n # Therefore, cb has all attributes that ax object has!\n\n if option == 'scientific':\n cb = fig.colorbar(mappable, ax=ax, cmap=cmap, format=sfmt, **kwargs)\n else:\n cb = fig.colorbar(mappable, ax=ax, cmap=cmap, **kwargs)\n\n if not label == None:\n cb.set_label(label, fontsize=fontsize)\n\n return cb\n\n\ndef add_colorbar(mappable, fig=None, ax=None, fignum=None, location='right', label=None, fontsize=None, option='normal',\n tight_layout=True, ticklabelsize=None, aspect='equal', ntick=5, tickinc=None, **kwargs):\n \"\"\"\n Adds a color bar\n\n e.g.\n fig = plt.figure()\n img = fig.add_subplot(111)\n ax = img.imshow(im_data)\n colorbar(ax)\n Parameters\n ----------\n mappable\n location\n\n Returns\n -------\n\n \"\"\"\n global sfmt\n def get_ticks_for_sfmt(mappable, n=10, inc=0.5, **kwargs):\n \"\"\"\n Returns ticks for scientific notation\n ... setting format=smft sometimes fails to use scientific notation for colorbar.\n ... This function should ensure the colorbar object to have appropriate ticks\n to display numbers in scientific fmt once the generated ticks are passed to fig.colorbar().\n Parameters\n ----------\n mappable\n n: int, (ROUGHLY) the number of ticks\n inc: float (0, 0.5]\n ... 
0.5 or 0.25 is recommended\n Returns\n -------\n ticks, list, ticks for scientific format\n \"\"\"\n # ticks for scientific notation\n zmin, zmax = np.nanmin(mappable.get_array()), np.nanmax(mappable.get_array())\n if 'vmin' in kwargs.keys():\n zmin = kwargs['vmin']\n if 'vmax' in kwargs.keys():\n zmax = kwargs['vmax']\n\n # ticks = np.linspace(zmin, zmax, 2*n)\n exponent = int(np.floor(np.log10(np.abs(zmax))))\n # ticks = np.around(ticks[1::2], decimals=-exponent + 1)\n if tickinc is not None:\n # Specify the increment of ticks!\n dz = inc * 10 ** exponent\n ticks = [i * dz for i in range(int(zmin / dz), int(zmax / dz)+1)]\n else:\n # Specify the number of ticks!\n exp = int(np.floor(np.log10((zmax - zmin) / n)))\n dz = np.round((zmax - zmin) / n, -exp)\n # exp = int(np.ceil(np.log10((zmax - zmin) / n)))\n # dz = (zmax - zmin) / n\n ticks = [i * dz for i in range(int(zmin / dz), int(zmax / dz) + 1)]\n # print(np.log10((zmax - zmin) / n), exp)\n # print((zmax - zmin) / n, dz)\n # print(ticks)\n\n return ticks\n\n def remove_vmin_vmax_from_kwargs(**kwargs):\n if 'vmin' in kwargs.keys():\n del kwargs['vmin']\n if 'vmax' in kwargs.keys():\n del kwargs['vmax']\n return kwargs\n\n # ax = mappable.axes\n # fig = ax.figure\n # Get a Figure instance\n if fig is None:\n fig = plt.gcf()\n if fignum is not None:\n fig = plt.figure(num=fignum)\n if ax is None:\n ax = plt.gca()\n if fig is None:\n fig = plt.gcf()\n\n reset_sfmt()\n\n divider = axes_grid.make_axes_locatable(ax)\n cax = divider.append_axes(location, size='5%', pad=0.15)\n if option == 'scientific_custom':\n ticks = get_ticks_for_sfmt(mappable, n=ntick, inc=tickinc, **kwargs)\n kwargs = remove_vmin_vmax_from_kwargs(**kwargs)\n # sfmt.format = '$\\mathdefault{%1.1f}$'\n cb = fig.colorbar(mappable, cax=cax, format=sfmt, ticks=ticks, **kwargs)\n # cb = fig.colorbar(mappable, cax=cax, format=sfmt, **kwargs)\n elif option == 'scientific':\n # old but more robust\n kwargs = remove_vmin_vmax_from_kwargs(**kwargs)\n cb = fig.colorbar(mappable, cax=cax, format=sfmt, **kwargs)\n else:\n kwargs = remove_vmin_vmax_from_kwargs(**kwargs)\n cb = fig.colorbar(mappable, cax=cax, **kwargs)\n\n if not label is None:\n if fontsize is None:\n cb.set_label(label)\n else:\n cb.set_label(label, fontsize=fontsize)\n if ticklabelsize is not None:\n cb.ax.tick_params(labelsize=ticklabelsize)\n\n # Adding a color bar may distort the aspect ratio. Fix it.\n if aspect=='equal':\n ax.set_aspect('equal')\n\n # Adding a color bar may disport the overall balance of the figure. Fix it.\n if tight_layout:\n fig.tight_layout()\n\n return cb\n\n\ndef add_discrete_colorbar(ax, colors, vmin=0, vmax=None, label=None, fontsize=None, option='normal',\n tight_layout=True, ticklabelsize=None, ticklabel=None,\n aspect = None, **kwargs):\n fig = ax.get_figure()\n if vmax is None:\n vmax = len(colors)\n tick_spacing = (vmax - vmin) / float(len(colors))\n ticks = np.linspace(vmin, vmax, len(colors)+1) + tick_spacing / 2. 
# tick positions\n\n # if there are too many ticks, just use 3 ticks\n if len(ticks) > 10:\n n = len(ticks)\n ticks = [ticks[0], ticks[n//2], ticks[-2]] # integer division: list indices must be ints\n if ticklabel is not None:\n ticklabel = [ticklabel[0], ticklabel[n//2], ticklabel[-1]]\n\n\n cmap = mpl.colors.ListedColormap(colors)\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n sm = plt.cm.ScalarMappable(norm=norm, cmap=cmap)\n sm.set_array([]) # dummy mappable\n\n if option == 'scientific':\n cb = fig.colorbar(sm, ticks=ticks, format=sfmt, **kwargs)\n else:\n cb = fig.colorbar(sm, ticks=ticks, **kwargs)\n\n if ticklabel is not None:\n cb.ax.set_yticklabels(ticklabel)\n\n if not label is None:\n if fontsize is None:\n cb.set_label(label)\n else:\n cb.set_label(label, fontsize=fontsize)\n if ticklabelsize is not None:\n cb.ax.tick_params(labelsize=ticklabelsize)\n\n # Adding a color bar may distort the aspect ratio. Fix it.\n if aspect=='equal':\n ax.set_aspect('equal')\n\n # Adding a color bar may disrupt the overall balance of the figure. Fix it.\n if tight_layout:\n fig.tight_layout()\n\n return cb\n\n\n\ndef add_colorbar_alone(ax, values, cmap=cmap, label=None, fontsize=None, option='normal', fformat=None,\n tight_layout=True, ticklabelsize=None, ticklabel=None,\n aspect = None, location='right', color='k',\n size='5%', pad=0.15, **kwargs):\n \"\"\"\n Add a colorbar to a figure without a mappable\n ... It creates a dummy mappable with given values\n\n ... LOCATION OF CAX\n fig, ax = graph.set_fig(1, 111)\n w, pad, size = 0.1, 0.05, 0.05\n graph.add_colorbar_alone(ax, [0, 1], pad=float2pc(pad), size=float2pc(size), tight_layout=False)\n graph.add_subplot_axes(ax, [1-w-(1-1/(1.+pad+size)), 0.8, w, 0.2])\n\n\n Parameters\n ----------\n ax: Axes instance\n values: 1D array-like; the min and max of this array set the colorbar range\n cmap: str, cmap instance\n label: str, label of the color bar\n fontsize: float, fontsize of the label\n option: str, choose from 'normal' and 'scientific'\n ... if 'scientific', the color bar is shown in a scientific format like 1x10^exponent\n fformat: str, default: None equivalent to \"%03.1f\"\n tight_layout: bool, if True, fig.tight_layout() is called.\n ticklabelsize: float\n ticklabel: 1d array-like\n aspect:\n ... Adding a color bar may distort the aspect ratio. Fix it.\n if aspect == 'equal':\n ax.set_aspect('equal')\n location\n color\n kwargs\n\n Returns\n -------\n cb:\n \"\"\"\n fig = ax.get_figure()\n\n # number of values\n n = np.asarray(values).size\n # get min/max values\n vmin, vmax = np.nanmin(values), np.nanmax(values)\n # vmin, vmax = 0, len(values)\n\n\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n sm = plt.cm.ScalarMappable(norm=norm, cmap=cmap)\n sm.set_array([]) # dummy mappable\n\n # make an axis instance for a colorbar\n ## divider.append_axes(location, size=size, pad=pad) creates an Axes\n ## s.t. 
the size of the cax becomes 'size' (e.g. '5%') of the ax.\n divider = axes_grid.make_axes_locatable(ax)\n cax = divider.append_axes(location, size=size, pad=pad)\n\n\n if option == 'scientific':\n if fformat is not None:\n global sfmt\n sfmt.fformat = fformat\n\n cb = fig.colorbar(sm, cax=cax, format=sfmt, **kwargs)\n reset_sfmt()\n else:\n cb = fig.colorbar(sm, cax=cax, **kwargs)\n\n if ticklabel is not None:\n cb.ax.set_yticklabels(ticklabel)\n\n if label is not None:\n if fontsize is None:\n cb.set_label(label, color=color)\n else:\n cb.set_label(label, fontsize=fontsize, color=color)\n if ticklabelsize is not None:\n cb.ax.tick_params(labelsize=ticklabelsize)\n\n # Adding a color bar may distort the aspect ratio. Fix it.\n if aspect == 'equal':\n ax.set_aspect('equal')\n\n # Adding a color bar may disrupt the overall balance of the figure. Fix it.\n if tight_layout:\n fig.tight_layout()\n return cb\n\n# def add_colorbar_alone(fig=None, ax=None, ax_loc=[0.05, 0.80, 0.9, 0.15], vmin=0, vmax=1, cmap=cmap, orientation='horizontal',\n# label=None, fontsize=__fontsize__, *kwargs):\n# \"\"\"\n# Add a colorbar alone to a canvas.\n# Use a specified figure and axis object if given. Otherwise, create one at location \"ax_loc\"\n# Parameters\n# ----------\n# fig\n# ax\n# ax_loc\n# vmin\n# vmax\n# cmap\n# orientation\n# label\n#\n# Returns\n# -------\n# ax: axis object\n# cb: colorbarbase object\n#\n# \"\"\"\n#\n#\n# if fig is None:\n# fig = plt.gcf()\n# if ax is None:\n# ax = fig.add_axes(ax_loc)\n# norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n# cb = mpl.colorbar.ColorbarBase(ax, cmap=cmap,\n# norm=norm,\n# orientation=orientation)\n# if label is not None:\n# cb.set_label(label, fontsize=fontsize)\n# return ax, cb\n\n\n
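\n# A minimal usage sketch for add_colorbar_alone (untested; 'data' is a placeholder 2D array):\n# fig, ax = set_fig(1, 111)\n# ax.imshow(data, cmap='viridis', vmin=np.nanmin(data), vmax=np.nanmax(data))\n# add_colorbar_alone(ax, [np.nanmin(data), np.nanmax(data)], cmap='viridis', label='intensity')\n\n\ndef colorbar(fignum=None, label=None, fontsize=__fontsize__):\n \"\"\"\n Use is DEPRECATED. 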
This method is replaced by add_colorbar(mappable)\n I keep this method for old codes which might have used this method\n Parameters\n ----------\n fignum :\n label :\n\n Returns\n -------\n \"\"\"\n fig, ax = set_fig(fignum)\n c = plt.colorbar()\n if not label==None:\n c.set_label(label, fontsize=fontsize)\n return c\n\n\n### Axes\n# Label\ndef labelaxes(ax, xlabel, ylabel, **kwargs):\n ax.set_xlabel(xlabel, **kwargs)\n ax.set_ylabel(ylabel, **kwargs)\n# multi-color labels\ndef labelaxes_multicolor(ax, list_of_strings, list_of_colors, axis='x', anchorpad=0, **kwargs):\n \"\"\"this function creates axes labels with multiple colors\n ax specifies the axes object where the labels should be drawn\n list_of_strings is a list of all of the text items\n list_if_colors is a corresponding list of colors for the strings\n axis='x', 'y', or 'both' and specifies which label(s) should be drawn\"\"\"\n from matplotlib.offsetbox import AnchoredOffsetbox, TextArea, HPacker, VPacker\n\n # x-axis label\n if axis == 'x' or axis == 'both':\n boxes = [TextArea(text, textprops=dict(color=color, ha='left', va='bottom', **kwargs))\n for text, color in zip(list_of_strings, list_of_colors)]\n xbox = HPacker(children=boxes, align=\"center\", pad=0, sep=5)\n anchored_xbox = AnchoredOffsetbox(loc=3, child=xbox, pad=anchorpad, frameon=False, bbox_to_anchor=(0.2, -0.09),\n bbox_transform=ax.transAxes, borderpad=0.)\n ax.add_artist(anchored_xbox)\n\n # y-axis label\n if axis == 'y' or axis == 'both':\n boxes = [TextArea(text, textprops=dict(color=color, ha='left', va='bottom', rotation=90, **kwargs))\n for text, color in zip(list_of_strings[::-1], list_of_colors)]\n ybox = VPacker(children=boxes, align=\"center\", pad=0, sep=5)\n anchored_ybox = AnchoredOffsetbox(loc=3, child=ybox, pad=anchorpad, frameon=False, bbox_to_anchor=(-0.2, 0.4),\n bbox_transform=ax.transAxes, borderpad=0.)\n ax.add_artist(anchored_ybox)\n\n\n\n# Limits\ndef setaxes(ax, xmin, xmax, ymin, ymax):\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n return ax\n\n## Set axes to semilog or loglog\ndef tosemilogx(ax=None, **kwargs):\n if ax == None:\n ax = plt.gca()\n ax.set_xscale(\"log\", **kwargs)\ndef tosemilogy(ax=None, **kwargs):\n if ax == None:\n ax = plt.gca()\n ax.set_yscale(\"log\", **kwargs)\ndef tologlog(ax=None, **kwargs):\n if ax == None:\n ax = plt.gca()\n ax.set_xscale(\"log\", **kwargs)\n ax.set_yscale(\"log\", **kwargs)\n\n# Ticks\ndef set_xtick_interval(ax, tickint):\n \"\"\"\n Sets x-tick interval as tickint\n Parameters\n ----------\n ax: Axes object\n tickint: float, tick interval\n\n Returns\n -------\n\n \"\"\"\n ax.xaxis.set_major_locator(ticker.MultipleLocator(tickint))\n\ndef set_ytick_interval(ax, tickint):\n \"\"\"\n Sets y-tick interval as tickint\n Parameters\n ----------\n ax: Axes object\n tickint: float, tick interval\n\n Returns\n -------\n\n \"\"\"\n ax.yaxis.set_major_locator(ticker.MultipleLocator(tickint))\n\n\n\n##Title\ndef title(ax, title, **kwargs):\n \"\"\"\n ax.set_title(title, **kwargs)\n ... if you want more space for the tile, try \"pad=50\"\n\n Parameters\n ----------\n ax\n title\n subplot\n kwargs\n\n Returns\n -------\n\n \"\"\"\n ax.set_title(title, **kwargs)\n\ndef suptitle(title, fignum=None,\n tight_layout=True,\n rect=[0, 0.03, 1, 0.95],\n **kwargs):\n \"\"\"\n Add a centered title to the figure.\n If fignum is given, it adds a title, then it reselects the figure which selected before this method was called.\n ... 
this is because figure class does not have a suptitle method.\n ...\n Parameters\n ----------\n title\n fignum\n kwargs\n\n Returns\n -------\n\n \"\"\"\n if fignum is not None:\n plt.figure(fignum)\n fig = plt.gcf()\n\n plt.suptitle(title, **kwargs)\n if tight_layout:\n fig.tight_layout(rect=rect)\n\n\n\n\n##Text\ndef set_standard_pos(ax):\n \"\"\"\n Sets standard positions for added texts in the plot\n left: 0.025, right: 0.75\n bottom: 0.10 top: 0.90\n xcenter: 0.5 ycenter:0.5\n Parameters\n ----------\n ax\n\n Returns\n -------\n top, bottom, right, left, xcenter, ycenter: float, position\n\n \"\"\"\n left_margin, right_margin, bottom_margin, top_margin = 0.025, 0.75, 0.1, 0.90\n\n xleft, xright = ax.get_xlim()\n ybottom, ytop = ax.get_ylim()\n width, height = np.abs(xright - xleft), np.abs(ytop - ybottom)\n\n if ax.get_xscale() == 'linear':\n left, right = xleft + left_margin * width, xleft + right_margin * width\n xcenter = xleft + width/2.\n if ax.get_yscale() == 'linear':\n bottom, top = ybottom + bottom_margin * height, ybottom + top_margin * height\n ycenter = ybottom + height / 2.\n\n if ax.get_xscale() == 'log':\n left, right = xleft + np.log10(left_margin * width), xleft + np.log10(right_margin * width)\n xcenter = xleft + np.log10(width/2.)\n\n if ax.get_yscale() == 'log':\n bottom, top = ybottom + np.log10(bottom_margin * height), ybottom + np.log10(top_margin * height)\n ycenter = ybottom + np.log10(height / 2.)\n\n return top, bottom, right, left, xcenter, ycenter, height, width\n\n\ndef addtext(ax, text='text goes here', x=0, y=0, color='k',\n option=None, npartition=15, **kwargs):\n \"\"\"\n Adds text to a plot. You can specify the position where the texts will appear by 'option'\n | tl2 tc2 tr2 |\n | tl tc tr |\n | tl3 tc3 tr3 |\n | |\n | cl2 |\n | cl cc cr |\n | cl3 |\n | |\n | bl2 br2 |\n | bl bc br |\n | bl3 br3 |\n\n Parameters\n ----------\n ax\n subplot\n text\n x\n y\n fontsize\n color\n option: default locations\n kwargs\n\n Returns\n ax : with a text\n -------\n\n \"\"\"\n top, bottom, right, left, xcenter, ycenter, height, width = set_standard_pos(ax)\n dx, dy = width / npartition, height / npartition\n\n if type(option) in [tuple or list or np.ndarray]:\n x, y = option[0], option[1]\n option = None\n\n if option == None:\n ax.text(x, y, text, color=color, **kwargs)\n if option == 'tr':\n ax.text(right, top, text, color=color, **kwargs)\n if option == 'tr2':\n ax.text(right, top + dy, text, color=color, **kwargs)\n if option == 'tr3':\n ax.text(right, top - dy, text, color=color, **kwargs)\n if option == 'tl':\n ax.text(left, top, text, color=color, **kwargs)\n if option == 'tl2':\n ax.text(left, top + dy, text, color=color, **kwargs)\n if option == 'tl3':\n ax.text(left, top - dy, text, color=color, **kwargs)\n\n if option == 'tc':\n ax.text(xcenter, top, text, color=color, **kwargs)\n if option == 'tc2':\n ax.text(xcenter, top + dy, text, color=color, **kwargs)\n if option == 'tc3':\n ax.text(xcenter, top - dy, text, color=color, **kwargs)\n if option == 'br':\n ax.text(right, bottom, text, color=color, **kwargs)\n if option == 'br2':\n ax.text(right, bottom + dy, text, color=color, **kwargs)\n if option == 'br3':\n ax.text(right, bottom - dy, text, color=color, **kwargs)\n if option == 'bl':\n ax.text(left, bottom, text, color=color, **kwargs)\n if option == 'bl2':\n ax.text(left, bottom + dy, text, color=color, **kwargs)\n if option == 'bl3':\n ax.text(left, bottom - dy, text, color=color, **kwargs)\n if option == 'bc':\n ax.text(xcenter, bottom, text, 
color=color, **kwargs)\n if option == 'bc2':\n ax.text(xcenter, bottom + dy, text, color=color, **kwargs)\n if option == 'bc3':\n ax.text(xcenter, bottom - dy, text, color=color, **kwargs)\n if option == 'cr':\n ax.text(right, ycenter, text, color=color, **kwargs)\n if option == 'cl':\n ax.text(left, ycenter, text, color=color, **kwargs)\n if option == 'cl2':\n ax.text(left, ycenter + dy, text, color=color, **kwargs)\n if option == 'cl3':\n ax.text(left, ycenter - dy, text, color=color, **kwargs)\n if option == 'cc':\n ax.text(xcenter, ycenter, text, color=color, **kwargs)\n return ax\n\ndef draw_power_triangle(ax, x, y, exponent, w=None, h=None, facecolor='none', edgecolor='r', alpha=1.0, flip=False,\n fontsize=__fontsize__, set_base_label_one=False, beta=20, zorder=100, **kwargs):\n \"\"\"\n Draws a triangle which indicates a power law in the log-log plot.\n\n Parameters\n ----------\n ax: matplotlib.axes._subplots.AxesSubplot object\n ... get it like plt.gca()\n x: float / int\n ... x coordinate of the triangle drawn on the plot\n y: float / int\n ... x coordinate of the triangle drawn on the plot\n exponent: float / int\n ... exponent of the power law\n ... Y = X^exponent\n w: float / int\n ... number of decades for the drawn triangle to span on the plot\n ... By default, this function draws a triangle with size of 0.4 times the width of the plot\n h: float / int\n ... number of decades for the drawn triangle to span on the plot\n facecolor: str\n ... face color of the drawn triangle, default: 'none' (transparent)\n ... passed to mpatches.PathPatch object\n edgecolor: str\n ... edge color of the drawn triangle, default: 'r'\n ... passed to mpatches.PathPatch object\n alpha: float [0, 1]\n ... alpha value of the drawn triangle\n flip: bool\n ... If True, it will flip the triangle horizontally.\n fontsize: float / int\n ... fontsize of the texts to indicate the exponent aside the triangle\n set_base_label_one: bool, default: False\n ... If True, it will always annotate the base as '1' and alter the text for the height accordingly.\n ... By default, it will annotate the base and the height using the closest integer pair.\n beta: float / int, default: 20\n ... This is used to control the spacing between the text and the drawn triangle\n ... 
The higher beta is, the less spacing between the text and the triangle\n zorder: zorder of triangle, default: 0\n kwargs: the other kwargs will be passed to ax.text()\n\n Returns\n -------\n\n \"\"\"\n def simplest_fraction_in_interval(x, y):\n \"\"\"Return the fraction with the lowest denominator in [x,y].\"\"\"\n if x == y:\n # The algorithm will not terminate if x and y are equal.\n raise ValueError(\"Equal arguments.\")\n elif x < 0 and y < 0:\n # Handle negative arguments by solving positive case and negating.\n return -simplest_fraction_in_interval(-y, -x)\n elif x <= 0 or y <= 0:\n # One argument is 0, or arguments are on opposite sides of 0, so\n # the simplest fraction in interval is 0 exactly.\n return Fraction(0)\n else:\n # Remainder and Coefficient of continued fractions for x and y.\n xr, xc = modf(1 / x);\n yr, yc = modf(1 / y);\n if xc < yc:\n return Fraction(1, int(xc) + 1)\n elif yc < xc:\n return Fraction(1, int(yc) + 1)\n else:\n return 1 / (int(xc) + simplest_fraction_in_interval(xr, yr))\n\n def approximate_fraction(x, e):\n \"\"\"Return the fraction with the lowest denominator that differs\n from x by no more than e.\"\"\"\n return simplest_fraction_in_interval(x - e, x + e)\n\n xmin, xmax = ax.get_xlim()\n ymin, ymax = ax.get_ylim()\n exp_xmax, exp_xmin = np.log10(xmax), np.log10(xmin)\n exp_ymax, exp_ymin = np.log10(ymax), np.log10(ymin)\n exp_x, exp_y = np.log10(x), np.log10(y)\n\n # Default size of the triangle is 0.4 times the width of the plot\n if w is None and h is None:\n exp_w = (exp_xmax - exp_xmin) * 0.4\n exp_h = exp_w * exponent\n elif w is None and h is not None:\n exp_h = h\n exp_w = exp_h / exponent\n elif w is not None and h is None:\n exp_w = w\n exp_h = exp_w * exponent\n else:\n exp_w = w\n exp_h = h\n\n w = 10 ** (exp_x + exp_w) - 10 ** exp_x # base of the triangle\n h = 10 ** (exp_y + exp_h) - 10 ** exp_y # height of the triangle\n if not flip:\n path = mpl.path.Path([[x, y], [x + w, y], [x + w, y + h], [x, y]])\n else:\n path = mpl.path.Path([[x, y], [x, y + h], [x + w, y + h], [x, y]])\n patch = mpatches.PathPatch(path, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha, zorder=zorder)\n ax.add_patch(patch)\n\n\n # annotate\n # beta = 20. 
# greater beta corresponds to less spacing between the texts and the triangle edges\n if exponent >= 0 and not flip:\n x_base, y_base = 10 ** (exp_x + exp_w * 0.5), 10 ** (exp_y - (exp_ymax - exp_ymin) / beta)\n x_height, y_height = 10 ** (exp_w + exp_x + 0.4*(exp_xmax - exp_xmin) / beta), 10 ** (exp_y + exp_h * 0.5)\n elif exponent < 0 and not flip:\n x_base, y_base = 10 ** (exp_x + exp_w * 0.5), 10 ** (exp_y + 0.3*(exp_ymax - exp_ymin) / beta)\n x_height, y_height = 10 ** (exp_w + exp_x + 0.4*(exp_xmax - exp_xmin) / beta), 10 ** (exp_y + exp_h * 0.5)\n elif exponent >= 0 and flip:\n x_base, y_base = 10 ** (exp_x + exp_w * 0.4), 10 ** (exp_y + exp_h + 0.3*(exp_ymax - exp_ymin) / beta)\n x_height, y_height = 10 ** (exp_x - (exp_xmax - exp_xmin) / beta), 10 ** (exp_y + exp_h * 0.5)\n else:\n x_base, y_base = 10 ** (exp_x + exp_w * 0.5), 10 ** (exp_y + exp_h - (exp_ymax - exp_ymin) / beta)\n x_height, y_height = 10 ** (exp_x - (exp_xmax - exp_xmin) / beta), 10 ** (exp_y + exp_h * 0.6)\n\n\n if set_base_label_one:\n ax.text(x_base, y_base, '1', fontsize=fontsize)\n ax.text(x_height, y_height, '%.2f' % exponent, fontsize=fontsize)\n else:\n # get the numbers to put on the graph to indicate the power\n exponent_rational = approximate_fraction(exponent, 0.0001)\n ax.text(x_base, y_base, str(np.abs(exponent_rational.denominator)), fontsize=fontsize, **kwargs)\n ax.text(x_height, y_height, str(np.abs(exponent_rational.numerator)), fontsize=fontsize, **kwargs)\n\n\n\n##Clear plot\ndef clf(fignum=None):\n plt.figure(fignum)\n plt.clf()\ndef close(*argv, **kwargs):\n plt.close(*argv, **kwargs)\n\n## Color cycle\ndef skipcolor(numskip, color_cycle=__color_cycle__):\n \"\"\" Skips numskip times in the color_cycle iterator\n Can be used to reset the color_cycle\"\"\"\n for i in range(numskip):\n next(color_cycle)\ndef countcolorcycle(color_cycle = __color_cycle__):\n return sum(1 for color in color_cycle)\n\ndef get_default_color_cycle():\n return __color_cycle__\n\ndef get_first_n_colors_from_color_cycle(n):\n color_list = []\n for i in range(n):\n color_list.append(next(__color_cycle__))\n return color_list\n\ndef get_first_n_default_colors(n):\n return __def_colors__[:n]\n\n\ndef apply_custom_cyclers(ax, color=['r', 'b', 'g', 'y'], linestyle=['-', '-', '-', '-'], linewidth=[3, 3, 3, 3],\n marker=['o', 'o', 'o', 'o'], s=[0,0,0,0], **kwargs):\n\n \"\"\"\n This is a simple example to apply a custom cyclers for particular plots.\n ... This simply updates the rcParams so one must call this function BEFORE ceration of the plots.\n ... e.g.\n fig, ax = set_fig(1, 111)\n apply_custom_cyclers(ax, color=['r', 'b', 'g', 'y'])\n ax.plot(x1, y1)\n ax.plot(x2, y2)\n ...\n\n Parameters\n ----------\n ax: mpl.axes.Axes instance\n color: list of strings, color\n linewidths: list of float values, linewidth\n linestyles: list of strings, linestyle\n marker: list of strings, marker\n s: list of float values, marker size\n\n Returns\n -------\n None\n\n \"\"\"\n custom_cycler = cycler(color=color) + cycler(linestyle=linestyle) + cycler(lw=linewidth) + cycler(marker=marker) + cycler(markersize=s)\n ax.set_prop_cycle(custom_cycler)\n\n\ndef create_cmap_using_values(colors=None, color1='greenyellow', color2='darkgreen', color3=None, n=100):\n \"\"\"\n Create a colormap instance from a list\n ... 
same as mpl.colors.LinearSegmentedColormap.from_list()\n Parameters\n ----------\n colors\n color1\n color2\n n\n\n Returns\n -------\n\n \"\"\"\n if colors is None:\n colors = get_color_list_gradient(color1=color1, color2=color2, color3=color3, n=n)\n cmap_name = 'new_cmap'\n newcmap = mpl.colors.LinearSegmentedColormap.from_list(cmap_name, colors, N=n)\n return newcmap\n\n\ndef get_colors_and_cmap_using_values(values, cmap=None, color1='greenyellow', color2='darkgreen', color3=None,\n vmin=None, vmax=None, n=100):\n \"\"\"\n Returns colors (list), cmap instance, mpl.colors.Normalize instance assigned by the\n ...\n\n Parameters\n ----------\n values: 1d array-like,\n cmap: str or matplotlib.colors.Colormap instance\n color1:\n color2\n vmin\n vmax\n n\n\n Returns\n -------\n colors, cmap, norm\n\n \"\"\"\n values = np.asarray(values)\n if vmin is None:\n vmin = np.nanmin(values)\n if vmax is None:\n vmax = np.nanmax(values)\n if cmap is None:\n cmap = create_cmap_using_values(color1=color1, color2=color2, color3=color3, n=n)\n else:\n cmap = plt.get_cmap(cmap, n)\n # normalize\n # vmin, vmax = np.nanmin(values), np.nanmax(values)\n norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)\n colors = cmap(norm(values))\n return colors, cmap, norm\n\ndef get_color_list_gradient(color1='greenyellow', color2='darkgreen', color3=None, n=100, return_cmap=False):\n \"\"\"\n Returns a list of colors in RGB between color1 and color2\n Input (color1 and color2) can be RGB or color names set by matplotlib\n ... color1-color2-color3\n\n Parameters\n ----------\n color1\n color2\n n: length of the returning list\n\n Returns\n -------\n color_list\n \"\"\"\n if color3 is None:\n # convert color names to rgb if rgb is not given as arguments\n if not color1[0] == '#':\n color1 = cname2hex(color1)\n if not color2[0] == '#':\n color2 = cname2hex(color2)\n color1_rgb = hex2rgb(color1) / 255. # np array\n color2_rgb = hex2rgb(color2) / 255. # np array\n\n r = np.linspace(color1_rgb[0], color2_rgb[0], n)\n g = np.linspace(color1_rgb[1], color2_rgb[1], n)\n b = np.linspace(color1_rgb[2], color2_rgb[2], n)\n color_list = list(zip(r, g, b))\n else:\n # convert color names to rgb if rgb is not given as arguments\n if not color1[0] == '#':\n color1 = cname2hex(color1)\n if not color2[0] == '#':\n color2 = cname2hex(color2)\n if not color3[0] == '#':\n color3 = cname2hex(color3)\n color1_rgb = hex2rgb(color1) / 255. # np array\n color2_rgb = hex2rgb(color2) / 255. # np array\n color3_rgb = hex2rgb(color3) / 255. # np array\n\n n_middle = int((n-1)/2)\n\n r1 = np.linspace(color1_rgb[0], color2_rgb[0], n_middle, endpoint=False)\n g1 = np.linspace(color1_rgb[1], color2_rgb[1], n_middle, endpoint=False)\n b1 = np.linspace(color1_rgb[2], color2_rgb[2], n_middle, endpoint=False)\n color_list1 = list(zip(r1, g1, b1))\n\n r2 = np.linspace(color2_rgb[0], color3_rgb[0], n-n_middle)\n g2 = np.linspace(color2_rgb[1], color3_rgb[1], n-n_middle)\n b2 = np.linspace(color2_rgb[2], color3_rgb[2], n-n_middle)\n color_list2 = list(zip(r2, g2, b2))\n color_list = color_list1 + color_list2\n if return_cmap:\n cmap = create_cmap_using_values(colors=color_list, n=n)\n return color_list, cmap\n else:\n return color_list\n\ndef get_color_from_cmap(cmap='viridis', n=10, lut=None, reverse=False):\n \"\"\"\n A simple function which returns a list of RGBA values from a cmap (evenly spaced)\n ... If one desires to assign a color based on values, use get_colors_and_cmap_using_values()\n ... 
If one prefers to get colors between two colors of choice, use get_color_list_gradient()\n Parameters\n ----------\n cmap: str, standard cmap name\n n: int, number of colors\n lut: int,\n ... If lut is not None it must be an integer giving the number of entries desired in the lookup table,\n and name must be a standard mpl colormap name.\n\n Returns\n -------\n colors\n\n \"\"\"\n cmap = mpl.cm.get_cmap(cmap, lut)\n if reverse:\n cmap = cmap.reversed()\n colors = cmap(np.linspace(0, 1, n, endpoint=True))\n return colors\n\ndef hex2rgb(hex):\n \"\"\"\n Converts a HEX code to RGB in a numpy array\n Parameters\n ----------\n hex: str, hex code. e.g. #B4FBB8\n\n Returns\n -------\n rgb: numpy array. RGB\n\n \"\"\"\n h = hex.strip('#')\n rgb = np.asarray(list(int(h[i:i + 2], 16) for i in (0, 2, 4)))\n return rgb\n\ndef cname2hex(cname):\n \"\"\"\n Converts a color registered on matplotlib to a HEX code\n Parameters\n ----------\n cname\n\n Returns\n -------\n\n \"\"\"\n colors = dict(mpl.colors.BASE_COLORS, **mpl.colors.CSS4_COLORS) # dictionary. key: names, values: hex codes\n try:\n hex = colors[cname]\n return hex\n except KeyError: # an unknown color name raises KeyError, not NameError\n print(cname, ' is not registered as default colors by matplotlib!')\n return None\n\ndef set_default_color_cycle(name='tab10', n=10, colors=None):\n \"\"\"\n Sets a color cycle for plotting\n\n sns_palettes = ['deep', 'muted', 'bright', 'pastel', 'dark', 'colorblind'] # sns_palettes\n matplotlib cmap names: 'tab10' (default cmap of mpl), 'tab20', 'Set1', 'Set2' etc.\n (https://matplotlib.org/stable/tutorials/colors/colormaps.html)\n ... One may specify the color cycles using the existing color maps (seaborn and matplotlib presets)\n or a list of colors specified by a user.\n ... For the presets, pass a name of the colormap like \"tab10\" (mpl default), \"muted\" (seaborn default)\n ... For a more customized color cycle, pass a list of colors to 'colors'.\n\n Parameters\n ----------\n name: str, name of the cmap\n n: int, number of colors\n colors: list, a list of colors like ['r', 'b', 'g', 'magenta']\n\n Returns\n -------\n None\n \"\"\"\n if colors is None:\n colors = sns.color_palette(name, n_colors=n)\n sns.set_palette(colors)\n
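\n# A minimal usage sketch for the color helpers above (untested):\n# print(cname2hex('navy'), hex2rgb('#B4FBB8')) # color name -> hex code, hex code -> RGB array\n# set_default_color_cycle(name='tab20', n=20) # apply a matplotlib/seaborn preset palette globally\n\ndef set_color_cycle(cmapname='tab10', ax=None, n=10, colors=None):\n \"\"\"\n Sets a color cycle of a particular Axes instance\n\n sns_palettes = ['deep', 'muted', 'bright', 'pastel', 'dark', 'colorblind'] # sns_palettes\n matplotlib cmap names: 'tab10' (default cmap of mpl), 'tab20', 'Set1', 'Set2' etc.\n (https://matplotlib.org/stable/tutorials/colors/colormaps.html)\n ... One may specify the color cycles using the existing color maps (seaborn and matplotlib presets)\n or a list of colors specified by a user.\n ... For the presets, pass a name of the colormap like \"tab10\" (mpl default), \"muted\" (seaborn default)\n ... 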
For a more customized color cycle, pass a list of colors to 'colors'.\n\n Parameters\n ----------\n cmapname: str, name of the cmap like 'viridis', 'jet', etc.\n n: int, number of colors\n colors: list, a list of colors like ['r', 'b', 'g', 'magenta']\n\n Returns\n -------\n None\n \"\"\"\n if colors is None:\n colors = sns.color_palette(cmapname, n_colors=n)\n if ax is None:\n sns.set_palette(colors)\n else:\n ax.set_prop_cycle(color=colors)\n\ndef set_color_cycle_custom(ax, colors=__def_colors__):\n \"\"\"\n Sets a color cycle using a list\n Parameters\n ----------\n ax\n colors: list of colors in rgb/cnames/hex codes\n\n Returns\n -------\n\n \"\"\"\n ax.set_prop_cycle(color=colors)\n\ndef set_color_cycle_gradient(ax, color1='greenyellow', color2='navy', n=10):\n colors = get_color_list_gradient(color1, color2, n=n)\n ax.set_prop_cycle(color=colors)\n\n\n\n\n# Figure settings\ndef update_figure_params(params):\n \"\"\"\n update a default matplotlib setting\n e.g. params = { 'legend.fontsize': 'x-large',\n 'figure.figsize': (15, 5),\n 'axes.labelsize': 'x-large',\n 'axes.titlesize':'x-large',\n 'xtick.labelsize':'x-large',\n 'ytick.labelsize':'x-large'}\n Parameters\n ----------\n params: dictionary\n\n Returns\n -------\n\n \"\"\"\n pylab.rcParams.update(params)\n\ndef reset_figure_params():\n pylab.rcParams.update(params)\n\ndef default_figure_params():\n mpl.rcParams.update(mpl.rcParamsDefault)\n\n# Use the settings above as a default\nreset_figure_params()\n\n## 3D plotting\ndef set_axes_equal(ax):\n '''Make axes of 3D plot have equal scale so that spheres appear as spheres,\n cubes as cubes, etc.. This is one possible solution to Matplotlib's\n ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\n\n Input\n ax: a matplotlib axis, e.g., as output from plt.gca().\n '''\n\n x_limits = ax.get_xlim3d()\n y_limits = ax.get_ylim3d()\n z_limits = ax.get_zlim3d()\n\n x_range = abs(x_limits[1] - x_limits[0])\n x_middle = np.mean(x_limits)\n y_range = abs(y_limits[1] - y_limits[0])\n y_middle = np.mean(y_limits)\n z_range = abs(z_limits[1] - z_limits[0])\n z_middle = np.mean(z_limits)\n\n # The plot bounding box is a sphere in the sense of the infinity\n # norm, hence I call half the max range the plot radius.\n plot_radius = 0.5*max([x_range, y_range, z_range])\n\n ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])\n ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])\n ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])\n\n\n\n# plotting styles\ndef show_plot_styles():\n \"\"\"Prints available plotting styles\"\"\"\n style_list = ['default'] + sorted(style for style in plt.style.available)\n print(style_list)\n return style_list\ndef use_plot_style(stylename):\n \"\"\"Reminder for me how to set a plotting style\"\"\"\n plt.style.use(stylename)\n\n#\ndef get_markers():\n \"\"\"Returns a list of available markers for ax.scatter()\"\"\"\n filled_markers = list(Line2D.filled_markers)\n unfilled_markers = [m for m, func in Line2D.markers.items()\n if func != 'nothing' and m not in Line2D.filled_markers]\n markers = filled_markers + unfilled_markers\n return markers\n\n# Embedded plots\ndef add_subplot_axes(ax, rect, axisbg='w', alpha=1, **kwargs):\n \"\"\"\n Creates a sub-subplot inside the subplot (ax)\n rect: list, [x, y, width, height] e.g. rect = [0.2,0.2,0.7,0.7]\n\n Parameters\n ----------\n ax\n rect: list, [x, y, width, height] e.g. 
rect = [0.2,0.2,0.7,0.7]\n axisbg: background color of the newly created axes object\n\n Returns\n -------\n subax, Axes class object\n \"\"\"\n\n fig = plt.gcf()\n box = ax.get_position()\n width = box.width\n height = box.height\n inax_position = ax.transAxes.transform(rect[0:2])\n transFigure = fig.transFigure.inverted()\n infig_position = transFigure.transform(inax_position)\n x = infig_position[0]\n y = infig_position[1]\n width *= rect[2]\n height *= rect[3]\n subax = fig.add_axes([x, y, width, height], **kwargs)\n subax.set_facecolor(axisbg)\n subax.patch.set_alpha(alpha)\n x_labelsize = subax.get_xticklabels()[0].get_size()\n y_labelsize = subax.get_yticklabels()[0].get_size()\n x_labelsize *= rect[2]**0.3\n y_labelsize *= rect[3]**0.3\n subax.xaxis.set_tick_params(labelsize=x_labelsize)\n subax.yaxis.set_tick_params(labelsize=y_labelsize)\n return subax\n\n\n# sketches\ndef draw_circle(ax, x, y, r, linewidth=1, edgecolor='r', facecolor='none', fill=False, **kwargs):\n \"\"\"\n Draws a circle in a figure (ax)\n Parameters\n ----------\n ax\n x\n y\n r\n linewidth\n edgecolor\n facecolor\n fill\n\n Returns\n -------\n\n \"\"\"\n circle = plt.Circle((x, y), r, linewidth=linewidth, edgecolor=edgecolor, facecolor=facecolor, fill=fill, **kwargs)\n ax.add_artist(circle)\n return circle\n\ndef draw_rectangle(ax, x, y, width, height, angle=0.0, linewidth=1, edgecolor='r', facecolor='none', **kwargs):\n \"\"\"\n Draws a rectangle in a figure (ax)\n Parameters\n ----------\n ax\n x\n y\n width\n height\n angle\n linewidth\n edgecolor\n facecolor\n kwargs\n\n Returns\n -------\n\n \"\"\"\n rect = mpatches.Rectangle((x, y), width, height, angle=angle, linewidth=linewidth, edgecolor=edgecolor,\n facecolor=facecolor, **kwargs)\n ax.add_patch(rect)\n ax.axis('equal') # this ensures to show the rectangle if the rectangle is bigger than the original size\n return rect\n\n\ndef draw_box(ax, xx, yy, w_box=351., h_box=351., xoffset=0, yoffset=0, linewidth=5,\n scalebar=True, sb_length=50., sb_units='$mm$', sb_loc=(0.95, 0.1), sb_txtloc=(0.0, 0.4),\n sb_lw=10, sb_txtcolor='white', fontsize=None,\n facecolor='k', fluidcolor=None,\n bounding_box=True, bb_lw=1, bb_color='w'):\n \"\"\"\n Draws a box and fills the surrounding area with color (default: skyblue)\n Adds a scalebar by default\n ... drawn box center coincides with the center of given grids(xx, yy)\n ... in order to shift the center of the box, use xoffset any yoffset\n Parameters\n ----------\n ax: matplotlib.axes.Axes instance\n xx: 2d numpy array\n x coordinates\n yy: 2d numpy array\n y coordinates\n w_box: float/int\n width of the box- used to be set as 325\n h_box: float/int\n height of the box- used to be set as 325\n xoffset: float/int\n real number to shift the box center in the x direction\n yoffset:\n real number to shift the box center in the x direction\n linewidth: int\n linewidth of drawn box\n scalebar: bool (default: True)\n ... draws a scalebar inside the drawn box\n sb_length: int\n ... length of the scale bar in physical units.\n ...... In principle, this can be float. If you want that, edit the code where ax.text() is called.\n ...... Generalizing to accept the float requires a format which could vary everytime, so just accept integer.\n sb_units: str\n ... units of the sb_length. Default: '$mm$'\n sb_loc: tuple, (x, y)\n ... location of the scale bar. Range: [0, 1]\n ... the units are with respect the width and height of the box\n sb_txtloc: tuple, (x, y)\n ... location of the TEXT of the scale bar. Range: [0, 1]\n ... 
x=0: LEFT of the scale bar, x=1: RIGHT of the scale bar\n ... y=0: LEFT of the scale bar, x=1: RIGHT of the scale bar\n\n sb_lw: float\n ... line width of the scale bar\n\n facecolor\n fluidcolor\n\n Returns\n -------\n\n \"\"\"\n xmin, xmax = np.nanmin(xx), np.nanmax(xx)\n ymin, ymax = np.nanmin(yy), np.nanmax(yy)\n # if np.nanmean(yy) > 0:\n # xc, yc = xmin + (xmax - xmin) / 2., ymin + (ymax - ymin) / 2.\n # else:\n # xc, yc = xmin + (xmax - xmin) / 2., ymin - (ymax - ymin) / 2.\n xc, yc = xmin + (xmax - xmin) / 2., ymin + (ymax - ymin) / 2.\n x0, y0 = xc - w_box / 2. + xoffset, yc - h_box / 2. + yoffset\n draw_rectangle(ax, x0, y0, w_box, h_box, linewidth=linewidth, facecolor=facecolor, zorder=0)\n if fluidcolor is not None:\n ax.set_facecolor(fluidcolor)\n\n if bounding_box:\n w, h = xmax-xmin, ymax-ymin\n draw_rectangle(ax, xmin, ymin, width=w, height=h, edgecolor=bb_color, linewidth=bb_lw)\n\n if scalebar:\n dx, dy = np.abs(xx[0, 1] - xx[0, 0]), np.abs(yy[1, 0] - yy[0, 0]) # mm/px\n\n # x0_sb, y0_sb = x0 + 0.8 * w_box, y0 + 0.1*h_box\n x1_sb, y1_sb = x0 + sb_loc[0] * w_box, y0 + sb_loc[1] * h_box\n x0_sb, y0_sb = x1_sb - sb_length, y1_sb\n if sb_loc[1] < 0.5:\n x_sb_txt, y_sb_txt = x0_sb + sb_txtloc[0] * sb_length, y0 + sb_loc[1] * h_box * sb_txtloc[1]\n else:\n x_sb_txt, y_sb_txt = x0_sb + sb_txtloc[0] * sb_length, y0 - (1 - sb_loc[1]) * h_box * sb_txtloc[1] + sb_loc[1] * h_box\n x_sb, y_sb = [x0_sb, x1_sb], [y0_sb, y1_sb]\n xmin, xmax, ymin, ymax = ax.axis()\n width, height = xmax - xmin, ymax - ymin\n ax.plot(x_sb, y_sb, linewidth=sb_lw, color=sb_txtcolor)\n if fontsize is None or fontsize>0:\n ax.text(x_sb_txt, y_sb_txt, '%d %s' % (sb_length, sb_units), color=sb_txtcolor, fontsize=fontsize)\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n\n\ndef draw_cuboid(ax, xx, yy, zz, color='c', lw=2, **kwargs):\n\n\n xmin, xmax = np.nanmin(xx), np.nanmax(xx)\n ymin, ymax = np.nanmin(yy), np.nanmax(yy)\n zmin, zmax = np.nanmin(zz), np.nanmax(zz)\n rx = [xmin, xmax]\n ry = [ymin, ymax]\n rz = [zmin, zmax]\n w, h, d = xmax - xmin, ymax - ymin, zmax - zmin\n for s, e in itertools.combinations(np.array(list(itertools.product(rx, ry, rz))), 2):\n dist = np.linalg.norm(s - e)\n if dist in [w, h, d]:\n ax.plot3D(*zip(s, e), color=color, lw=lw, **kwargs)\n\n ax.set_xlim(rx)\n ax.set_ylim(ry)\n ax.set_zlim(rz)\n set_axes_equal(ax)\n\ndef draw_sphere(ax, xc, yc, zc, r, color='r', lw=1, **kwargs):\n # draw sphere\n u, v = np.mgrid[0:2 * np.pi:20j, 0:np.pi:10j]\n x = r * np.cos(u) * np.sin(v) + xc\n y = r * np.sin(u) * np.sin(v) + yc\n z = r * np.cos(v) + zc\n ax.plot_wireframe(x, y, z, color=color, lw=lw, **kwargs)\n set_axes_equal(ax)\n\n\ndef add_color_wheel(fig=None, fignum=1, figsize=__figsize__,\n rect=[0.68, 0.65, 0.2, 0.2],\n cmap=None, cmapname='hsv',\n norm=None, values=[-np.pi, np.pi],\n n=2056,\n ring=True,\n text='Phase',\n fontsize=__fontsize__,\n ratio=1, text_loc_ratio=0.35, text_loc_angle=np.pi*1.07,\n **kwargs\n ):\n if fig is None:\n fig = plt.figure(num=fignum, figsize=figsize)\n\n subax = fig.add_axes(rect, projection='polar')\n subax._direction = 2*np.pi\n\n if cmap is None or norm is None:\n colors, cmap, norm = get_colors_and_cmap_using_values(values, cmap=cmapname, n=n)\n\n cb = mpl.colorbar.ColorbarBase(subax,\n cmap=cmap,\n norm=norm,\n orientation='horizontal')\n\n # aesthetics - get 
rid of border and axis labels\n cb.outline.set_visible(False)\n subax.set_axis_off()\n\n if ring:\n w = values[1] - values[0]\n subax.set_rlim([values[0] - w * ratio , values[1]]) # This makes it a color RING not a wheel (filled circle)\n # addtext(subax, text, np.pi*1.07, (values[0] - w/2.9 ), color='w', fontsize=fontsize)\n addtext(subax, text,\n text_loc_angle,\n values[0] - w * ratio * (1/ (1/text_loc_ratio * ratio + 1)), color='w', fontsize=fontsize)\n\n\n # subax2 = fig.add_axes([0.2, 0.2, 0.6, 0.6])\n # plot([0, 1], [0, 1], ax=subax2)\n # addtext(subax2, text, 0, 0, color='w', fontsize=fontsize)\n # # print(values[0], values[0] - w)\n return subax, cb\n## misc.\ndef simplest_fraction_in_interval(x, y):\n \"\"\"Return the fraction with the lowest denominator in [x,y].\"\"\"\n if x == y:\n # The algorithm will not terminate if x and y are equal.\n raise ValueError(\"Equal arguments.\")\n elif x < 0 and y < 0:\n # Handle negative arguments by solving positive case and negating.\n return -simplest_fraction_in_interval(-y, -x)\n elif x <= 0 or y <= 0:\n # One argument is 0, or arguments are on opposite sides of 0, so\n # the simplest fraction in interval is 0 exactly.\n return Fraction(0)\n else:\n # Remainder and Coefficient of continued fractions for x and y.\n xr, xc = modf(1/x);\n yr, yc = modf(1/y);\n if xc < yc:\n return Fraction(1, int(xc) + 1)\n elif yc < xc:\n return Fraction(1, int(yc) + 1)\n else:\n return 1 / (int(xc) + simplest_fraction_in_interval(xr, yr))\n\ndef approximate_fraction(x, e):\n \"\"\"Return the fraction with the lowest denominator that differs\n from x by no more than e.\"\"\"\n return simplest_fraction_in_interval(x - e, x + e)\n\ndef get_mask4erroneous_pts(x, y, thd=1):\n \"\"\"\n Retruns a mask that can be sued to hide erroneous data points for 1D plots\n ... e.g. x[mask] and y[mask] hide the jumps which appear to be false to human eyes\n ... Uses P = dy/dx / y to determine whether data points appear to be false\n If P is high, we'd expect a jump. thd is a threshold of P.\n\n Parameters\n ----------\n x: 1d array\n y: 1d array\n thd: float, threshold on P (fractional dy/dx)\n\n Returns\n -------\n mask: 1d bool array\n\n \"\"\"\n # remove nans\n keep_x, keep_y = ~np.isnan(x), ~np.isnan(y)\n keep = keep_x * keep_y\n x, y = x[keep], y[keep]\n\n fractional_dydx = np.gradient(y, x) / y\n reasonable_rate_of_change = np.abs(fractional_dydx) < thd # len(reasonable_rate_of_change) is not necessarily equal to len(keep)\n reasonable_rate_of_change = np.roll(reasonable_rate_of_change, 1) # shift the resulting array (the convention of np.gradient)\n keep[keep] = reasonable_rate_of_change\n return keep\n\ndef tight_layout(fig, rect=[0, 0.03, 1, 0.95]):\n \"\"\"\n Reminder for myself how tight_layout works with the ect option\n fig.tight_layout(rect=rect)\n Parameters\n ----------\n fig\n rect\n\n Returns\n -------\n \"\"\"\n fig.tight_layout(rect=rect)\n\n# data extraction from fig\ndef get_scatter_data_from_fig(fig, axis_number=0):\n \"\"\"\n Return x, y data of scattered data in a figure instance\n ... It requires a different code to extract scattered data from a Figure instance, compared to plt.plot() output (type: line?)\n ... Scattered data are stored as an collections.collection object.\n\n Parameters\n ----------\n fig: matplotlib.Figure object\n axis_number: int, number to specify which axis user refers to. 
ax = fig.axes[axis_number]\n\n Returns\n -------\n data_list: list, each element of a list is a 2d array which store x,y coordinates of the scattered data points\n \"\"\"\n\n n_col = len(fig.axes[axis_number].collections)\n data_list = []\n for i in range(n_col):\n data_list.append(fig.axes[axis_number].collections[i].get_offsets())\n return data_list\n\ndef get_plot_data_from_fig(fig, axis_number=0):\n \"\"\"\n Returns a list of data included in the figure\n ... this function extracts data points for fig.ax.lines\n ... Any other data must be returned\n\n Parameters\n ----------\n fig\n axis_number\n\n Returns\n -------\n\n \"\"\"\n nlines = len(fig.axes[axis_number].lines)\n xlist, ylist = [], []\n for i in range(nlines):\n x = fig.axes[axis_number].lines[i]._xorig\n y = fig.axes[axis_number].lines[i]._yorig\n xlist.append(x)\n ylist.append(y)\n return xlist, ylist\n\n## Interactive plotting\nclass LineDrawer(object):\n \"\"\"\n Class which allows users to draw lines/splines by clicking pts on the plot\n ... Default: lines/splines are closed.\n ... make sure that matplotlib backend is interactive\n\n Procedure for self.draw_lines() or self.draw_splines:\n It uses plt.ginput()\n 1. Add a point by a left click\n 2. Remove a point by a right click\n 3. Stop interaction (move onto the next line to draw)\n\n Example\n # Pass matplotlib.axes._subplots.AxesSubplot object whose coordinates are used for extracting pts\n ld = LineDrawer(ax)\n\n # Draw lines/splines\n ld.draw_lines(n=5) # Draw 5 lines (connecting 5 set of points)\n # ld.draw_splines(n=2) # Or draw 2 splines based on the clicked points\n\n xs, ys = ld.xs, ld.ys # Retrieve x and y coords of pts used to draw lines/splines\n # xis, yis = ld.xis, ld.yis # Retrieve x and y coords for each spline\n\n # plot the first contour\n plt.plot(xs[0], ys[0]\n\n # for example, I could feed this contour to compute a line integral using vel.compute_circulation()\n\n\n \"\"\"\n\n def __init__(self, ax):\n self.ax = ax\n\n def get_contour(self, npt=100, close=True):\n ax = self.ax\n xy = plt.ginput(npt)\n\n x = [p[0] for p in xy]\n y = [p[1] for p in xy]\n # line = ax.scatter(x,y, marker='x', s=20, zorder=100)\n # ax.figure.canvas.draw()\n # self.lines.append(line)\n\n if close:\n # append the starting x,y coordinates\n x = np.r_[x, x[0]]\n y = np.r_[y, y[0]]\n\n self.x = x\n self.y = y\n\n return x, y\n\n def draw_lines(self, n=1, close=True):\n ax = self.ax\n xs, ys = [], []\n for i in range(n):\n x, y = self.get_contour(close=close)\n xs.append(x)\n ys.append(y)\n\n ax.plot(x, y)\n\n self.xs = xs\n self.ys = ys\n\n def spline_fit(self, x, y, n=1000):\n from scipy import interpolate\n # fit splines to x=f(u) and y=g(u), treating both as periodic. 
also note that s=0\n # is needed in order to force the spline fit to pass through all the input points.\n tck, u = interpolate.splprep([x, y], s=0, per=True)\n\n # evaluate the spline fits for 1000 evenly spaced distance values\n xi, yi = interpolate.splev(np.linspace(0, 1, n), tck)\n\n return xi, yi\n\n def draw_splines(self, n=1, npt=100, n_sp=1000, close=True):\n ax = self.ax\n\n xs, ys = [], []\n xis, yis = [], []\n for i in range(n):\n x, y = self.get_contour(npt=npt, close=close)\n xi, yi = self.spline_fit(x, y, n=n_sp)\n\n xs.append(x)\n ys.append(y)\n\n xis.append(xi)\n yis.append(yi)\n\n # ax.plot(x, y)\n ax.plot(xi, yi)\n\n self.xs = xs\n self.ys = ys\n self.xis = xis\n self.yis = yis\n\n def return_pts_on_splines(self):\n return self.xis, self.yis\n\n def close(self):\n plt.close()\n\n\nclass PointFinder(object):\n def __init__(self, ax, xx, yy, weight=None):\n self.ax = ax\n self.xx = xx\n self.yy = yy\n self.ind = None\n\n if weight is None:\n self.weight = np.ones_like(xx)\n else:\n self.weight = weight\n\n def get_pts(self, npt=100):\n def find_indices(xx, yy, xs, ys):\n xg, yg = xx[0, :], yy[:, 0]\n xmin, xmax, ymin, ymax = xg.min(), xg.max(), yg.min(), yg.max()\n # i_list, j_list = [], []\n inds = []\n for n in range(len(xs)):\n if xs[n] > xmin and xs[n] < xmax and ys[n] > ymin and ys[n] < ymax:\n\n X = np.abs(xg - xs[n])\n Y = np.abs(yg - ys[n])\n j = int(np.where(X == X.min())[0])\n i = int(np.where(Y == Y.min())[0])\n # i_list.append(i)\n # j_list.append(j)\n else:\n i, j = np.nan, np.nan\n inds.append(np.asarray([i, j]))\n return inds\n\n\n ax = self.ax\n xy = plt.ginput(npt)\n x = [p[0] for p in xy]\n y = [p[1] for p in xy]\n\n inds = find_indices(self.xx, self.yy, x, y)\n self.ind = inds\n self.x = x\n self.y = y\n return x, y, inds\n\n def find_local_center_of_mass(self, kernel_radius=2):\n def get_subarray(arr, i, j, kernel_radius):\n arr = np.asarray(arr)\n nrows, ncols = arr.shape\n\n imax = i + kernel_radius\n imin = i - kernel_radius\n jmax = j + kernel_radius\n jmin = j - kernel_radius\n\n\n if imax >= nrows:\n imax = nrows - 1\n if imin < 0:\n imin = 0\n if jmax >= ncols:\n jmax = ncols - 1\n if jmin < 0:\n jmin = 0\n subarr = arr[imin:imax, jmin:jmax]\n return subarr\n\n xcs, ycs = [], []\n\n\n for n, idx in enumerate(self.ind):\n if ~np.isnan(idx[0]):\n xx_sub = get_subarray(self.xx, idx[0], idx[1], kernel_radius=kernel_radius)\n yy_sub = get_subarray(self.yy, idx[0], idx[1], kernel_radius=kernel_radius)\n weight_sub = get_subarray(self.weight, idx[0], idx[1], kernel_radius=kernel_radius)\n\n xc = np.nansum(xx_sub * weight_sub) / np.nansum(weight_sub)\n yc = np.nansum(yy_sub * weight_sub) / np.nansum(weight_sub)\n else:\n xc, yc = np.nan, np.nan\n xcs.append(xc)\n ycs.append(yc)\n\n self.ax.scatter([xc], [yc], marker='x', color='k')\n self.xc = xcs\n self.yc = ycs\n\n return xcs, ycs\n\n def get_local_center_of_mass(self, npt=100, kernel_radius=2):\n x, y, inds = self.get_pts(npt=npt)\n xcs, ycs = self.find_local_center_of_mass(kernel_radius=kernel_radius)\n return xcs, ycs\n\n # def get_local_center_of_mass(self, weight, kernel_size=3):\n # from scipy import ndimage\n # import numpy as np\n # arr_conv = ndimage.generic_filter(weight, np.nanmean, size=kernel_size,\n # mode='constant', cval=np.NaN)\n\n## backend\ndef get_current_backend():\n gui = mpl.get_backend()\n print(gui)\n return gui\n\ndef list_available_backends():\n current_backend = mpl.get_backend()\n\n gui_backends = [i for i in mpl.rcsetup.interactive_bk]\n non_gui_backends = 
mpl.rcsetup.non_interactive_bk\n # gui_env = ['TKAgg', 'GTKAgg', 'Qt4Agg', 'WXAgg']\n\n backends = gui_backends + non_gui_backends\n\n available_backends = []\n\n print (\"Non Gui backends are:\", non_gui_backends)\n print (\"Gui backends I will test for\", gui_backends)\n for backend in backends:\n try:\n mpl.use(backend, warn=False, force=True)\n available_backends.append(backend)\n except:\n continue\n print('Available backends:')\n print(available_backends)\n\n mpl.use(current_backend)\n print(\"Currently using:\", mpl.get_backend() )\n\ndef use_backend(name='agg'):\n mpl.use(name)\n\n\n# smooth a curve using convolution\ndef smooth1d(x, window_len=11, window='hanning', log=False):\n \"\"\"smooth the data using a window with requested size.\n\n This method is based on the convolution of a scaled window with a given signal.\n The signal is prepared by introducing reflected copies of the signal\n (with the window size) in both ends so that transient parts are minimized\n in the beginning and end part of the output signal.\n\n input:\n x: the input signal\n window_len: the dimension of the smoothing window; should be an odd integer\n window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n flat window will produce a moving average smoothing.\n\n output:\n the smoothed signal\n\n example:\n\n t=linspace(-2,2,0.1)\n x=sin(t)+randn(len(t))*0.1\n y=smooth(x)\n\n see also:\n\n numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n scipy.signal.filter\n\n TODO: the window parameter could be the window itself if an array instead of a string\n NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n \"\"\"\n\n if x.ndim != 1:\n raise ValueError(\"smooth() only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n if log:\n x = np.log(x)\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n if not log:\n return y[(window_len//2-1):(window_len//2-1)+len(x)]\n else:\n return np.exp(y[(window_len // 2 - 1):(window_len // 2 - 1) + len(x)])\n\ndef add_secondary_xaxis(ax, functions=None, loc='top', label='', log=False, **kwargs):\n \"\"\"\n Adds a secondary x-axis at the top\n ... Must pass a pair of mapping functions between a current x and a new x\n\n e.g.\n def deg2rad(x):\n return x * np.pi / 180\n def rad2deg(x):\n return x * 180 / np.pi\n add_secondary_xaxis(ax, functions=(deg2rad, rad2deg))\n\n Parameters\n ----------\n ax\n functions\n\n Returns\n -------\n secax\n\n \"\"\"\n if functions is None:\n print('add_secondary_xaxis: supply a mapping function (Current X to New X) and its inverse function')\n print('... e.g. 
(deg2rad, rad2deg)')\n\n def f1(x):\n return 2 * x\n\n def f2(x):\n return x / 2\n\n functions = (f1, f2)\n secax = ax.secondary_xaxis(location=loc, functions=functions)\n secax.set_xlabel(label, **kwargs)\n if log:\n secax.set_xscale(\"log\")\n return secax\n\ndef add_secondary_yaxis(ax, functions=None, loc='right', label='', log=False, **kwargs):\n \"\"\"\n Adds a secondary yaxis at the top\n ... Must pass a pair of mapping functions between a current x and a new x\n\n e.g.\n def deg2rad(y):\n return y * np.pi / 180\n def rad2deg(y):\n return y * 180 / np.pi\n add_secondary_yaxis(ax, functions=(deg2rad, rad2deg))\n\n Parameters\n ----------\n ax\n functions\n\n Returns\n -------\n secax\n\n \"\"\"\n if functions is None:\n print('add_secondary_xaxis: supply a mapping function (Current X to New X) and its inverse function')\n print('... e.g. (deg2rad, rad2deg)')\n\n def f1(x):\n return 2 * x\n\n def f2(x):\n return x / 2\n\n functions = (f1, f2)\n secax = ax.secondary_yaxis(location=loc, functions=functions)\n secax.set_ylabel(label, **kwargs)\n if log:\n secax.set_yscale(\"log\")\n return secax\n\n\ndef use_symmetric_ylim(ax):\n bottom, top = ax.get_ylim()\n if bottom * top < 0:\n bottom, top = -np.max([-bottom, top]), np.max([-bottom, top])\n ax.set_ylim(bottom=bottom, top=top)\n return ax\n\n\ndef get_binned_stats(arg, var, n_bins=100, mode='linear', bin_center=True, return_std=False):\n \"\"\"\n Make a histogram out of a pair of 1d arrays.\n ... Returns arg_bins, var_mean, var_err\n ... The given arrays could contain nans and infs. They will be ignored.\n\n Parameters\n ----------\n arg: 1d array, controlling variable\n var: 1d array, data array to be binned\n n_bins: int, default: 100\n mode: str, deafult: 'linear'\n If 'linear', var will be sorted to equally spaced bins. i.e. bin centers increase linearly.\n If 'log', the bins will be not equally spaced. Instead, they will be equally spaced in log.\n ... bin centers will be like... 10**0, 10**0.5, 10**1.0, 10**1.5, ..., 10**9\n return_std: bool\n If True, it returns the STD of the statistics instead of the error = STD / np.sqrt(N-1)\n Returns\n -------\n arg_bins: 1d array, bin centers\n var_mean: 1d array, mean values of data in each bin\n var_err: 1d array, std of data in each bin\n\n \"\"\"\n\n def sort2arr(arr1, arr2):\n \"\"\"\n Sort arr1 and arr2 using the order of arr1\n e.g. 
a=[2,1,3], b=[9,1,4] -> a[1,2,3], b=[1,9,4]\n Parameters\n ----------\n arr1\n arr2\n\n Returns\n -------\n Sorted arr1, and arr2\n\n \"\"\"\n arr1, arr2 = list(zip(*sorted(zip(arr1, arr2))))\n return np.asarray(arr1), np.asarray(arr2)\n\n def get_mask_for_nan_and_inf(U):\n \"\"\"\n Returns a mask for nan and inf values in a multidimensional array U\n Parameters\n ----------\n U: N-d array\n\n Returns\n -------\n\n \"\"\"\n U = np.array(U)\n U_masked_invalid = ma.masked_invalid(U)\n return U_masked_invalid.mask\n\n arg, var = np.asarray(arg), np.asarray(var)\n\n # make sure rr and corr do not contain nans\n mask1 = get_mask_for_nan_and_inf(arg)\n mask1 = ~mask1\n mask2 = get_mask_for_nan_and_inf(var)\n mask2 = ~mask2\n mask = mask1 * mask2\n\n if mode == 'log':\n argmin, argmax = np.nanmin(arg), np.nanmax(arg)\n mask_for_log10arg = get_mask_for_nan_and_inf(np.log10(arg))\n exp_min, exp_max = np.nanmin(np.log10(arg)[~mask_for_log10arg]), np.nanmax(np.log10(arg)[~mask_for_log10arg])\n exp_interval = (exp_max - exp_min) / n_bins\n exp_bin_centers = np.linspace(exp_min, exp_max, n_bins)\n exp_bin_edges = np.append(exp_bin_centers, exp_max + exp_interval) - exp_interval / 2.\n bin_edges = 10 ** (exp_bin_edges)\n bins = bin_edges\n mask_for_arg = get_mask_for_nan_and_inf(bins)\n bins = bins[~mask_for_arg]\n else:\n bins = n_bins\n\n # get a histogram\n if not bin_center:\n arg_means, arg_edges, binnumber = binned_statistic(arg[mask], arg[mask], statistic='mean', bins=bins)\n var_mean, bin_edges, binnumber = binned_statistic(arg[mask], var[mask], statistic='mean', bins=bins)\n var_err, _, _ = binned_statistic(arg[mask], var[mask], statistic='std', bins=bins)\n counts, _, _ = binned_statistic(arg[mask], var[mask], statistic='count', bins=bins)\n\n # bin centers\n if mode == 'log':\n bin_centers = 10 ** ((exp_bin_edges[:-1] + exp_bin_edges[1:]) / 2.)\n else:\n binwidth = (bin_edges[1] - bin_edges[0])\n bin_centers = bin_edges[1:] - binwidth / 2\n\n # Sort arrays\n if bin_center:\n arg_bins, var_mean = sort2arr(bin_centers, var_mean)\n arg_bins, var_err = sort2arr(bin_centers, var_err)\n else:\n arg_bins, var_mean = sort2arr(arg_means, var_mean)\n arg_bins, var_err = sort2arr(arg_means, var_err)\n if return_std:\n return arg_bins, var_mean, var_err\n else:\n return arg_bins, var_mean, var_err / np.sqrt(counts)\n\n\ndef make_ax_symmetric(ax):\n \"\"\"\n Makes the y-axis symmetric about x=0\n\n Parameters\n ----------\n ax: axes.Axes instance\n\n Returns\n -------\n None\n\n \"\"\"\n ymin, ymax = ax.get_ylim()\n yabs = max(-ymin, ymax)\n ax.set_ylim(-yabs, yabs)\n\ndef make_ticks_scientific(ax):\n \"\"\"\n Make tick labels display in a scientific format\n\n Some other useful lines about tick formats\n ax.set_xticks(np.arange(0, 1.1e-3, 0.5e-3))\n ax.set_yticks(np.arange(0, 1.1e-3, 0.25e-3))\n ax.tick_params(axis='x', labelsize=20)\n ax.tick_params(axis='y', labelsize=20)\n ax.xaxis.offsetText.set_fontsize(20)\n ax.yaxis.offsetText.set_fontsize(20)\n\n Parameters\n ----------\n ax\n\n Returns\n -------\n\n \"\"\"\n ax.ticklabel_format(style='sci', scilimits=(0, 0))\n\n\ndef color_axis(ax, locs=['bottom', 'top', 'left'], colors=['t', 'r', 'b', 'g'],\n xlabel_color=None, ylabel_color=None,\n xtick_color=None, ytick_color=None):\n for loc, color in zip(locs, colors):\n ax.spines[loc].set_color(color)\n if loc in ['top', 'bottom'] and xlabel_color is None:\n xlabel_color = color\n elif loc in ['right', 'left'] and ylabel_color is None:\n ylabel_color = color\n if xlabel_color is None: xlabel_color = 'k'\n 
if ylabel_color is None: ylabel_color = 'k'\n\n # match tick colors with the label colors\n if xtick_color is None: xtick_color = xlabel_color\n if ytick_color is None: ytick_color = ylabel_color\n\n ax.xaxis.label.set_color(xlabel_color)\n ax.tick_params(axis='x', colors=xtick_color)\n ax.xaxis.label.set_color(xlabel_color)\n ax.tick_params(axis='y', colors=ytick_color)\n\n\ndef smooth(x, window_len=11, window='hanning', log=False):\n \"\"\"smooth the data using a window with requested size.\n\n This method is based on the convolution of a scaled window with a given signal.\n The signal is prepared by introducing reflected copies of the signal\n (with the window size) in both ends so that transient parts are minimized\n in the beginning and end part of the output signal.\n\n input:\n x: the input signal\n window_len: the dimension of the smoothing window; should be an odd integer\n window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\n flat window will produce a moving average smoothing.\n\n output:\n the smoothed signal\n\n example:\n\n t=linspace(-2,2,0.1)\n x=sin(t)+randn(len(t))*0.1\n y=smooth(x)\n\n see also:\n\n numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve\n scipy.signal.filter\n\n TODO: the window parameter could be the window itself if an array instead of a string\n NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y.\n \"\"\"\n\n if x.ndim != 1:\n raise ValueError(\"smooth() only accepts 1 dimension arrays.\")\n\n if x.size < window_len:\n raise ValueError(\"Input vector needs to be bigger than window size.\")\n\n if window_len < 3:\n return x\n\n if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError(\"Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'\")\n\n if log:\n x = np.log(x)\n\n s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]\n # print(len(s))\n if window == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window + '(window_len)')\n\n y = np.convolve(w / w.sum(), s, mode='valid')\n if not log:\n return y[(window_len//2-1):(window_len//2-1)+len(x)]\n else:\n return np.exp(y[(window_len // 2 - 1):(window_len // 2 - 1) + len(x)])\n\ndef pc2float(x):\n \"\"\"\n Converts a percentage expression (str) to float\n e.g. pc2float(5.2%) returns 0.0052\n Parameters\n ----------\n x: str, e.g. \"5.2%\"\n\n Returns\n -------\n a floating number (e.g. 0.0052)\n \"\"\"\n return float(x.strip('%'))/100.\n\ndef float2pc(x):\n \"\"\"\n Converts a float into a percentage expression\n Parameters\n ----------\n x\n\n Returns\n -------\n a string in float (e.g. 0.0052)\n \"\"\"\n return \"{0}%\".format(x * 100.)\n\n\ndef simple_legend(ax, **kwargs):\n \"Removes the errorbars from the legend\"\n from matplotlib import container\n handles, labels = ax.get_legend_handles_labels()\n handles = [h[0] if isinstance(h, container.ErrorbarContainer) else h for h in handles]\n ax.legend(handles, labels, **kwargs)"
] | [
[
"numpy.nanmax",
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.cumsum",
"numpy.arctan2",
"numpy.searchsorted",
"numpy.exp",
"matplotlib.patches.PathPatch",
"matplotlib.pylab.rcParams.update",
"numpy.sin",
"matplotlib.pyplot.Circle",
"numpy.nansum",
"numpy.diff",
"numpy.log",
"matplotlib.path.Path",
"matplotlib.colors.LightSource",
"numpy.delete",
"numpy.log10",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.suptitle",
"numpy.sum",
"numpy.gradient",
"matplotlib.pyplot.ginput",
"matplotlib.patches.FancyArrowPatch",
"numpy.isinf",
"matplotlib.offsetbox.VPacker",
"matplotlib.ticker.MultipleLocator",
"numpy.nanmedian",
"numpy.asarray",
"matplotlib.ticker.ScalarFormatter.__init__",
"numpy.concatenate",
"numpy.nanmean",
"scipy.stats.binned_statistic",
"scipy.optimize.curve_fit",
"scipy.ndimage.zoom",
"numpy.argmax",
"matplotlib.pyplot.style.use",
"numpy.min",
"numpy.arccos",
"matplotlib.get_backend",
"matplotlib.pyplot.show",
"matplotlib.pyplot.cm.ScalarMappable",
"matplotlib.patches.FancyArrowPatch.draw",
"numpy.ones",
"matplotlib.cm.get_cmap",
"numpy.empty",
"numpy.linspace",
"matplotlib.pyplot.get_cmap",
"numpy.round",
"numpy.mean",
"numpy.zeros_like",
"numpy.roll",
"matplotlib.pyplot.gcf",
"matplotlib.colorbar.ColorbarBase",
"matplotlib.pyplot.close",
"matplotlib.lines.Line2D.markers.items",
"matplotlib.offsetbox.HPacker",
"matplotlib.collections.LineCollection",
"numpy.isnan",
"matplotlib.patches.Rectangle",
"scipy.interpolate.splprep",
"matplotlib.pyplot.Normalize",
"numpy.logical_or",
"matplotlib.pyplot.cla",
"numpy.linalg.norm",
"matplotlib.colors.Normalize",
"numpy.cos",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"numpy.ma.masked_invalid",
"numpy.nanmin",
"numpy.max",
"matplotlib.patches.FancyArrowPatch.__init__",
"matplotlib.pyplot.gca",
"matplotlib.patches.Patch",
"scipy.interpolate.UnivariateSpline",
"numpy.ones_like",
"numpy.arange",
"numpy.stack",
"matplotlib.pyplot.subplot",
"scipy.interpolate.interp1d",
"matplotlib.offsetbox.AnchoredOffsetbox",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.colors.ListedColormap",
"matplotlib.rcParams.update",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.logical_and",
"matplotlib.markers.MarkerStyle",
"numpy.abs",
"matplotlib.use"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
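The smooth1d()/smooth() helpers in the plotting-utility entry above describe their technique in the docstring: reflect window_len-1 samples at each end, convolve with a normalized window, then crop back to the input length. A minimal NumPy sketch of that recipe (the function name and demo values are illustrative, not part of the entry):

    import numpy as np

    def smooth_sketch(x, window_len=11):
        # reflect the signal at both ends to limit edge transients
        s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
        w = np.hanning(window_len)                     # e.g. a Hanning window
        y = np.convolve(w / w.sum(), s, mode='valid')  # normalized window
        # crop so the smoothed signal has the same length as the input
        return y[(window_len // 2 - 1):(window_len // 2 - 1) + len(x)]

    t = np.linspace(-2, 2, 200)
    x = np.sin(t) + 0.1 * np.random.randn(len(t))
    assert len(smooth_sketch(x)) == len(x)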
DESY-CBPF-UERJ/ANATools | [
"8e0aaca4e653ee8a0ba662c91d8fa1aa34951bfd"
] | [
"examples/efficiency_plots.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gs\nimport anatools.data as data\nimport anatools.analysis as ana\nana.start()\n\n#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n# This is not a complete code. You need to generate the datasets and the lists below: \n# dataframes - list of pandas dataframes\n# labels - list of strings setting the labels\n# colors - list of strings setting the colors\n#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n\n#=================================================================================================================\n# Set up the figure and the subplots grid\n#=================================================================================================================\nfig1 = plt.figure(figsize=(20,6))\ngrid = [2, 3]\ngs1 = gs.GridSpec(grid[0], grid[1], height_ratios=[4, 1])\n\n\n#=================================================================================================================\nN = 1\n#=================================================================================================================\n#==================================================\nax1 = plt.subplot(ana.position(gs1,grid,N,1)) # Positioning at subplot 1 of the plot number 1\n#==================================================\nvar = \"LeadingLep_pt\"\nbins = np.linspace(0,600,61)\nyratio, ye_below, ye_above = ana.efficiency_plot( ax1, var, df_400_100, \"HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ\", label=\"binomial\", color='black', bins=bins, histograms=True, uncertainty=\"binomial\" )\nana.labels(ax1, xlabel=r\"$\\mathrm{Leading\\ } p_T^l\\ [\\mathrm{GeV}]$\", ylabel=r'Efficiency') # Set up the label names\nana.style(ax1, lumi=35.9, year=2016, legend_ncol=1, legend_loc='center right') # Set up the plot style and information on top\n\n\n#=================================================================================================================\nN = 2\n#=================================================================================================================\n#==================================================\nax1 = plt.subplot(ana.position(gs1,grid,N,1)) # Positioning at subplot 1 of the plot number 1\n#==================================================\nvar = \"LeadingLep_pt\"\nbins = np.linspace(0,600,61)\nyratio, ye_below, ye_above = ana.efficiency_plot( ax1, var, df_400_100, \"HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ\", label=\"bayesian\", color='black', bins=bins, histograms=True )\nana.labels(ax1, xlabel=r\"$\\mathrm{Leading\\ } p_T^l\\ [\\mathrm{GeV}]$\", ylabel=r'Efficiency') # Set up the label names\nana.style(ax1, lumi=35.9, year=2016, legend_ncol=1, legend_loc='center right') # Set up the plot style and information on top\n\n\n#=================================================================================================================\nN = 3\n#=================================================================================================================\n#==================================================\nax1 = plt.subplot(ana.position(gs1,grid,N,1)) # Positioning at subplot 1 of the plot number 1\n#==================================================\nvar = \"LeadingLep_pt\"\nbins = np.linspace(0,600,61)\nyratio, ye_below, ye_above = ana.efficiency_plot( ax1, var, df_400_100, \"HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ\", 
label=\"HLT_Ele23_Ele12\", color='blue', bins=bins )\nyratio, ye_below, ye_above = ana.efficiency_plot( ax1, var, df_400_100, \"HLT_DoubleEle33_CaloIdL_GsfTrkIdVL_MW\", label=\"HLT_DoubleEle33\", color='red', bins=bins )\nana.labels(ax1, xlabel=r\"$\\mathrm{Leading\\ } p_T^l\\ [\\mathrm{GeV}]$\", ylabel=r'Efficiency') # Set up the label names\nana.style(ax1, lumi=35.9, year=2016, legend_ncol=1, legend_loc='lower right') # Set up the plot style and information on top\n\n\n\n#=================================================================================================================\n# Make final setup, save and show plots\n#=================================================================================================================\nplt.subplots_adjust(left=0.055, bottom=0.115, right=0.96, top=0.95, wspace=0.35, hspace=0.0)\nplt.savefig('efficiency_plots.png')\nplt.savefig('efficiency_plots.pdf')\nplt.show()\n\n\n\n"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.savefig",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
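The efficiency_plot calls in the ANATools example above report a per-bin efficiency with either a binomial or a Bayesian uncertainty. Purely as an illustration of the binomial case (this is not the anatools implementation; names and demo data are made up), a NumPy sketch:

    import numpy as np

    def binned_efficiency(x, passed, bins):
        total, _ = np.histogram(x, bins=bins)
        npass, _ = np.histogram(x[passed], bins=bins)
        with np.errstate(divide='ignore', invalid='ignore'):
            eff = np.where(total > 0, npass / total, np.nan)
            # normal-approximation binomial error: sqrt(eff * (1 - eff) / N)
            err = np.where(total > 0, np.sqrt(eff * (1 - eff) / total), np.nan)
        return eff, err

    pt = np.random.uniform(0, 600, 10000)        # stand-in for LeadingLep_pt
    fired = np.random.rand(10000) < 0.9          # hypothetical trigger decision
    eff, err = binned_efficiency(pt, fired, np.linspace(0, 600, 61))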
cenkcorapci/fashion-tagger | [
"059cb893ecc7ef31c6365501308e7c8229d8debe"
] | [
"experiment.py"
] | [
"import argparse\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom commons.config import STYLES_DATA_SET_PATH\nfrom commons.data_utils import get_target_list\nfrom experiments.fashion_tagger_experiment import FashionClassifierExperiment\nfrom models.fashion_tagger import FashionTagger\n\nusage_docs = \"\"\"\n--epochs <integer> Number of epochs\n--val_split <float> Set validation split(between 0 and 1)\n--batch_size <int> Batch size for training\n\"\"\"\n\nparser = argparse.ArgumentParser(usage=usage_docs)\n\nparser.add_argument('--epochs', type=int, default=10)\nparser.add_argument('--val_split', type=float, default=.1)\nparser.add_argument('--batch_size', type=int, default=128)\n\nargs = parser.parse_args()\n\n\ndef get_df():\n df = pd.read_csv(STYLES_DATA_SET_PATH, error_bad_lines=False)\n df = df.dropna()\n df['image'] = df.apply(lambda row: str(row['id']) + \".jpg\", axis=1)\n return df\n\n\ndf = get_df()\n\nexclude_list = ['Travel', 'Smart Casual', 'Home', 'Party']\ndf = df.drop(columns=['productDisplayName', 'year'])\n\nfor exclude in exclude_list:\n df = df.loc[df.masterCategory != exclude]\n\ncolumn_list = ['gender', 'masterCategory', 'subCategory', 'articleType', 'baseColour', 'season']\nfor column in tqdm(column_list, desc='Pruning attributes by counts'):\n contain_list = []\n for index, row in df.groupby(column).count().iterrows():\n if row['id'] >= 2000:\n contain_list.append(index)\n df = df.loc[df[column].isin(contain_list)]\n\ndf = df.drop(columns=['id'])\ntargets = get_target_list(df)\nmodel = FashionTagger(len(targets))\nexperiment = FashionClassifierExperiment(df,\n targets,\n 'dense_net_201_fashion_attribute_tagger',\n model,\n val_split=args.val_split,\n nb_epochs=args.epochs,\n batch_size=args.batch_size)\nexperiment.train_model()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
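The "Pruning attributes by counts" loop in the fashion-tagger entry above keeps only attribute values that occur at least 2000 times. A compact pandas equivalent, sketched with a hypothetical helper name and assuming the same dataframe columns:

    import pandas as pd

    def prune_rare_values(df: pd.DataFrame, columns, min_count=2000) -> pd.DataFrame:
        # keep rows whose value in each column occurs at least min_count times
        for column in columns:
            counts = df[column].value_counts()
            df = df[df[column].isin(counts[counts >= min_count].index)]
        return df

    # e.g. df = prune_rare_values(df, ['gender', 'masterCategory', 'subCategory',
    #                                  'articleType', 'baseColour', 'season'])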
ClementPla/Retinal-Lesions-Segmentation | [
"20fa4ac8eae24814470095bb6e7f08d6751c4e11"
] | [
"tools/losses.py"
] | [
"import random\n\nimport torch\nimport torch.nn as nn\nfrom nntools.nnet import register_loss\n\n\nclass MultiLabelSoftBinaryCrossEntropy(nn.Module):\n def __init__(self, smooth_factor: float = 0, weighted: bool = True,\n mcb: bool = False, hp_lambda: int = 10,\n epsilon: float = 0.1, logits=True,\n first_class_bg=False):\n super(MultiLabelSoftBinaryCrossEntropy, self).__init__()\n self.smooth_factor = smooth_factor\n self.logits = logits\n if logits:\n self.criterion = nn.BCEWithLogitsLoss(reduction='none' if weighted else 'mean')\n else:\n self.criterion = nn.BCELoss(reduction='none' if weighted else 'mean')\n self.weighted = weighted\n self.hp_lambda = hp_lambda\n self.MCB = mcb\n self.epsilon = epsilon\n self.first_class_bg = first_class_bg\n\n def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:\n if y_pred.size() != y_true.size():\n \"\"\"\n Case in which y_pred.shape == b x c+1 x h x w and y_true.shape == b x c x h x w\n \"\"\"\n y_pred = y_pred[:, 1:] # We don't consider the first class (assuming it is background)\n\n b, c, h, w = y_true.shape\n y_true = y_true.float()\n\n if self.smooth_factor:\n smooth = random.uniform(0, self.smooth_factor)\n soft_targets = (1 - y_true) * smooth + y_true * (1 - smooth)\n else:\n soft_targets = y_true\n\n bce_loss = self.criterion(y_pred, soft_targets)\n\n if self.weighted and not self.MCB:\n N = h * w\n weights = y_true.sum(dim=(2, 3), keepdim=True) / N\n betas = 1 - weights\n bce_loss = y_true * bce_loss * betas + (1 - y_true) * bce_loss * weights\n bce_loss = bce_loss.sum() / (b * N)\n\n if self.weighted and self.MCB:\n Ypos = y_true.sum(dim=(0, 2, 3), keepdim=False)\n mcb_loss = 0\n for i, k in enumerate(Ypos):\n if self.first_class_bg and i == 0:\n tmp = (y_true[:, i] * bce_loss[:, i]).flatten(1, 2)\n mcb_loss += torch.topk(tmp, k=self.hp_lambda*25, dim=1, sorted=False).values.mean()\n\n else:\n tmp = ((1 - y_true[:, i]) * bce_loss[:, i]).flatten(1, 2)\n topk = max(min((k * self.hp_lambda) // b, (1 - y_true[:, i]).sum() // b), self.hp_lambda)\n ik = torch.topk(tmp, k=int(topk), dim=1, sorted=False).values\n # We can't compute a \"k\" per image on the batch, so we take an average value\n # (limitation of the topk function)\n\n beta_k = (ik.shape[1] / (k/b + ik.shape[1] + self.epsilon))\n # For the same reason, beta_k is batch-wise, not image-wise.\n # The original paper defines a single beta instead of beta_k; the rational of this choice is unclear.\n # On the other hand, here beta_k=lambda/(1+lambda)\n mcb_loss += (ik * (1 - beta_k)).mean() # Negative loss\n tmp = y_true[:, i] * bce_loss[:, i] # Positive Loss\n mcb_loss += (tmp * beta_k).sum() / (y_true[:, i].sum() + self.epsilon)\n bce_loss = mcb_loss\n\n return bce_loss\n\n\nregister_loss('MultiLabelSoftBinaryCrossEntropy', MultiLabelSoftBinaryCrossEntropy)\n\n\nclass MultiDatasetCrossEntropy(nn.Module):\n def __init__(self, smooth_factor: float = 0, weighted: bool = True, mcb: bool = False,\n hp_lambda: int = 10, alpha=None,\n epsilon: float = 1e-5,\n criterion='CustomCrossEntropy'):\n super(MultiDatasetCrossEntropy, self).__init__()\n self.epsilon = epsilon\n self.alpha = alpha\n self.criterion = criterion\n if self.criterion == 'CustomCrossEntropy':\n self.loss = MultiLabelSoftBinaryCrossEntropy(smooth_factor, weighted, mcb, hp_lambda, epsilon, logits=False,\n first_class_bg=True)\n elif self.criterion == 'NLL':\n self.loss = nn.NLLLoss()\n\n def forward(self, y_pred: torch.Tensor, cm_predictions: list, tag: torch.Tensor, y_true: torch.Tensor):\n \"\"\"\n 
:param y_pred: Estimation of real labels, BxCxHxW\n :param cm_predictions: List of predicted confusion matrix. Each element of the list is a tuple.\n tuple[0] = dataset id, tuple[1] = CM tensor of size BxC**2xHxW\n :param tag: Tensor of size B\n :param y_true: Labels associated to each image (depends of the dataset): BxCxHxW\n :return:\n \"\"\"\n loss = 0.0\n regularization = 0.0\n # y_pred = torch.softmax(y_pred, 1)\n y_pred = torch.sigmoid(y_pred)\n y_background = torch.clamp(1-y_pred.max(1, keepdim=True).values, 0, 1)\n y_pred = torch.cat([y_background, y_pred], 1)\n\n y_bg_true = ~torch.any(y_true, 1, keepdim=True).long()\n y_true = torch.cat([y_bg_true, y_true], 1)\n \n if self.criterion == 'NLL':\n max_arg = torch.max(y_true, 1, keepdim=False)\n gt = max_arg.indices + 1\n gt[max_arg.values == 0] = 0\n y_true = gt\n\n for d_id, cm in cm_predictions:\n y_true_did = y_true[tag == d_id]\n y_pred_did = y_pred[tag == d_id]\n b, c, h, w = y_pred_did.shape\n y_pred_did = y_pred_did.view(b, c, h*w).permute(0, 2, 1).reshape(b*h*w, c, 1)\n cm = cm.view(b, c**2, h*w).permute(0, 2, 1)\n cm = cm.reshape(b*h*w, c**2).view(b*h*w, c, c)\n cm = cm / (cm.sum(1, keepdim=True)+self.epsilon)\n # cm = torch.sigmoid(cm)\n y_pred_n = torch.bmm(cm, y_pred_did).view(b*h*w, c)\n y_pred_n = y_pred_n.view(b, h*w, c).permute(0, 2, 1).reshape(b, c, h, w)\n loss += self.loss(torch.clamp(y_pred_n, self.epsilon, 1), y_true_did)\n regularization += torch.trace(torch.sum(cm, dim=0)) / (b*h*w)\n return loss + self.alpha*regularization\n\n\nregister_loss('MultiDatasetCrossEntropy', MultiDatasetCrossEntropy)\n\n\nclass MultiLabelToCrossEntropyLoss(nn.Module):\n def __init__(self, from_logits=False):\n super(MultiLabelToCrossEntropyLoss, self).__init__()\n\n if from_logits:\n self.loss = nn.CrossEntropyLoss()\n else:\n self.loss = nn.NLLLoss()\n\n def forward(self, y_pred, y_true):\n max_arg = torch.max(y_true, 1, keepdim=False)\n gt = max_arg.indices + 1\n gt[max_arg.values == 0] = 0\n y_true = gt\n return self.loss(y_pred, y_true)\n"
] | [
[
"torch.sigmoid",
"torch.nn.CrossEntropyLoss",
"torch.nn.NLLLoss",
"torch.max",
"torch.cat",
"torch.sum",
"torch.nn.BCELoss",
"torch.any",
"torch.nn.BCEWithLogitsLoss",
"torch.bmm",
"torch.topk",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
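In the segmentation-loss entry above, the weighted (non-MCB) branch of MultiLabelSoftBinaryCrossEntropy balances positives and negatives by the per-image, per-class positive fraction. A self-contained PyTorch sketch of just that weighting step (the helper name and demo tensors are illustrative):

    import torch
    import torch.nn.functional as F

    def weighted_multilabel_bce(logits, y_true):
        y_true = y_true.float()
        b, c, h, w = y_true.shape
        bce = F.binary_cross_entropy_with_logits(logits, y_true, reduction='none')
        pos_frac = y_true.sum(dim=(2, 3), keepdim=True) / (h * w)  # per image and class
        beta = 1 - pos_frac
        # rare positive pixels get the large weight beta, abundant negatives the small one
        bce = y_true * bce * beta + (1 - y_true) * bce * pos_frac
        return bce.sum() / (b * h * w)

    logits = torch.randn(2, 4, 64, 64)
    targets = (torch.rand(2, 4, 64, 64) > 0.9).float()
    loss = weighted_multilabel_bce(logits, targets)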
erdeq-upenn/code_public | [
"1b4c2c79ea5f92042a94232a579b531241af672b"
] | [
"1-python/lol-champion-picker-master/lol-champion-picker-master/lol-champion-picker/tensorflow/test.py"
] | [
"import os\nimport argparse\nimport sys\n\nimport tflearn\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nTRAIN_DIR = \"./new_data/cleaned_train.txt\"\nTEST_DIR = \"./new_data/cleaned_test.txt\"\n\n# with open('./new_data/dataset.txt','r') as f:\n# lines = f.read().splitlines()\n# cleaned = lines[0][2:-2].split('], [') #[2:-2] to remove '[[' and ']]', then separate each row\n\n# data_size = len(cleaned)\n# test_set_size = int(np.floor(0.2 * data_size))\n# train_set_size = int(data_size - test_set_size)\n\n# #create new text file with cleaned train data\n# with open(TRAIN_DIR, 'w') as f:\n# for row in cleaned[:train_set_size]:\n# f.write(\"%s\\n\" % row)\n\n# #create new text file with cleaned test data\n# with open(TEST_DIR, 'w') as f:\n# for row in cleaned[train_set_size:]:\n# f.write(\"%s\\n\" % row)\n\ndef load_train_data():\n df_train = pd.read_csv(\"./tensorflow/data/training_set_15000.txt\", names=CSV_COLUMNS)\n df_train[\"opposite_result\"] = df_train[\"result\"].map(lambda x: 1 if x == 0 else 0)\n X_train = df_train.iloc[:, :-2].as_matrix()\n Y_train = labels = df_train[[\"result\", \"opposite_result\"]].as_matrix()\n return X_train, Y_train\n\ndef load_test_data():\n df_test = pd.read_csv(\"./tensorflow/data/test2.txt\", names=CSV_COLUMNS)\n df_test[\"opposite_result\"] = df_test[\"result\"].map(lambda x: 1 if x == 0 else 0)\n X_test = df_test.iloc[:, :-2].as_matrix()\n Y_test = labels = df_test[[\"result\", \"opposite_result\"]].as_matrix()\n return X_train, Y_train\n\n# Create neural network model\ndef create_model(num_features, lr=1E-3, two_fc=True, board_dir=\"\", dropout=0.5, activation=\"relu\"):\n net = tflearn.input_data(shape=[None, num_features])\n net = tflearn.fully_connected(net, 100, activation=activation)\n #two_fc: whether to use 1 hidden layer or 2\n if two_fc:\n #dropout to prevent overfitting\n net = tflearn.dropout(net, dropout)\n net = tflearn.fully_connected(net, 80, activation=activation)\n net = tflearn.fully_connected(net, 2, activation='softmax')\n net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy', learning_rate=lr)\n #name of tensorboard file\n tensorboard_dir = \"/tmp/tflearn_logs/lol/\" + board_dir\n # Define model\n model = tflearn.DNN(net, tensorboard_verbose=2, tensorboard_dir=tensorboard_dir)\n return model\n\ndef fit_model(model, x_train, y_train, x_test, y_test, epoch=8):\n # Start training (apply gradient descent algorithm)\n model.fit(x_train, y_train, n_epoch=epoch, validation_set=(x_test, y_test),\n snapshot_step=100, show_metric=True, run_id=\"lol\")\n\n# Generate unique file name for tensorboard visualization\ndef make_hparam_string(learning_rate=1E-3, use_two_fc=True, epoch=5, dropout=0.5):\n layers = \"2\" if use_two_fc else \"1\"\n return \"lr=\" + str(learning_rate) + \",fc=\" + layers + \",epochs=\" + str(epoch) + \",dropout=\" + str(dropout)\n\n# Find the optimal learning rate and number of layers\ndef find_optimal_lr_and_fc():\n for learn_rate in [1E-3, 1E-4, 1E-5]:\n for use_two_fc in [True, False]:\n param_string = make_hparam_string(learning_rate=learn_rate, use_two_fc=use_two_fc)\n model = create_model(num_features=X_train.shape[1], lr=learn_rate, two_fc=use_two_fc, board_dir=param_string)\n fit_model(model, X_train, Y_train, X_test, Y_test)\n\n# Finds optimal # of epochs\ndef find_optimal_epochs():\n for epoch in [5, 6, 7, 8, 9, 10]:\n param_string = make_hparam_string(epoch=epoch)\n model = create_model(num_features=X_train.shape[1], board_dir=param_string, dropout=dropout)\n 
fit_model(model, X_train, Y_train, X_test, Y_test, epoch=epoch)\n # weird bug, have to reset graph every time fit_model run again\n tf.reset_default_graph()\n\nX_train, Y_train = load_train_data()\nX_test, Y_test = load_test_data()\n\n# fit_model(model, X_train, Y_train, X_test, Y_test)\n\n# find_optimal_lr_and_fc()\n# OPTIMAL LR: 1E-3\n# OPTIMAL LAYERS: Two hidden layers (0.6664 log loss vs 0.6669 log loss)\n\n# find_optimal_epochs()\n# OPTIMAL EPOCHS: Around 8 (minimal differences)\n\nFLAGS = None\n\n\ndef main(_):\n model = create_model(num_features=X_train.shape[1])\n model.load('./tensorflow/model/tflearnlol.model')\n if FLAGS.user_input:\n y = np.array(eval(FLAGS.user_input)).reshape(1, 276)\n # print(\"HI\")\n # return model.predict(y)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\") #can accept booleans as well\n parser.add_argument(\n \"--user_input\",\n type=str,\n default=\"\",\n help=\"String of an array representing new user input.\"\n )\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] | [
[
"tensorflow.reset_default_graph",
"pandas.read_csv",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
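One detail worth noting in the lol-champion-picker entry above: load_test_data() reads the test CSV but returns X_train, Y_train, and it relies on the deprecated DataFrame.as_matrix(). A corrected sketch under the same assumptions (CSV_COLUMNS and file layout as in the entry):

    import pandas as pd

    def load_test_data(path="./tensorflow/data/test2.txt", columns=None):
        df_test = pd.read_csv(path, names=columns)
        df_test["opposite_result"] = df_test["result"].map(lambda x: 1 if x == 0 else 0)
        X_test = df_test.iloc[:, :-2].values                      # .values replaces .as_matrix()
        Y_test = df_test[["result", "opposite_result"]].values
        return X_test, Y_test                                     # return the test split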
ahovhannes/label-studio | [
"22707f6b908a612ecd85039b0b10cba900fbb329"
] | [
"label_studio/blueprint.py"
] | [
"import os\nimport io\nimport attr\nimport lxml\nimport time\nimport shutil\nimport flask\nimport pathlib\nimport functools\nimport logging\nimport logging.config\nimport pandas as pd\nimport traceback as tb\nimport lxml.etree\nimport label_studio\n\ntry:\n import ujson as json\nexcept ModuleNotFoundError:\n import json\n\n# setup default config for logging\nwith io.open(os.path.join(os.path.dirname(__file__), 'logger.json')) as f:\n logging.config.dictConfig(json.load(f))\n\nfrom uuid import uuid4\nfrom urllib.parse import unquote\nfrom datetime import datetime\nfrom inspect import currentframe, getframeinfo\nfrom gevent.pywsgi import WSGIServer\nfrom flask import (\n request, jsonify, make_response, Response, Response as HttpResponse,\n send_file, session, redirect, current_app, Blueprint, url_for, g\n)\nfrom flask_api import status\nfrom types import SimpleNamespace\n\nfrom label_studio.utils import uploader\nfrom label_studio.utils.io import find_dir, find_editor_files\nfrom label_studio.utils.validation import TaskValidator\nfrom label_studio.utils.exceptions import ValidationError, LabelStudioError\nfrom label_studio.utils.functions import (\n set_external_hostname, set_web_protocol, get_web_protocol,\n generate_time_series_json, generate_sample_task, get_sample_task\n)\nfrom label_studio.utils.misc import (\n exception_handler, exception_handler_page, check_port_in_use, start_browser, str2datetime,\n config_line_stripped, get_config_templates, convert_string_to_hash, serialize_class\n)\nfrom label_studio.utils.analytics import Analytics\nfrom label_studio.utils.argparser import parse_input_args\nfrom label_studio.utils.uri_resolver import resolve_task_data_uri\nfrom label_studio.utils.auth import requires_auth\nfrom label_studio.storage import get_storage_form\nfrom label_studio.project import Project\nfrom label_studio.tasks import Tasks\nfrom label_studio.utils.data_manager import prepare_tasks\n\nINPUT_ARGUMENTS_PATH = pathlib.Path(\"server.json\")\n\nlogger = logging.getLogger(__name__)\nblueprint = Blueprint(__package__, __name__,\n static_folder='static', static_url_path='/static',\n template_folder='templates')\nblueprint.add_app_template_filter(str2datetime, 'str2datetime')\n\n\[email protected](frozen=True)\nclass LabelStudioConfig:\n input_args = attr.ib()\n\n\ndef set_input_arguments_path(path):\n global INPUT_ARGUMENTS_PATH\n INPUT_ARGUMENTS_PATH = pathlib.Path(path)\n\n\[email protected]_cache(maxsize=1)\ndef config_from_file():\n try:\n config_file = INPUT_ARGUMENTS_PATH.open(encoding='utf8')\n except OSError:\n raise LabelStudioError(\"Can't open input_args file: \" + str(INPUT_ARGUMENTS_PATH) + \", \" \n \"use set_input_arguments_path() to setup it\")\n\n with config_file:\n data = json.load(config_file)\n return LabelStudioConfig(input_args=SimpleNamespace(**data))\n\n\ndef create_app(label_studio_config=None):\n \"\"\" Create application factory, as explained here:\n http://flask.pocoo.org/docs/patterns/appfactories/.\n\n :param label_studio_config: LabelStudioConfig object to use with input_args params\n \"\"\"\n app = flask.Flask(__package__, static_url_path='')\n app.secret_key = 'A0Zrdqwf1AQWj12ajkhgFN]dddd/,?RfDWQQT'\n app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n app.config['WTF_CSRF_ENABLED'] = False\n app.url_map.strict_slashes = False\n app.label_studio = label_studio_config or config_from_file()\n\n # check LabelStudioConfig correct loading\n if app.label_studio is None:\n raise LabelStudioError('LabelStudioConfig is not loaded correctly')\n\n 
app.register_blueprint(blueprint)\n return app\n\n\ndef project_get_or_create(multi_session_force_recreate=False):\n \"\"\" Return existed or create new project based on environment. Currently supported methods:\n - \"fixed\": project is based on \"project_name\" attribute specified by input args when app starts\n - \"session\": project is based on \"project_name\" key restored from flask.session object\n\n :param multi_session_force_recreate: create a new project if True\n :return: project\n \"\"\"\n input_args = current_app.label_studio.input_args\n if input_args and input_args.command == 'start-multi-session':\n # get user from session\n if 'user' not in session:\n session['user'] = str(uuid4())\n user = session['user']\n g.user = user\n\n # get project from session\n if 'project' not in session or multi_session_force_recreate:\n session['project'] = str(uuid4())\n project = session['project']\n\n # check for shared projects and get owner user\n if project in session.get('shared_projects', []):\n owner = Project.get_user_by_project(project, input_args.root_dir)\n if owner is None: # owner is None when project doesn't exist\n raise Exception('No such shared project found: project_uuid = ' + project)\n else:\n user = owner\n\n project_name = user + '/' + project\n return Project.get_or_create(project_name, input_args, context={\n 'multi_session': True,\n 'user': convert_string_to_hash(user)\n })\n else:\n if multi_session_force_recreate:\n raise NotImplementedError(\n '\"multi_session_force_recreate\" option supported only with \"start-multi-session\" mode')\n return Project.get_or_create(input_args.project_name,\n input_args, context={'multi_session': False})\n\n\[email protected]_request\ndef app_before_request_callback():\n # skip endpoints where no project is needed\n if request.endpoint in ('static', 'send_static'):\n return\n\n # prepare global variables\n def prepare_globals():\n # setup session cookie\n if 'session_id' not in session:\n session['session_id'] = str(uuid4())\n g.project = project_get_or_create()\n g.analytics = Analytics(current_app.label_studio.input_args, g.project)\n g.sid = g.analytics.server_id\n\n # show different exception pages for api and other endpoints\n if request.path.startswith('/api'):\n return exception_handler(prepare_globals)()\n else:\n return exception_handler_page(prepare_globals)()\n\n\[email protected]_request\n@exception_handler\ndef app_after_request_callback(response):\n if hasattr(g, 'analytics'):\n g.analytics.send(request, session, response)\n return response\n\n\[email protected]('/static/media/<path:path>')\n@requires_auth\ndef send_media(path):\n \"\"\" Static for label tool js and css\n \"\"\"\n media_dir = find_dir('static/media')\n return flask.send_from_directory(media_dir, path)\n\n\[email protected]('/static/<path:path>')\n@requires_auth\ndef send_static(path):\n \"\"\" Static serving\n \"\"\"\n static_dir = find_dir('static')\n return flask.send_from_directory(static_dir, path)\n\n\[email protected]('/data/<path:filename>')\n@requires_auth\n@exception_handler\ndef get_data_file(filename):\n \"\"\" External resource serving\n \"\"\"\n # support for upload via GUI\n if filename.startswith('upload/'):\n path = os.path.join(g.project.path, filename)\n directory = os.path.abspath(os.path.dirname(path))\n filename = os.path.basename(path)\n return flask.send_from_directory(directory, filename, as_attachment=True)\n\n # serving files from local storage\n if not g.project.config.get('allow_serving_local_files'):\n raise 
FileNotFoundError('Serving local files is not allowed. '\n 'Use \"allow_serving_local_files\": true config option to enable local serving')\n directory = request.args.get('d')\n return flask.send_from_directory(directory, filename, as_attachment=True)\n\n\[email protected]('/samples/time-series.csv')\n@requires_auth\ndef samples_time_series():\n \"\"\" Generate time series example for preview\n \"\"\"\n time_column = request.args.get('time', '')\n value_columns = request.args.get('values', '').split(',')\n time_format = request.args.get('tf')\n\n # separator processing\n separator = request.args.get('sep', ',')\n separator = separator.replace('\\\\t', '\\t')\n aliases = {'dot': '.', 'comma': ',', 'tab': '\\t', 'space': ' '}\n if separator in aliases:\n separator = aliases[separator]\n\n # check headless or not\n header = True\n if all(n.isdigit() for n in [time_column] + value_columns):\n header = False\n\n # generate all columns for headless csv\n if not header:\n max_column_n = max([int(v) for v in value_columns] + [0])\n value_columns = range(1, max_column_n+1)\n\n ts = generate_time_series_json(time_column, value_columns, time_format)\n csv_data = pd.DataFrame.from_dict(ts).to_csv(index=False, header=header, sep=separator).encode('utf-8')\n\n mem = io.BytesIO()\n mem.write(csv_data)\n mem.seek(0)\n return send_file(\n mem,\n as_attachment=False,\n attachment_filename='time-series.csv',\n mimetype='text/csv'\n )\n\n\[email protected]('/')\n@requires_auth\n@exception_handler_page\ndef labeling_page():\n \"\"\" Label stream for tasks\n \"\"\"\n if g.project.no_tasks():\n return redirect(url_for('label_studio.welcome_page'))\n\n # task data: load task or task with completions if it exists\n task_data = None\n task_id = request.args.get('task_id', None)\n\n if task_id is not None:\n task_id = int(task_id)\n # Task explore mode\n task_data = g.project.get_task_with_completions(task_id) or g.project.source_storage.get(task_id)\n task_data = resolve_task_data_uri(task_data, project=g.project)\n\n if g.project.ml_backends_connected:\n task_data = g.project.make_predictions(task_data)\n\n return flask.render_template(\n 'labeling.html',\n project=g.project,\n config=g.project.config,\n label_config_line=g.project.label_config_line,\n task_id=task_id,\n task_data=task_data,\n **find_editor_files()\n )\n\n\[email protected]('/welcome')\n@requires_auth\n@exception_handler_page\ndef welcome_page():\n \"\"\" On-boarding page\n \"\"\"\n g.project.update_on_boarding_state()\n return flask.render_template(\n 'welcome.html',\n config=g.project.config,\n project=g.project,\n on_boarding=g.project.on_boarding\n )\n\n\[email protected]('/tasks', methods=['GET', 'POST'])\n@requires_auth\n@exception_handler_page\ndef tasks_page():\n \"\"\" Tasks and completions page\n \"\"\"\n serialized_project = g.project.serialize()\n serialized_project['multi_session_mode'] = current_app.label_studio.input_args.command != 'start-multi-session'\n return flask.render_template(\n 'tasks.html',\n config=g.project.config,\n project=g.project,\n serialized_project=serialized_project,\n **find_editor_files()\n )\n\n\[email protected]('/setup')\n@requires_auth\n@exception_handler_page\ndef setup_page():\n \"\"\" Setup labeling config\n \"\"\"\n input_values = {}\n project = g.project\n input_args = current_app.label_studio.input_args\n\n g.project.description = project.get_config(project.name, input_args).get('description', 'Untitled')\n\n # evaluate all projects for this user: user_projects + shared_projects\n if 
project.config.get(\"show_project_links_in_multisession\", True) and hasattr(g, 'user'):\n user = g.user\n project_ids = g.project.get_user_projects(user, input_args.root_dir)\n\n # own projects\n project_names = [os.path.join(user, uuid) for uuid in project_ids]\n project_desc = [Project.get_config(name, input_args).get('description', 'Untitled') for name in project_names]\n own_projects = dict(zip(project_ids, project_desc))\n\n # shared projects\n shared_projects = {}\n for uuid in session.get('shared_projects', []):\n tmp_user = Project.get_user_by_project(uuid, input_args.root_dir)\n project_name = os.path.join(tmp_user, uuid)\n project_desc = Project.get_config(project_name, input_args).get('description', 'Untitled')\n shared_projects[uuid] = project_desc\n else:\n own_projects, shared_projects = {}, {}\n\n # this is useful for the transfer to playground templates\n template_mode = request.args.get('template_mode')\n page = 'includes/setup_templates.html' if template_mode else 'setup.html'\n\n templates = get_config_templates(g.project.config)\n return flask.render_template(\n page,\n config=g.project.config,\n project=g.project,\n label_config_full=g.project.label_config_full,\n templates=templates,\n input_values=input_values,\n multi_session=input_args.command == 'start-multi-session',\n own_projects=own_projects,\n shared_projects=shared_projects,\n template_mode=template_mode\n )\n\n\[email protected]('/import')\n@requires_auth\n@exception_handler_page\ndef import_page():\n \"\"\" Import tasks from JSON, CSV, ZIP and more\n \"\"\"\n return flask.render_template(\n 'import.html',\n config=g.project.config,\n project=g.project\n )\n\n\[email protected]('/export')\n@requires_auth\n@exception_handler_page\ndef export_page():\n \"\"\" Export page: export completions as JSON or using converters\n \"\"\"\n return flask.render_template(\n 'export.html',\n config=g.project.config,\n formats=g.project.converter.supported_formats,\n project=g.project\n )\n\n\[email protected]('/model')\n@requires_auth\n@exception_handler_page\ndef model_page():\n \"\"\" Machine learning backends page\n \"\"\"\n ml_backends = []\n for ml_backend in g.project.ml_backends:\n if ml_backend.connected:\n try:\n ml_backend.sync(g.project)\n training_status = ml_backend.is_training(g.project)\n ml_backend.training_in_progress = training_status['is_training']\n ml_backend.model_version = training_status['model_version']\n ml_backend.is_connected = True\n ml_backend.is_error = False\n except Exception as exc:\n logger.error(str(exc), exc_info=True)\n ml_backend.is_error = True\n try:\n # try to parse json as the result of @exception_handler\n ml_backend.error = json.loads(str(exc))\n except ValueError:\n ml_backend.error = {'detail': \"Can't parse exception message from ML Backend\"}\n\n else:\n ml_backend.is_connected = False\n ml_backends.append(ml_backend)\n return flask.render_template(\n 'model.html',\n config=g.project.config,\n project=g.project,\n ml_backends=ml_backends\n )\n\n\[email protected]('/version')\n@requires_auth\n@exception_handler\ndef version():\n \"\"\" Show LS backend and LS frontend versions\n \"\"\"\n lsf = json.load(open(find_dir('static/editor') + '/version.json'))\n ver = {\n 'label-studio-frontend': lsf,\n 'label-studio-backend': label_studio.__version__\n }\n return make_response(jsonify(ver), 200)\n\n\[email protected]('/render-label-studio', methods=['GET', 'POST'])\n@requires_auth\ndef api_render_label_studio():\n \"\"\" Label studio frontend rendering for iframe\n \"\"\"\n config = 
request.args.get('config', request.form.get('config', ''))\n config = unquote(config)\n if not config:\n return make_response('No config in POST', status.HTTP_417_EXPECTATION_FAILED)\n\n task_data, completions, predictions = get_sample_task(config)\n\n example_task_data = {\n 'id': 42,\n 'data': task_data,\n 'completions': completions,\n 'predictions': predictions,\n 'project': g.project.id,\n 'created_at': '2019-02-06T14:06:42.000420Z',\n 'updated_at': '2019-02-06T14:06:42.000420Z'\n }\n\n # prepare context for html\n config_line = config_line_stripped(config)\n response = {\n 'label_config_line': config_line,\n 'task_ser': example_task_data\n }\n response.update(find_editor_files())\n\n return flask.render_template('render_ls.html', **response)\n\n\[email protected]('/api/validate-config', methods=['POST'])\n@requires_auth\ndef api_validate_config():\n \"\"\" Validate label config via tags schema\n \"\"\"\n if 'label_config' not in request.form:\n return make_response('No label_config in POST', status.HTTP_417_EXPECTATION_FAILED)\n try:\n g.project.validate_label_config(request.form['label_config'])\n except ValidationError as e:\n return make_response(jsonify({'label_config': e.msg_to_list()}), status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return make_response(jsonify({'label_config': [str(e)]}), status.HTTP_400_BAD_REQUEST)\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\[email protected]('/api/import-example', methods=['GET', 'POST'])\n@requires_auth\ndef api_import_example():\n \"\"\" Generate upload data example by config only\n \"\"\"\n # django compatibility\n request.GET = request.args\n request.POST = request.form\n config = request.GET.get('label_config', '')\n if not config:\n config = request.POST.get('label_config', '')\n try:\n g.project.validate_label_config(config)\n task_data, _, _ = get_sample_task(config)\n except (ValueError, ValidationError, lxml.etree.Error, KeyError):\n response = HttpResponse('error while example generating', status=status.HTTP_400_BAD_REQUEST)\n else:\n response = HttpResponse(json.dumps(task_data))\n return response\n\n\[email protected]('/api/import-example-file')\n@requires_auth\ndef api_import_example_file():\n \"\"\" Task examples for import\n \"\"\"\n request.GET = request.args # django compatibility\n\n q = request.GET.get('q', 'json')\n filename = 'sample-' + datetime.now().strftime('%Y-%m-%d-%H-%M')\n try:\n task = generate_sample_task(g.project)\n except (ValueError, ValidationError, lxml.etree.Error):\n return HttpResponse('error while example generating', status=status.HTTP_400_BAD_REQUEST)\n\n tasks = [task, task]\n\n if q == 'json':\n filename += '.json'\n output = json.dumps(tasks)\n\n elif q == 'csv':\n filename += '.csv'\n output = pd.read_json(json.dumps(tasks), orient='records').to_csv(index=False)\n\n elif q == 'tsv':\n filename += '.tsv'\n output = pd.read_json(json.dumps(tasks), orient='records').to_csv(index=False, sep='\\t')\n\n elif q == 'txt':\n if len(g.project.data_types.keys()) > 1:\n raise ValueError('TXT is unsupported for projects with multiple sources in config')\n\n filename += '.txt'\n output = ''\n for t in tasks:\n output += list(t.values())[0] + '\\n'\n\n else:\n raise ValueError('Incorrect format (\"q\") in request')\n\n if request.GET.get('raw', '0') == '1':\n return HttpResponse(output)\n\n response = HttpResponse(output)\n response.headers['Content-Disposition'] = 'attachment; filename=%s' % filename\n response.headers['filename'] = filename\n return response\n\n\[email 
protected]('/api/project', methods=['POST', 'GET', 'PATCH'])\n@requires_auth\n@exception_handler\ndef api_project():\n \"\"\" Project properties and create a new for multi-session mode\n \"\"\"\n code = 200\n input_args = current_app.label_studio.input_args\n\n # new project\n if request.method == 'POST' and request.args.get('new', False):\n input_args.web_gui_project_desc = request.args.get('desc')\n g.project = project_get_or_create(multi_session_force_recreate=True)\n code = 201\n\n # update project params, ml backend settings\n elif request.method == 'PATCH':\n g.project.update_params(request.json)\n code = 201\n\n output = g.project.serialize()\n output['multi_session_mode'] = input_args.command != 'start-multi-session'\n return make_response(jsonify(output), code)\n\n\[email protected]('/api/project/config', methods=['POST'])\n@requires_auth\ndef api_save_config():\n \"\"\" Save labeling config\n \"\"\"\n label_config = None\n if 'label_config' in request.form:\n label_config = request.form['label_config']\n elif 'label_config' in request.json:\n label_config = request.json['label_config']\n\n # check config before save\n try:\n g.project.validate_label_config(label_config)\n except ValidationError as e:\n return make_response(jsonify({'label_config': e.msg_to_list()}), status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return make_response(jsonify({'label_config': [str(e)]}), status.HTTP_400_BAD_REQUEST)\n\n # update config states\n try:\n g.project.update_label_config(label_config)\n except Exception as e:\n return make_response(jsonify({'label_config': [str(e)]}), status.HTTP_400_BAD_REQUEST)\n\n return Response(status=status.HTTP_201_CREATED)\n\n\[email protected]('/api/project/import', methods=['POST'])\n@requires_auth\n@exception_handler\ndef api_import():\n \"\"\" The main API for task import, supports\n * json task data\n * files (as web form, files will be hosted by this flask server)\n * url links to images, audio, csv (if you use TimeSeries in labeling config)\n \"\"\"\n # make django compatibility for uploader module\n class DjangoRequest:\n def __init__(self): pass\n POST = request.form\n GET = request.args\n FILES = request.files\n data = request.json if request.json else request.form\n content_type = request.content_type\n\n start = time.time()\n # get tasks from request\n parsed_data, formats = uploader.load_tasks(DjangoRequest(), g.project)\n # validate tasks\n validator = TaskValidator(g.project)\n try:\n new_tasks = validator.to_internal_value(parsed_data)\n except ValidationError as e:\n return make_response(jsonify(e.msg_to_list()), status.HTTP_400_BAD_REQUEST)\n\n # get the last task id\n max_id_in_old_tasks = -1\n if not g.project.no_tasks():\n max_id_in_old_tasks = g.project.source_storage.max_id()\n\n new_tasks = Tasks().from_list_of_dicts(new_tasks, max_id_in_old_tasks + 1)\n try:\n g.project.source_storage.set_many(new_tasks.keys(), new_tasks.values())\n except NotImplementedError:\n raise NotImplementedError('Import is not supported for the current storage ' + str(g.project.source_storage))\n\n # if tasks have completion - we need to implicitly save it to target\n for i in new_tasks.keys():\n for completion in new_tasks[i].get('completions', []):\n g.project.save_completion(int(i), completion)\n\n # update schemas based on newly uploaded tasks\n g.project.update_derived_input_schema()\n g.project.update_derived_output_schema()\n\n duration = time.time() - start\n return make_response(jsonify({\n 'task_count': len(new_tasks),\n 'completion_count': 
validator.completion_count,\n 'prediction_count': validator.prediction_count,\n 'duration': duration,\n 'formats': formats,\n 'new_task_ids': [t for t in new_tasks]\n }), status.HTTP_201_CREATED)\n\n\[email protected]('/api/project/export', methods=['GET'])\n@requires_auth\n@exception_handler\ndef api_export():\n \"\"\" Export labeling results using label-studio-converter to popular formats\n \"\"\"\n export_format = request.args.get('format')\n now = datetime.now()\n\n os.makedirs(g.project.export_dir, exist_ok=True)\n\n zip_dir = os.path.join(g.project.export_dir, now.strftime('%Y-%m-%d-%H-%M-%S'))\n os.makedirs(zip_dir, exist_ok=True)\n g.project.converter.convert(g.project.output_dir, zip_dir, format=export_format)\n shutil.make_archive(zip_dir, 'zip', zip_dir)\n shutil.rmtree(zip_dir)\n\n zip_dir_full_path = os.path.abspath(zip_dir + '.zip')\n response = send_file(zip_dir_full_path, as_attachment=True)\n response.headers['filename'] = os.path.basename(zip_dir_full_path)\n return response\n\n\[email protected]('/api/project/next', methods=['GET'])\n@requires_auth\n@exception_handler\ndef api_generate_next_task():\n \"\"\" Generate next task for labeling page (label stream)\n \"\"\"\n # try to find task is not presented in completions\n completed_tasks_ids = g.project.get_completions_ids()\n task = g.project.next_task(completed_tasks_ids)\n if task is None:\n # no tasks found\n return make_response('', 404)\n\n task = resolve_task_data_uri(task, project=g.project)\n\n # collect prediction from multiple ml backends\n if g.project.ml_backends_connected:\n task = g.project.make_predictions(task)\n logger.debug('Next task:\\n' + str(task.get('id', None)))\n return make_response(jsonify(task), 200)\n\n\[email protected]('/api/project/storage-settings', methods=['GET', 'POST'])\n@requires_auth\n@exception_handler\ndef api_project_storage_settings():\n \"\"\" Set project storage settings: Amazon S3, Google CS, local file storages.\n Source storages store input tasks in json formats.\n Target storage store completions with labeling results\n \"\"\"\n\n # GET: return selected form, populated with current storage parameters\n if request.method == 'GET':\n # render all forms for caching in web\n all_forms = {'source': {}, 'target': {}}\n for storage_for in all_forms:\n for name, description in g.project.get_available_storage_names(storage_for).items():\n current_type = g.project.config.get(storage_for, {'type': ''})['type']\n current = name == current_type\n form_class = get_storage_form(name)\n form = form_class(data=g.project.get_storage(storage_for).get_params()) if current else form_class()\n all_forms[storage_for][name] = {\n 'fields': [serialize_class(field) for field in form],\n 'type': name, 'current': current, 'description': description,\n 'path': getattr(g.project, storage_for + '_storage').readable_path\n }\n # generate data key automatically\n if g.project.data_types.keys():\n for field in all_forms[storage_for][name]['fields']:\n if field['name'] == 'data_key' and not field['data']:\n field['data'] = list(g.project.data_types.keys())[0]\n return make_response(jsonify(all_forms), 200)\n\n # POST: update storage given filled form\n if request.method == 'POST':\n selected_type = request.args.get('type', '')\n storage_for = request.args.get('storage_for')\n current_type = g.project.config.get(storage_for, {'type': ''})['type']\n selected_type = selected_type if selected_type else current_type\n\n form = get_storage_form(selected_type)(data=request.json)\n\n if form.validate_on_submit():\n 
storage_kwargs = dict(form.data)\n storage_kwargs['type'] = request.json['type'] # storage type\n try:\n g.project.update_storage(storage_for, storage_kwargs)\n except Exception as e:\n traceback = tb.format_exc()\n logger.error(str(traceback))\n return make_response(jsonify({'detail': 'Error while storage update: ' + str(e)}), 400)\n else:\n return make_response(jsonify({'result': 'ok'}), 201)\n else:\n logger.error('Errors: ' + str(form.errors) + ' for request body ' + str(request.json))\n return make_response(jsonify({'errors': form.errors}), 400)\n\n\[email protected]('/api/project-switch', methods=['GET', 'POST'])\n@requires_auth\n@exception_handler\ndef api_project_switch():\n \"\"\" Switch projects in multi-session mode\n \"\"\"\n input_args = current_app.label_studio.input_args\n\n if request.args.get('uuid') is None:\n return make_response(\"Not a valid UUID\", 400)\n\n uuid = request.args.get('uuid')\n user = Project.get_user_by_project(uuid, input_args.root_dir)\n\n # not owner user tries to open shared project\n if user != g.user:\n # create/append shared projects for user\n if 'shared_projects' not in session:\n session['shared_projects'] = {}\n session['shared_projects'].update({uuid: {}})\n\n # switch project\n session['project'] = uuid\n\n output = g.project.serialize()\n output['multi_session_mode'] = input_args.command == 'start-multi-session'\n if request.method == 'GET':\n return redirect(url_for('label_studio.setup_page'))\n else:\n return make_response(jsonify(output), 200)\n\n\[email protected]('/api/tasks', methods=['GET', 'DELETE'])\n@requires_auth\n@exception_handler\ndef api_all_tasks():\n \"\"\" Tasks API: retrieve by filters, delete all tasks\n \"\"\"\n # retrieve tasks (plus completions and predictions) with pagination & ordering\n if request.method == 'GET':\n # get filter parameters from request\n fields = request.values.get('fields', 'all').split(',')\n page, page_size = int(request.values.get('page', 1)), int(request.values.get('page_size', 10))\n order = request.values.get('order', 'id')\n if page < 1 or page_size < 1:\n return make_response(jsonify({'detail': 'Incorrect page or page_size'}), 422)\n\n params = SimpleNamespace(fields=fields, page=page, page_size=page_size, order=order)\n tasks = prepare_tasks(g.project, params)\n return make_response(jsonify(tasks), 200)\n\n # delete all tasks with completions\n if request.method == 'DELETE':\n g.project.delete_tasks()\n return make_response(jsonify({'detail': 'deleted'}), 204)\n\n\[email protected]('/api/tasks/<task_id>', methods=['GET', 'DELETE'])\n@requires_auth\n@exception_handler\ndef api_task_by_id(task_id):\n \"\"\" Get task by id, this call will refresh this task predictions\n \"\"\"\n task_id = int(task_id)\n\n # try to get task with completions first\n if request.method == 'GET':\n task_data = g.project.get_task_with_completions(task_id) or g.project.source_storage.get(task_id)\n task_data = resolve_task_data_uri(task_data, project=g.project)\n\n if g.project.ml_backends_connected:\n task_data = g.project.make_predictions(task_data)\n\n # change indent for pretty jsonify\n indent = 2 if request.values.get('pretty', False) else None\n response = current_app.response_class(\n json.dumps(task_data, indent=indent) + \"\\n\",\n mimetype=current_app.config[\"JSONIFY_MIMETYPE\"],\n )\n return make_response(response, 200)\n\n # delete task\n elif request.method == 'DELETE':\n g.project.remove_task(task_id)\n return make_response(jsonify('Task deleted.'), 204)\n\n\[email 
protected]('/api/tasks/<task_id>/completions', methods=['POST', 'DELETE'])\n@requires_auth\n@exception_handler\ndef api_tasks_completions(task_id):\n \"\"\" Save new completion or delete all completions\n \"\"\"\n task_id = int(task_id)\n\n # save completion\n if request.method == 'POST':\n completion = request.json\n\n # cancelled completion\n was_cancelled = request.values.get('was_cancelled', False)\n if was_cancelled:\n completion['was_cancelled'] = True\n\n # regular completion\n else:\n completion.pop('skipped', None) # deprecated\n completion.pop('was_cancelled', None)\n\n completion_id = g.project.save_completion(task_id, completion)\n return make_response(json.dumps({'id': completion_id}), 201)\n\n # remove all task completions\n if request.method == 'DELETE':\n if g.project.config.get('allow_delete_completions', False):\n g.project.delete_task_completions(task_id)\n return make_response('deleted', 204)\n else:\n return make_response({'detail': 'Completion removing is not allowed in server config'}, 422)\n\n\[email protected]('/api/tasks/<task_id>/completions/<completion_id>', methods=['PATCH', 'DELETE'])\n@requires_auth\n@exception_handler\ndef api_completion_by_id(task_id, completion_id):\n \"\"\" Update existing completion with patch.\n \"\"\"\n # catch case when completion is not submitted yet, but user tries to act with it\n if completion_id == 'null':\n return make_response('completion id is null', 200)\n\n task_id = int(task_id)\n completion_id = int(completion_id)\n\n # update completion\n if request.method == 'PATCH':\n completion = request.json\n completion['id'] = completion_id\n if 'was_cancelled' in completion:\n completion['was_cancelled'] = False\n\n g.project.save_completion(task_id, completion)\n return make_response('ok', 201)\n\n # delete completion\n elif request.method == 'DELETE':\n if g.project.config.get('allow_delete_completions', False):\n g.project.delete_task_completion(task_id, completion_id)\n return make_response('deleted', 204)\n else:\n return make_response({'detail': 'Completion removing is not allowed in server config'}, 422)\n\n\[email protected]('/api/completions', methods=['GET', 'DELETE'])\n@requires_auth\n@exception_handler\ndef api_all_completions():\n \"\"\" Get all completion ids\n Delete all project completions\n \"\"\"\n # delete all completions\n if request.method == 'DELETE':\n g.project.delete_all_completions()\n return make_response('done', 201)\n\n # get all completions ids\n elif request.method == 'GET':\n ids = g.project.get_completions_ids()\n return make_response(jsonify({'ids': ids}), 200)\n\n else:\n return make_response('Incorrect request method', 500)\n\n\[email protected]('/api/models', methods=['GET', 'DELETE'])\n@requires_auth\n@exception_handler\ndef api_models():\n \"\"\" List ML backends names and remove it by name\n \"\"\"\n # list all ml backends\n if request.method == 'GET':\n model_names = [model.model_name for model in g.project.ml_backends]\n return make_response(jsonify({'models': model_names}), 200)\n\n # delete specified ml backend\n if request.method == 'DELETE':\n ml_backend_name = request.json['name']\n g.project.remove_ml_backend(ml_backend_name)\n return make_response(jsonify('ML backend deleted'), 204)\n\n\[email protected]('/api/models/train', methods=['POST'])\n@requires_auth\n@exception_handler\ndef api_train():\n \"\"\" Send train signal to ML backend\n \"\"\"\n if g.project.ml_backends_connected:\n training_started = g.project.train()\n if training_started:\n logger.debug('Training started.')\n 
return make_response(jsonify({'details': 'Training started'}), 200)\n else:\n logger.debug('Training failed.')\n return make_response(\n jsonify('Training is not started: seems that you don\\'t have any ML backend connected'), 400)\n else:\n return make_response(jsonify(\"No ML backend\"), 400)\n\n\[email protected]('/api/models/predictions', methods=['GET', 'POST'])\n@requires_auth\n@exception_handler\ndef api_predictions():\n \"\"\" Make ML predictions using ML backends\n\n param mode: \"data\" [default] - task data will be taken and predicted from request.json\n \"all_tasks\" - make predictions for all tasks in DB\n \"\"\"\n mode = request.values.get('mode', 'data') # data | all_tasks\n if g.project.ml_backends_connected:\n\n # make prediction for task data from request\n if mode == 'data':\n if request.json is None:\n return make_response(jsonify({'detail': 'no task data found in request json'}), 422)\n\n task = request.json if 'data' in request.json else {'data': request.json}\n task_with_predictions = g.project.make_predictions(task)\n return make_response(jsonify(task_with_predictions), 200)\n\n # make prediction for all tasks\n elif mode == 'all_tasks':\n # get tasks ids without predictions\n tasks_with_predictions = {}\n for task_id, task in g.project.source_storage.items():\n task_pred = g.project.make_predictions(task)\n tasks_with_predictions[task_pred['id']] = task_pred\n\n # save tasks with predictions to storage\n g.project.source_storage.set_many(tasks_with_predictions.keys(), tasks_with_predictions.values())\n return make_response(jsonify({'details': 'predictions are ready'}), 200)\n\n # unknown mode\n else:\n return make_response(jsonify({'detail': 'unknown mode'}), 422)\n else:\n return make_response(jsonify(\"No ML backend\"), 400)\n\n\[email protected]('/api/states', methods=['GET'])\n@requires_auth\n@exception_handler\ndef stats():\n \"\"\" Save states\n \"\"\"\n return make_response('{\"status\": \"done\"}', 200)\n\n\[email protected]('/api/health', methods=['GET'])\n@requires_auth\n@exception_handler\ndef health():\n \"\"\" Health check\n \"\"\"\n return make_response('{\"status\": \"up\"}', 200)\n\n\[email protected](ValidationError)\ndef validation_error_handler(error):\n logger.error(error)\n return str(error), 500\n\n\[email protected]_template_filter('json')\ndef json_filter(s):\n return json.dumps(s)\n\n\ndef main():\n # this will avoid looped imports and will register deprecated endpoints in the blueprint\n import label_studio.deprecated\n\n input_args = parse_input_args()\n app = create_app(LabelStudioConfig(input_args=input_args))\n\n # setup logging level\n if input_args.log_level:\n logging.root.setLevel(input_args.log_level)\n\n # On `init` command, create directory args.project_name with initial project state and exit\n if input_args.command == 'init':\n Project.create_project_dir(input_args.project_name, input_args)\n return\n\n elif input_args.command == 'start':\n\n # If `start --init` option is specified, do the same as with `init` command, but continue to run app\n if input_args.init:\n Project.create_project_dir(input_args.project_name, input_args)\n\n if not os.path.exists(Project.get_project_dir(input_args.project_name, input_args)):\n raise FileNotFoundError(\n 'Project directory \"{pdir}\" not found. 
'\n 'Did you miss create it first with `label-studio init {pdir}` ?'.format(\n pdir=Project.get_project_dir(input_args.project_name, input_args)))\n\n # On `start` command, launch browser if --no-browser is not specified and start label studio server\n if input_args.command == 'start':\n import label_studio.utils.functions\n import label_studio.utils.auth\n config = Project.get_config(input_args.project_name, input_args)\n\n # set username and password\n label_studio.utils.auth.USERNAME = input_args.username or \\\n config.get('username') or label_studio.utils.auth.USERNAME\n label_studio.utils.auth.PASSWORD = input_args.password or config.get('password', '')\n\n # set host name\n host = input_args.host or config.get('host', 'localhost')\n port = input_args.port or config.get('port', 8080)\n server_host = 'localhost' if host == 'localhost' else '0.0.0.0' # web server host\n\n # ssl certificate and key\n cert_file = input_args.cert_file or config.get('cert')\n key_file = input_args.key_file or config.get('key')\n ssl_context = None\n if cert_file and key_file:\n config['protocol'] = 'https://'\n ssl_context = (cert_file, key_file)\n\n # check port is busy\n if not input_args.debug and check_port_in_use('localhost', port):\n old_port = port\n port = int(port) + 1\n print('\\n*** WARNING! ***\\n* Port ' + str(old_port) + ' is in use.\\n' +\n '* Trying to start at ' + str(port) +\n '\\n****************\\n')\n\n # external hostname is used for data import paths, they must be absolute always,\n # otherwise machine learning backends couldn't access them\n set_web_protocol(input_args.protocol or config.get('protocol', 'http://'))\n external_hostname = get_web_protocol() + host.replace('0.0.0.0', 'localhost')\n if host in ['0.0.0.0', 'localhost', '127.0.0.1']:\n external_hostname += ':' + str(port)\n set_external_hostname(external_hostname)\n\n start_browser('http://localhost:' + str(port), input_args.no_browser)\n if input_args.use_gevent:\n app.debug = input_args.debug\n ssl_args = {'keyfile': key_file, 'certfile': cert_file} if ssl_context else {}\n http_server = WSGIServer((server_host, port), app, log=app.logger, **ssl_args)\n http_server.serve_forever()\n else:\n app.run(host=server_host, port=port, debug=input_args.debug, ssl_context=ssl_context)\n\n # On `start-multi-session` command, server creates one project per each browser sessions\n elif input_args.command == 'start-multi-session':\n server_host = input_args.host or '0.0.0.0'\n port = input_args.port or 8080\n\n if input_args.use_gevent:\n app.debug = input_args.debug\n http_server = WSGIServer((server_host, port), app, log=app.logger)\n http_server.serve_forever()\n else:\n app.run(host=server_host, port=port, debug=input_args.debug)\n"
] | [
[
"pandas.DataFrame.from_dict"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
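The label-studio handlers in the row above all share one decorator stack — a Flask route registration (apparently on a blueprint, given `url_for('label_studio.setup_page')`), then `@requires_auth` and `@exception_handler` — with `request.method` branching inside a single view function. Below is a minimal, self-contained sketch of that pattern; the blueprint name, the stub decorators and the in-memory `TASKS` store are illustrative assumptions, not the project's actual implementation.

```python
# Sketch of the route/auth/exception decorator stack used by the handlers above.
# Blueprint name, stub auth and the TASKS dict are assumptions for illustration only.
import functools
from flask import Flask, Blueprint, jsonify, make_response, request

blueprint = Blueprint("label_studio", __name__)
TASKS = {1: {"id": 1, "data": {"text": "hello"}}}   # hypothetical task store


def requires_auth(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)                   # stub: accept every request
    return wrapper


def exception_handler(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:                      # broad handler, mirroring the style above
            return make_response(jsonify({"detail": str(e)}), 500)
    return wrapper


@blueprint.route("/api/tasks/<int:task_id>", methods=["GET", "DELETE"])
@requires_auth
@exception_handler
def api_task_by_id(task_id):
    if request.method == "GET":
        return make_response(jsonify(TASKS[task_id]), 200)
    TASKS.pop(task_id, None)                        # DELETE branch
    return make_response(jsonify("Task deleted."), 204)


app = Flask(__name__)
app.register_blueprint(blueprint)
```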
popcornell/tfelm | [
"ff3fa7eb3359ac67d532345c9971bcb1401c9f8f"
] | [
"tfelm/elm.py"
] | [
"import tensorflow as tf\nfrom tfelm.base_slfn import Fdnn\n\nimport time, math\nimport os\nimport numpy as np\n\n\nclass ELM(Fdnn):\n 'Single Layer ELM object'\n\n def __init__(self, input_size,\n output_size,\n type='c',\n l2norm=None,\n name=\"elm\"\n ):\n\n super(__class__, self).__init__(input_size,\n output_size,\n name=name,\n )\n\n self.l2norm = l2norm\n if self.l2norm is None:\n self.l2norm = 1e-05 # from CuDNN.h\n\n self.HH_HT_op = None\n self.B_op = None\n self.type = type\n\n def compile(self):\n\n assert self.n_hidden_layer is 1, \\\n \"elm object supports only one hidden layer and before compiling one hidden layer should be created\"\n\n super(__class__, self).compile()\n\n # define training structure\n with tf.name_scope(\"training_\" + self.name):\n # initialization and training graph definition\n self.HH = tf.Variable(tf.multiply(tf.eye(self.n_neurons[self.n_hidden_layer], dtype=tf.float32),\n tf.cast(self.l2norm, tf.float32)),\n name='HH', trainable=False)\n\n self.HT = tf.Variable(tf.zeros([self.n_neurons[self.n_hidden_layer], self.n_neurons[-1]]), name='HT',\n trainable=False)\n\n self.HH_HT_op = tf.group(\n tf.assign_add(self.HH, tf.matmul(self.H[- 1], self.H[- 1], transpose_a=True)),\n tf.assign_add(self.HT, tf.matmul(self.H[- 1], self.y, transpose_a=True)), name='HH_HT_op'\n )\n\n self.B_op = tf.assign(self.B, tf.matmul(tf.matrix_inverse(self.HH), self.HT), name='B_op')\n\n self.sess.run([self.HH.initializer, self.HT.initializer])\n\n def train(self, tf_iterator, n_batches=None):\n\n next_batch = tf_iterator.get_next()\n\n t0 = time.time()\n\n batch = 1\n while True:\n try:\n start = time.time()\n # get next batch of data\n x_batch, y_batch = self.sess.run(next_batch)\n\n # Run the training op\n self.sess.run(self.HH_HT_op, feed_dict={self.x: x_batch,\n self.y: y_batch})\n\n if n_batches is not None:\n if batch % 25 == 0:\n eta = (time.time() - start) * (n_batches - batch)\n eta = '%d:%02d' % (eta // 60, eta % 60)\n print(\"{}/{} ETA:{}\".format(batch, n_batches, eta))\n batch += 1\n\n except tf.errors.OutOfRangeError or IndexError:\n break\n\n self.sess.run(self.B_op)\n print(\"Training of ELM {} ended in {}:{}:{:5f}\".format(self.name, math.floor((time.time() - t0) // 3600),\n math.floor((time.time() - t0) % 3600 // 60),\n ((time.time() - t0) % 3600 % 60)))\n print(\"#\" * 100)\n\n self.saver = tf.train.Saver()\n\n def fit(self, x, y, batch_size=1024):\n\n print(\"Creating Dataset and Iterator from tensors\")\n\n # from https://www.tensorflow.org/programmers_guide/datasets#consuming_numpy_arrays\n # recommended method:\n\n n_batches = int(np.ceil(x.shape[0] / batch_size))\n\n dataset = tf.data.Dataset.from_tensor_slices((self.x, self.y)).batch(batch_size=batch_size)\n iterator = dataset.make_initializable_iterator()\n\n self.sess.run(iterator.initializer, feed_dict={self.x: x, self.y: y})\n\n self.train(iterator, n_batches)\n\n # re-initialize the iterator\n self.sess.run(iterator.initializer, feed_dict={self.x: x, self.y: y})\n\n if self.type is 'c':\n train_perf = self.evaluate(tf_iterator=iterator, metric='acc')\n\n\n elif self.type is 'r':\n train_perf = self.evaluate(tf_iterator=iterator, metric='mse')\n\n return train_perf\n\n def evaluate(self, x=None, y=None, tf_iterator=None, metric='acc', batch_size=1024):\n\n if tf_iterator is None:\n # create iterator\n assert x is not None and y is not None, \\\n \"Both feature and labels arrays should be provided when an iterator is not passed to the function\"\n\n dataset = tf.data.Dataset.from_tensor_slices((self.x, 
self.y)).batch(batch_size=batch_size)\n tf_iterator = dataset.make_initializable_iterator()\n\n self.sess.run(tf_iterator.initializer, feed_dict={self.x: x, self.y: y})\n\n print(\"Evaluating network performance\")\n\n next_batch = tf_iterator.get_next()\n\n if metric is 'acc':\n with tf.name_scope('accuracy'):\n correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_out, 1))\n eval_metric = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n elif metric is 'mse':\n with tf.name_scope('mse'):\n eval_metric = tf.reduce_mean(tf.squared_difference(self.y_out, self.y, name='mse'))\n\n else:\n ValueError(\"Invalid performance metric, use mse or acc\")\n\n metric_vect = []\n\n while True:\n try:\n x_batch, y_batch = self.sess.run(next_batch)\n\n metric_vect.append(self.sess.run(eval_metric, feed_dict={self.x: x_batch, self.y: y_batch}))\n except tf.errors.OutOfRangeError:\n break\n\n mean_metric = np.mean(metric_vect)\n\n if metric is 'acc':\n print('Accuracy: %.7f' % mean_metric)\n elif metric is 'mse':\n print('MSE: %.7f' % mean_metric)\n\n return mean_metric\n\n def predict(self, x=None, tf_iterator=None, batch_size=1024):\n\n if tf_iterator is None:\n # create iterator\n assert x is not None, \\\n \"Feature array should be provided when an iterator is not passed to the function\"\n\n dataset = tf.data.Dataset.from_tensor_slices((self.x)).batch(batch_size=batch_size)\n tf_iterator = dataset.make_initializable_iterator()\n\n self.sess.run(tf_iterator.initializer, feed_dict={self.x: x})\n\n print(\"Predicting...\")\n\n next_batch = tf_iterator.get_next()\n\n y_out = []\n while True:\n try:\n x_batch, _ = self.sess.run(next_batch)\n y_out.extend(self.sess.run(self.y_out, feed_dict={self.x: x_batch}))\n\n except tf.errors.OutOfRangeError:\n break\n\n print(\"Done\")\n\n return np.asarray(y_out)\n\n def reset(self):\n\n self.compile()\n\n def save(self, ckpt_path=None):\n\n if ckpt_path is None:\n ckpt_path = os.path.join(os.getcwd(), self.name)\n\n self.saver.save(self.sess, ckpt_path, write_meta_graph=True)\n print(\"Model saved in path: %s\" % ckpt_path)\n\n def load(self, ckpt_path=None):\n\n if ckpt_path is None:\n ckpt_path = os.path.join(os.getcwd(), self.name + '.ckpt')\n\n saver = tf.train.import_meta_graph(ckpt_path + '.meta')\n\n saver.restore(self.sess, ckpt_path)\n\n # TODO fix load\n ''' \n graph = tf.get_default_graph()\n self.x = graph.get_tensor_by_name(\"input_\" + self.name + ':' + \"x:0\")\n self.y = graph.get_tensor_by_name(\"y\")\n self.y_out = graph.get_operation_by_name(\"y_out:0\")\n self.HH_HT_op = graph.get_operation_by_name(\"HH_HT_op:0\")\n self.B_op = graph.get_operation_by_name(\"B_op:0\")\n\n print(\"Model restored.\")\n '''\n"
] | [
[
"tensorflow.matmul",
"tensorflow.zeros",
"numpy.asarray",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.cast",
"tensorflow.matrix_inverse",
"tensorflow.eye",
"tensorflow.train.import_meta_graph",
"numpy.ceil",
"numpy.mean",
"tensorflow.name_scope",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.squared_difference"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
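The `HH_HT_op`/`B_op` graph in the tfelm row above accumulates H^T H and H^T Y over mini-batches and then solves for the output weights in closed form. The same ridge-regularized least-squares step, B = (H^T H + l2 * I)^(-1) H^T Y, can be written in a few lines of NumPy; the layer sizes, the random hidden projection and the `l2norm` value below are assumptions chosen only to make the sketch self-contained.

```python
# Batched closed-form ELM output weights, mirroring what HH_HT_op / B_op accumulate
# and solve above. Sizes, the random hidden layer and l2norm are illustrative assumptions.
import numpy as np

rng = np.random.default_rng(0)
n_samples, n_inputs, n_hidden, n_outputs = 1000, 20, 128, 3
l2norm = 1e-5

X = rng.standard_normal((n_samples, n_inputs))
Y = rng.standard_normal((n_samples, n_outputs))
W = rng.standard_normal((n_inputs, n_hidden))      # fixed random hidden projection
b = rng.standard_normal(n_hidden)

HH = l2norm * np.eye(n_hidden)                     # accumulators, as in compile()
HT = np.zeros((n_hidden, n_outputs))

for start in range(0, n_samples, 256):             # "training" loop over mini-batches
    H = np.tanh(X[start:start + 256] @ W + b)      # hidden activations for this batch
    HH += H.T @ H
    HT += H.T @ Y[start:start + 256]

B = np.linalg.solve(HH, HT)                        # the B_op step (inverse(HH) @ HT)
pred = np.tanh(X @ W + b) @ B
print("train MSE:", float(np.mean((pred - Y) ** 2)))
```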
YaoChengTang/DecNet | [
"b623ac8d0505ec68eb930ad7a21fe9d84dd07543"
] | [
"modules/loss.py"
] | [
"import os\nimport sys\nimport time\nimport math\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nfrom torch.nn.modules.module import Module\n\nimport utils.utils as utils\n\n\n\nclass Loss(nn.Module):\n def __init__(self, loss_type, if_overmask=False, stop_stage_id=4, if_train=True, thold=0.5, alpha=0.1):\n super(Loss, self).__init__()\n \n self.loss_type = loss_type.lower()\n self.if_overmask = if_overmask\n self.stop_stage_id = stop_stage_id\n self.if_train = if_train\n self.thold = thold\n self.alpha = alpha\n\n assert self.loss_type in [\"chamfer\", \"multi_stage_regression_uploss\", \"LR_consistency\", \"multi_stage_regression_upsampleloss\", \"multi_stage_regression_upmaskloss\"], \"No such loss: {}\".format(self.loss_type)\n \n \n def forward(self, pred_list=None, fusion_list=None, dense_list=None, sparse_list=None, left_mask_list=None, gt=None, weights=None, num_stage=None, down_func_name=None, down_scale=None, max_disp=None, sparse_mask_list=None, left_feature_map_all=None, right_feature_map_all=None, left_detail_list=None, right_detail_list=None, right_mask_list=None):\n \"\"\"function: multiple stage chamfer loss\n args:\n pred_list: the list of predicted multi-scale disparity maps, each size N*H/s*W/s;\n fusion_list: the list of disparity map after fusion, N*H/s*W/s;\n dense_list: the list of initial dense disparity map, which is the upsampled version from low layer, N*H/s*W/s;\n sparse_list: the list of sparse disparity map, N*H/s*W/s;\n left_mask_list: the list of binary mask representing the fine-grained areas;\n gt: the ground truth, N*H*W;\n weights: weights for loss in each scale space;\n num_stage: total number of stages, the number of pyramid's level, num_stage-1 is equal to the number of downsampling iteration;\n down_func_name: the name of downsampling fucntion;\n down_scale: scale size of each downsampling;\n max_disp: the maximum disparity value;\n sparse_mask_list: the generated soft mask for fusion;\n left_feature_map_all: the left feature maps;\n right_feature_map_all: the right feature maps;\n left_detail_list: the generated details from left view;\n right_detail_list: the generated details from right view;\n right_mask_list: the list of binary mask representing the fine-grained areas;\n \"\"\"\n if self.loss_type == \"chamfer\" :\n return self.multi_stage_chamfer_loss(pred_list, fusion_list, dense_list, sparse_list, left_mask_list, gt, weights, num_stage, down_func_name, down_scale, max_disp, sparse_mask_list=sparse_mask_list)\n elif self.loss_type == \"multi_stage_regression_uploss\" :\n return self.multi_stage_regression_Uploss(pred_list, fusion_list, dense_list, sparse_list, left_mask_list, gt, weights, num_stage, down_func_name, down_scale, max_disp, sparse_mask_list=sparse_mask_list)\n elif self.loss_type == \"multi_stage_regression_upmaskloss\" :\n return self.multi_stage_regression_UpMaskloss(pred_list, fusion_list, dense_list, sparse_list, left_mask_list, gt, weights, num_stage, down_func_name, down_scale, max_disp, sparse_mask_list=sparse_mask_list, left_detail_list=left_detail_list, right_detail_list=right_detail_list, right_mask_list=right_mask_list,\n left_feature_map_all=left_feature_map_all, right_feature_map_all=right_feature_map_all)\n elif self.loss_type == \"LR_consistency\" :\n return self.LR_consistency_loss(pred_list, left_feature_map_all, right_feature_map_all, weights, num_stage, down_func_name, down_scale, max_disp)\n elif self.loss_type == 
\"self_training\" :\n return self.test_loss_func(pred_list[0], gt, max_disp)\n elif self.loss_type == \"multi_stage_regression_upsampleloss\" :\n return self.multi_stage_regression_UpSampleloss(pred_list, fusion_list, dense_list, sparse_list, left_mask_list, gt, weights, num_stage, down_func_name, down_scale, max_disp, sparse_mask_list=sparse_mask_list)\n \n \n def sparseChamfer(self, x, y, down_ratio):\n \"\"\"function: compute the chamfer distance\n args:\n x: B*1*H*W;\n y: B*1*sH*sW;\n down_ratio: the current total downsampling size;\n return:\n chmafer distance\n \"\"\"\n b,c,h,w = x.shape\n down_ratio = int(down_ratio)\n y = F.unfold(y, kernel_size=(down_ratio,down_ratio), stride=(down_ratio,down_ratio)).view( b, down_ratio*down_ratio, h, w ) # B*ss*H*W\n \n mask = y==0\n chamfer_dist = torch.pow(x-y,2)\n chamfer_dist = chamfer_dist + mask*1e6\n \n return torch.min(chamfer_dist, dim=1)[0]\n \n \n def multi_stage_chamfer_loss(self, pred_list, fusion_list, dense_list, sparse_list, left_mask_list, gt, weights, num_stage, down_func_name, down_scale, max_disp, sparse_mask_list=None):\n \"\"\"function: multiple stage chamfer loss\n args:\n pred_list: the list of predicted multi-scale disparity maps, each size N*H/s*W/s;\n gt: the ground truth, N*H*W;\n weights: weights for loss in each scale space;\n num_stage: total number of stages, the number of pyramid's level, num_stage-1 is equal to the number of downsampling iteration;;\n down_func_name: the name of downsampling fucntion\n down_scale: scale size of each downsampling\n max_disp: the maximum disparity value\n \"\"\"\n assert (pred_list[-1].size()[-2], pred_list[-1].size()[-1]) == (gt.size()[-2],gt.size()[-1]), \"the size of predcited disparity map is not equal to the size of groundtruth.\"\n tot_loss = 0.\n gt_list = []\n loss_list = []\n for stage_id in range(num_stage) :\n pred = pred_list[stage_id]\n \n if stage_id+1 < num_stage : # we do not need to interpolate the gt with original resolution\n down_size = down_scale**(num_stage-stage_id-1)\n if down_func_name in [\"bilinear\", \"bicubic\"] :\n cur_gt = F.interpolate(gt.unsqueeze(1) / down_size, scale_factor=1/down_size, mode=down_func_name).squeeze(1)\n elif down_func_name==\"max\" :\n cur_gt = F.max_pool2d(gt.unsqueeze(1) / down_size, down_size, down_size, 0, 1, False, False).squeeze(1)\n elif down_func_name==\"min\" :\n tmp_gt = gt*(gt>0) + 1e6*(gt==0)\n cur_gt = -F.max_pool2d(-tmp_gt.unsqueeze(1) / down_size, down_size, down_size, 0, 1, False, False).squeeze(1)\n else :\n raise Exception(\"down_func_name should be bilinear or max, but current it is {}\")\n else :\n cur_gt = gt\n down_size = 1.\n gt_list.append(cur_gt)\n \n if stage_id == 0 :\n error = self.sparseChamfer(pred.unsqueeze(1)*down_size, gt.unsqueeze(1), down_size)\n error = torch.sqrt(error+1e-6)\n mask = error<100\n disp_loss = MyHubeLoss.apply(error[mask], 1, 2)\n tot_loss += disp_loss * weights[stage_id]\n loss_list.append(disp_loss)\n \n else :\n dense = dense_list[stage_id-1]\n sparse = sparse_list[stage_id-1]\n fusion = fusion_list[stage_id-1]\n \n dense_error = self.sparseChamfer(dense.unsqueeze(1)*down_size, gt.unsqueeze(1), down_size)\n dense_error = torch.sqrt(dense_error+1e-6)\n mask = dense_error<100\n dense_disp_loss = MyHubeLoss.apply(dense_error[mask], 1, 2)\n \n sparse_error = self.sparseChamfer(sparse.unsqueeze(1)*down_size, gt.unsqueeze(1), down_size)\n sparse_error = torch.sqrt(sparse_error+1e-6)\n mask = (sparse_error<100) & (left_mask_list[stage_id-1] == 1)\n sparse_disp_loss = 
MyHubeLoss.apply(sparse_error[mask], 1, 2)\n \n fusion_error = self.sparseChamfer(fusion.unsqueeze(1)*down_size, gt.unsqueeze(1), down_size)\n fusion_error = torch.sqrt(fusion_error+1e-6)\n mask = fusion_error<100\n fusion_disp_loss = MyHubeLoss.apply(fusion_error[mask], 1, 2)\n \n pred_error = self.sparseChamfer(pred.unsqueeze(1)*down_size, gt.unsqueeze(1), down_size)\n pred_error = torch.sqrt(pred_error+1e-6)\n mask = pred_error<100\n pred_disp_loss = MyHubeLoss.apply(pred_error[mask], 1, 2)\n \n # print((left_mask_list[stage_id-1]).shape, sparse_mask_list[stage_id-1].shape)\n \n loss_list.append(dense_disp_loss)\n loss_list.append(sparse_disp_loss)\n loss_list.append( sparse_mask_list[stage_id-1][ left_mask_list[stage_id-1]==1 ].mean() )\n loss_list.append(fusion_disp_loss)\n loss_list.append(pred_disp_loss)\n \n tot_loss += (pred_disp_loss*0.5 + dense_disp_loss*0.1 + sparse_disp_loss*0.2*1/(10+stage_id*3.75) + fusion_disp_loss*0.2) * weights[stage_id]\n # tot_loss += (pred_loss*0.5 + dense_loss*0.1 + sparse_loss*0.2 + fusion_loss*0.2) * weights[stage_id]\n \n return gt_list, pred_list, tot_loss, loss_list\n \n \n def multi_stage_regression_Uploss(self, pred_list, fusion_list=None, dense_list=None, sparse_list=None, left_mask_list=None, gt=None, weights=None, num_stage=None, down_func_name=None, down_scale=None, max_disp=None, sparse_mask_list=None):\n \"\"\"function: multiple stage loss\n args:\n pred_list: the list of predicted multi-scale disparity maps, each size N*H/s*W/s;\n gt: the ground truth, N*H*W;\n weights: weights for loss in each scale space;\n num_stage: total number of stages, the number of pyramid's level, num_stage-1 is equal to the number of downsampling iteration;;\n down_func_name: the name of downsampling fucntion\n down_scale: scale size of each downsampling\n max_disp: the maximum disparity value\n \"\"\"\n assert (pred_list[-1].size()[-2], pred_list[-1].size()[-1]) == (gt.size()[-2],gt.size()[-1]), \"the size of predcited disparity map is not equal to the size of groundtruth.\"\n tot_loss = 0.\n gt_list = []\n loss_list = []\n for stage_id in range(num_stage) :\n pred = pred_list[stage_id]\n \n if stage_id+1 < num_stage : # we do not need to interpolate1 the gt with original resolution\n down_size = down_scale**(num_stage-stage_id-1)\n if down_func_name in [\"bilinear\", \"bicubic\"] :\n cur_gt = F.interpolate(gt.unsqueeze(1) / down_size, scale_factor=1/down_size, mode=down_func_name).squeeze(1)\n elif down_func_name==\"max\" :\n cur_gt = F.max_pool2d(gt.unsqueeze(1) / down_size, down_size, down_size, 0, 1, False, False).squeeze(1)\n elif down_func_name==\"min\" :\n tmp_gt = gt*(gt>0) + 1e6*(gt==0)\n cur_gt = -F.max_pool2d(-tmp_gt.unsqueeze(1) / down_size, down_size, down_size, 0, 1, False, False).squeeze(1)\n else :\n raise Exception(\"down_func_name should be bilinear or max, but current it is {}\")\n else :\n cur_gt = gt\n down_size = 1.\n gt_list.append(cur_gt)\n mask = (cur_gt < max_disp/down_size) & (cur_gt > 0)\n \n # if training set is dense disparity map after completion\n if self.if_overmask :\n mask[:, :int(108//down_size), :] = 0\n \n if stage_id == 0 or stage_id>=self.stop_stage_id :\n depth_loss = F.smooth_l1_loss(pred[mask] * down_size, cur_gt[mask] * down_size, reduction='mean')\n # depth_loss = MyLoss2Function.apply(pred[mask], cur_gt[mask], 1/down_size, 2/down_size)\n tot_loss += depth_loss * weights[stage_id]\n loss_list.append(depth_loss)\n \n else :\n dense = dense_list[stage_id-1]\n sparse = sparse_list[stage_id-1]\n fusion = 
fusion_list[stage_id-1]\n \n left_mask = (left_mask_list[stage_id-1] == 1)\n whole_mask = left_mask * mask\n \n # dense_loss = MyLoss2Function.apply(dense[mask], cur_gt[mask], 1/down_size, 2/down_size)\n # sparse_loss = MyLoss2Function.apply(sparse[whole_mask], cur_gt[whole_mask], 1/down_size, 2/down_size)\n # fusion_loss = MyLoss2Function.apply(fusion[mask], cur_gt[mask], 1/down_size, 2/down_size)\n # pred_loss = MyLoss2Function.apply(pred[mask], cur_gt[mask], 1/down_size, 2/down_size)\n \n dense_loss = F.smooth_l1_loss(dense[mask] * down_size, cur_gt[mask] * down_size, reduction='mean')\n sparse_loss = F.smooth_l1_loss(sparse[whole_mask] * down_size, cur_gt[whole_mask] * down_size, reduction='mean')\n fusion_loss = F.smooth_l1_loss(fusion[mask] * down_size, cur_gt[mask] * down_size, reduction='mean')\n pred_loss = F.smooth_l1_loss(pred[mask] * down_size, cur_gt[mask] * down_size, reduction='mean')\n \n \n \n loss_list.append(dense_loss)\n loss_list.append(sparse_loss)\n loss_list.append( sparse_mask_list[stage_id-1][left_mask].mean() )\n loss_list.append(fusion_loss)\n loss_list.append(pred_loss)\n \n tot_loss += (pred_loss*0.5 + dense_loss*0.1 + sparse_loss*0.2*1/(10+stage_id*3.75) + fusion_loss*0.2) * weights[stage_id]\n # tot_loss += (pred_loss*0.5 + dense_loss*0.1 + sparse_loss*0.2 + fusion_loss*0.2) * weights[stage_id]\n \n return gt_list, pred_list, tot_loss, loss_list\n \n \n def focal_loss(self, pt, gt, gamma=2, alpha=0.8):\n loss = - alpha * (1-pt)**gamma * gt * torch.log(pt+0.00001) - (1-alpha) * pt**gamma * (1-gt) * torch.log(1-pt+0.00001)\n return torch.mean(loss)\n \n def dice_loss(self, x, gt, smooth=1):\n N = gt.size(0)\n x_flat = x.view(N, -1)\n gt_flat = gt.view(N, -1)\n \n intersection = x_flat * gt_flat\n \n loss = 2 * (intersection.sum(1) + smooth) / (x_flat.sum(1) + gt_flat.sum(1) + smooth)\n loss = 1 - loss.sum() / N\n \n return loss\n \n def mask_l1_loss(self, x, gt):\n valid_mask = gt>0.1\n loss = F.smooth_l1_loss(x[valid_mask], gt[valid_mask], reduction='mean')\n return loss\n\n def binary(self, x):\n x[x <= self.thold] = 0.\n x[x > self.thold] = 1.\n # x = F.threshold(x, 0, 0)\n # x = -F.threshold(-x, -0.00001, -1)\n return x\n \n def multi_stage_regression_UpMaskloss(self, pred_list, fusion_list=None, dense_list=None, sparse_list=None, left_mask_list=None, gt=None, weights=None, num_stage=None, down_func_name=None, down_scale=None, max_disp=None, sparse_mask_list=None, left_feature_map_all=None, right_feature_map_all=None, left_detail_list=None, right_detail_list=None, right_mask_list=None):\n \"\"\"function: multiple stage loss\n args:\n pred_list: the list of predicted multi-scale disparity maps, each size N*H/s*W/s;\n fusion_list: the fused result of dense prediction and sparse prediction;\n dense_list: the result of dense prediction;\n sparse_list: the result of sparse prediction;\n left_mask_list: the pre-computed lost details of left view;\n gt: the ground truth, N*H*W;\n weights: weights for loss in each scale space;\n num_stage: total number of stages, the number of pyramid's level, num_stage-1 is equal to the number of downsampling iteration;\n down_func_name: the name of downsampling fucntion;\n down_scale: scale size of each downsampling;\n max_disp: the maximum disparity value;\n sparse_mask_list: the generated soft mask for fusion;\n left_feature_map_all: the left feature maps from adjacent layers for the computation of lost details;\n right_feature_map_all: the right feature maps from adjacent layers for the computation of lost details;\n 
left_detail_list: the lost details of left view;\n right_detail_list: the lost details of right view;\n right_mask_list: the pre-computed lost details of right view;\n \"\"\"\n assert (pred_list[-1].size()[-2], pred_list[-1].size()[-1]) == (gt.size()[-2],gt.size()[-1]), \"the size of predcited disparity map is not equal to the size of groundtruth.\"\n tot_loss = 0.\n gt_list = []\n loss_list = []\n for stage_id in range(num_stage) :\n pred = pred_list[stage_id]\n \n if stage_id+1 < num_stage : # we do not need to interpolate1 the gt with original resolution\n down_size = down_scale**(num_stage-stage_id-1)\n if down_func_name in [\"bilinear\", \"bicubic\"] :\n cur_gt = F.interpolate(gt.unsqueeze(1) / down_size, scale_factor=1/down_size, mode=down_func_name).squeeze(1)\n elif down_func_name==\"max\" :\n cur_gt = F.max_pool2d(gt.unsqueeze(1) / down_size, down_size, down_size, 0, 1, False, False).squeeze(1)\n elif down_func_name==\"min\" :\n tmp_gt = gt*(gt>0) + 1e6*(gt==0)\n cur_gt = -F.max_pool2d(-tmp_gt.unsqueeze(1) / down_size, down_size, down_size, 0, 1, False, False).squeeze(1)\n else :\n raise Exception(\"down_func_name should be bilinear or max, but current it is {}\")\n else :\n cur_gt = gt\n down_size = 1.\n gt_list.append(cur_gt)\n\n if stage_id == 0 or stage_id>=self.stop_stage_id :\n pass\n \n else :\n left_mask = left_mask_list[stage_id-1]\n right_mask = right_mask_list[stage_id-1]\n left_detail = left_detail_list[stage_id-1]\n right_detail = right_detail_list[stage_id-1]\n \n # left_cur_fea = left_feature_map_all[ (stage_id-1)*2 ].permute(0,2,3,1)\n # left_pre_fea = left_feature_map_all[ (stage_id-1)*2+1 ].permute(0,2,3,1)\n # right_cur_fea = right_feature_map_all[ (stage_id-1)*2 ].permute(0,2,3,1)\n # right_pre_fea = right_feature_map_all[ (stage_id-1)*2+1 ].permute(0,2,3,1)\n \n # left_detail_pos = torch.zeros_like(left_detail, dtype=torch.bool)\n # left_detail_pos[ left_detail>self.thold ] = 1\n # right_detail_pos = torch.zeros_like(right_detail, dtype=torch.bool)\n # right_detail_pos[ right_detail>self.thold ] = 1\n # N,H,W = left_mask.size()\n # left_loss = torch.mean(torch.sum(left_detail, (1,2))) / (H*W) - self.alpha * F.mse_loss(left_cur_fea[left_detail_pos], left_pre_fea[left_detail_pos])\n # right_loss = torch.mean(torch.sum(right_detail, (1,2))) / (H*W) - self.alpha * F.mse_loss(right_cur_fea[right_detail_pos], right_pre_fea[right_detail_pos])\n # loss_list.append(left_loss)\n # loss_list.append(right_loss)\n\n # tot_loss += (left_loss+right_loss) * weights[stage_id-1]\n\n if self.if_train==False :\n left_detail = self.binary(left_detail)\n right_detail = self.binary(right_detail)\n \n left_fl = self.focal_loss(left_detail, left_mask, gamma=2, alpha=0.5)\n right_fl = self.focal_loss(right_detail, right_mask, gamma=2, alpha=0.5)\n loss_list.append(left_fl)\n loss_list.append(right_fl)\n \n left_l1 = self.mask_l1_loss(left_detail, left_mask)\n right_l1 = self.mask_l1_loss(right_detail, right_mask)\n loss_list.append(left_l1)\n loss_list.append(right_l1)\n\n tot_loss += (left_fl + right_fl + 3*left_l1 + 3*right_l1) * weights[stage_id-1]\n \n return gt_list, pred_list, tot_loss, loss_list\n \n\n def multi_stage_regression_UpSampleloss(self, pred_list, fusion_list=None, dense_list=None, sparse_list=None, left_mask_list=None, gt=None, weights=None, num_stage=None, down_func_name=None, down_scale=None, max_disp=None, sparse_mask_list=None):\n \"\"\"function: multiple stage loss\n args:\n pred_list: the list of predicted multi-scale disparity maps, each size N*H/s*W/s;\n gt: 
the ground truth, N*H*W;\n weights: weights for loss in each scale space;\n num_stage: total number of stages, the number of pyramid's level, num_stage-1 is equal to the number of downsampling iteration;;\n down_func_name: the name of downsampling fucntion\n down_scale: scale size of each downsampling\n max_disp: the maximum disparity value\n \"\"\"\n assert (pred_list[-1].size()[-2], pred_list[-1].size()[-1]) == (gt.size()[-2],gt.size()[-1]), \"the size of predcited disparity map is not equal to the size of groundtruth.\"\n tot_loss = 0.\n gt_list = []\n loss_list = []\n for stage_id in range(num_stage) :\n pred = pred_list[stage_id]\n \n if stage_id+1 < num_stage :\n down_size = down_scale**(num_stage-stage_id-1)\n cur_pred = F.interpolate(pred.unsqueeze(1) * down_size, scale_factor=down_size, mode=down_func_name).squeeze(1)\n else :\n cur_pred = pred\n pred_list.append(pred)\n \n cur_gt = gt\n gt_list.append(cur_gt)\n \n mask = (cur_gt < max_disp) & (cur_gt > 0)\n loss = F.smooth_l1_loss(cur_pred[mask], cur_gt[mask], reduction='mean')\n tot_loss += loss * weights[stage_id]\n loss_list.append(loss)\n \n return gt_list, pred_list, tot_loss, loss_list\n \n \n def LR_consistency_loss(self, pred_list, left_feature_map_all, right_feature_map_all, weights, num_stage, down_func_name, down_scale, max_disp):\n \"\"\"function: multiple stage loss\n args:\n pred_list: the list of predicted multi-scale disparity maps, each size N*H/s*W/s;\n left_feature_map_all: list of feature maps from left view,each N*Cs*H/s*W/s;\n right_feature_map_all: list of feature maps from right view, each N*Cs*H/s*W/s;\n weights: weights for loss in each scale space;\n num_stage: total number of stages, the number of pyramid's level, num_stage-1 is equal to the number of downsampling iteration;;\n down_func_name: the name of downsampling fucntion\n down_scale: scale size of each downsampling\n max_disp: the maximum disparity value\n \"\"\"\n tot_loss = 0.\n gt_list = []\n loss_list = []\n for stage_id in range(num_stage) :\n pred = pred_list[stage_id]\n \n warp_right_fea = utils.warp(pred_list[stage_id], right_feature_map_all[\"stage{}\".format(stage_id)]).unsqueeze(2)\n \n diff = torch.pow(left_feature_map_all[\"stage{}\".format(stage_id)] - warp_right_fea, 2)\n phmt = torch.mean(torch.sum(diff,dim=1))\n loss_list.append(phmt)\n \n tot_loss += phmt * weights[stage_id]\n \n return gt_list, pred_list, tot_loss, loss_list\n\n\ndef test_loss_func(pred, gt, max_disp):\n \"\"\"\n \"\"\"\n assert (pred.size()[-2], pred.size()[-1]) == (gt.size()[-2],gt.size()[-1]), \"the size of predcited disparity map is not equal to the size of groundtruth.\"\n # max_disp = 192\n batch_size, height, width = pred.size()\n mask = (gt < max_disp) & (gt > 0)\n error_map = torch.where((torch.abs(pred[mask] - gt[mask])<3) | (torch.abs(pred[mask] - gt[mask])<0.05*gt[mask]), torch.ones(1,dtype=torch.float32,device=pred.device).cuda(), torch.zeros(1,dtype=torch.float32,device=pred.device).cuda())\n loss_3 = 100 - torch.sum(error_map)/torch.sum(mask)*100\n epe = torch.mean(torch.abs(pred[mask]-gt[mask]))\n return epe, loss_3\n\n\n\n\n\n\n\n\n"
] | [
[
"torch.mean",
"torch.abs",
"torch.ones",
"torch.zeros",
"torch.sqrt",
"torch.min",
"torch.sum",
"torch.log",
"torch.nn.functional.smooth_l1_loss",
"torch.nn.functional.unfold",
"torch.pow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
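`sparseChamfer` in the DecNet loss above relies on `F.unfold` to gather, for every low-resolution pixel, all ground-truth disparities inside its `down_ratio x down_ratio` footprint, then keeps the minimum squared difference over those candidates while pushing zero (invalid) pixels far away. A small PyTorch sketch of that gather-and-minimize step, with toy tensor sizes chosen only for illustration:

```python
# Toy-sized sketch of the F.unfold gather behind sparseChamfer: each low-res pixel is
# compared against every valid GT pixel in its down_ratio x down_ratio footprint and
# the smallest squared difference is kept. Shapes here are illustrative assumptions.
import torch
import torch.nn.functional as F

down_ratio = 2
pred = torch.rand(1, 1, 3, 3)            # B x 1 x H x W low-resolution prediction
gt = torch.rand(1, 1, 6, 6)              # B x 1 x (H*r) x (W*r) sparse ground truth
gt[0, 0, 0, 0] = 0.0                     # zero marks a missing GT value

# group every r x r GT patch under its low-res pixel: B x (r*r) x H x W
gt_patches = F.unfold(gt, kernel_size=down_ratio, stride=down_ratio)
gt_patches = gt_patches.view(1, down_ratio * down_ratio, 3, 3)

dist = (pred - gt_patches) ** 2          # squared distance to every candidate
dist = dist + (gt_patches == 0) * 1e6    # mask invalid (zero) pixels
chamfer = dist.min(dim=1)[0]             # B x H x W, distance to the nearest valid GT
print(chamfer.shape)                     # torch.Size([1, 3, 3])
```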
dc-aichara/signate-jpx | [
"3d17e3fa07f091118f01638969de387664598c4a"
] | [
"competition_code/utils/utils.py"
] | [
"import yaml\nimport pandas as pd\nimport numpy as np\nfrom scipy.stats import spearmanr\nfrom PriceIndices import Indices\nfrom sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, MinMaxScaler\nfrom typing import Tuple, Optional, Union\nimport lightgbm as lgb\n\n\ndef load_data(\n data_dir: str = \"data/raw/\",\n) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n \"\"\"Given a path to the data will load all datasets into pandas dataframes.\n\n Args:\n data_dir (str, optional): Path to the input datasets where stock_labels,\n stock_fin, stock_list, and stock_price are located.\n Defaults to \"data/raw/\".\n\n Returns:\n Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n stock_price df, stock_fin df, stock_list df, stock_labels df\n \"\"\"\n\n print(f\"Loading data from {data_dir}\")\n stock_labels = pd.read_csv(f\"{data_dir}stock_labels.csv.gz\")\n stock_fin = pd.read_csv(f\"{data_dir}stock_fin.csv.gz\")\n stock_list = pd.read_csv(f\"{data_dir}stock_list.csv.gz\")\n stock_price = pd.read_csv(f\"{data_dir}stock_price.csv.gz\")\n\n return stock_price, stock_fin, stock_list, stock_labels\n\n\ndef format_dates(df: pd.DataFrame, columns: list) -> pd.DataFrame:\n \"\"\"Automatically formats all specified columns as date format \"%Y-%m-%d\"\n\n Args:\n df (pd.DataFrame): Original DataFrame of data\n columns (list): List of columns that should be dates\n\n Returns:\n pd.DataFrame: Returns original Dataframe with cleaned up dates.\n \"\"\"\n\n for column in columns:\n df[column] = pd.to_datetime(df[column]).dt.strftime(\"%Y-%m-%d\")\n\n return df\n\n\ndef reduce_mem_usage(df: pd.DataFrame, verbose=True) -> pd.DataFrame:\n \"\"\"Function for reducing memory usage by downcasting of types\n in dataframes.\n\n Args:\n df (pd.DataFrame): DataFrame of Data\n verbose (bool, optional): Whether to print results. Defaults to True.\n\n Returns:\n pd.DataFrame: Dataframe of data after type downcasting\n \"\"\"\n numerics = [\"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\"]\n start_mem = df.memory_usage().sum() / 1024 ** 2\n for col in df.columns:\n col_type = df[col].dtypes\n if col_type in numerics:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == \"int\":\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64)\n else:\n if (\n c_min > np.finfo(np.float16).min\n and c_max < np.finfo(np.float16).max\n ):\n df[col] = df[col].astype(np.float16)\n elif (\n c_min > np.finfo(np.float32).min\n and c_max < np.finfo(np.float32).max\n ):\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n end_mem = df.memory_usage().sum() / 1024 ** 2\n if verbose:\n print(\n \"Mem. 
usage decreased to {:5.2f} Mb ({:.1f}% reduction)\".format(\n end_mem, 100 * (start_mem - end_mem) / start_mem\n )\n )\n return df\n\n\ndef load_config(config_id: str) -> dict:\n \"\"\"Reads Configuration from yaml file.\n\n Args:\n config_id (str): specific configuration ID to use in yaml file\n\n Returns:\n dict: Returns dictionary of configuration parameters\n \"\"\"\n with open(\"config.yml\", \"r\") as f:\n doc = yaml.load(f, yaml.Loader)\n\n config = doc[config_id]\n return config\n\n\ndef calculate_price_indices(\n data: pd.DataFrame, date_col: str = \"date\", price_col: str = \"price\"\n) -> pd.DataFrame:\n \"\"\"\n Calculate share price technical indicators\n Args:\n data (pd.DataFrame): Pandas Dataframe\n date_col (str): Date column in DataFrame\n price_col (str): Price column in DataFrame\n\n Returns:\n pd.DataFrame: A Pandas DataFrame with price indicators as columns.\n\n \"\"\"\n df = data.copy()\n indices = Indices(df, date_col=date_col, price_col=price_col)\n df_rsi = indices.get_rsi()\n df_rsi.drop([price_col, \"RS_Smooth\", \"RSI_1\"], axis=1, inplace=True)\n df_bb = indices.get_bollinger_bands()\n df_bb.drop([price_col], axis=1, inplace=True)\n df_macd = indices.get_moving_average_convergence_divergence()\n df_macd.drop([price_col], axis=1, inplace=True)\n\n df = pd.merge(df, df_macd, on=[date_col, \"Local Code\"], how=\"left\")\n df = pd.merge(df, df_rsi, on=[date_col, \"Local Code\"], how=\"left\")\n df = pd.merge(df, df_bb, on=[date_col, \"Local Code\"], how=\"left\")\n del df_bb, df_macd, df_rsi\n df.rename(columns={\"RSI_2\": \"RSI\"}, inplace=True)\n df.fillna(0)\n df.sort_values(date_col, ascending=False, inplace=True)\n\n return df\n\n\ndef date_feats(\n df: pd.DataFrame,\n date_col: str = \"date\",\n month_col: str = \"month\",\n dom_col: str = \"dayofmonth\",\n dow_col: str = \"dayofweek\",\n) -> pd.DataFrame:\n \"\"\"\n Create Month, DayOfMonth, and DayOfWeek features given DataFrame with date\n column.\n Args:\n df (pd.DataFrame): Pandas DataFrame\n date_col (str): Date column in DataFrame\n month_col (str): Month column\n dom_col (str): DayOfMonth column\n dow_col (str): DayOfWeek column\n\n Returns:\n pd.DataFrame: A DataFrame with three new columns.\n\n \"\"\"\n data = df.copy()\n data[date_col] = pd.to_datetime(data[date_col])\n data[month_col] = data[date_col].dt.month\n data[dom_col] = data[date_col].dt.day\n data[dow_col] = data[date_col].dt.dayofweek\n\n return data\n\n\ndef lgb_spearmanr(preds: np.ndarray, dtrain_data: lgb.basic.Dataset) -> float:\n \"\"\"Spearman's rank correlation coefficient metrics for LightGBM\n\n Args:\n preds (np.ndarray): array of predictions\n dtrain_data (lgb.basic.Dataset): dataset\n\n Returns:\n float: spearman correlation score\n \"\"\"\n\n labels = dtrain_data.get_label()\n corr = spearmanr(labels, preds)[0]\n return \"lgb_corr\", corr, True\n\n\ndef lgb_r2_score(preds: np.ndarray, dtrain_data: lgb.basic.Dataset) -> float:\n \"\"\"R^2 metrics for LightGBM\n\n Args:\n preds (np.ndarray): array of predictions\n dtrain_data (lgb.basic.Dataset): dataset\n\n Returns:\n float: R2 metrics\n \"\"\"\n labels = dtrain_data.get_label()\n return \"r2\", r2_score(labels, preds), True\n\n\ndef final_metric(low_corr: float, high_corr: float) -> float:\n \"\"\"Metric as defined on the page\n https://signate.jp/competitions/423#evaluation\n \n Args:\n low_corr (float): low model spearman\n high_corr (float): high model spearman\n\n Returns:\n float: final evaluation metric as defined on signate\n \"\"\"\n return (low_corr - 1) ** 2 + 
(high_corr - 1) ** 2\n\n\ndef get_data_rules(config: dict) -> Tuple[list, list, list, list]:\n \"\"\"\n Uses date configuration to determine which columns are of numeric, date,\n categorical type as well as what should be dropped.\n\n Args:\n config (dict): Data Configuration\n\n Returns:\n Tuple[list, list, list, list]:\n List of columns that are numeric, dates, categoricals,\n and should be dropped.\n \"\"\"\n numerics = []\n dates = []\n categoricals = []\n drops = []\n\n for key, value in config.items():\n if value == \"numeric\":\n numerics.append(key)\n if value == \"date\":\n dates.append(key)\n if value == \"categorical\":\n categoricals.append(key)\n if value == \"drop\":\n drops.append(key)\n\n return numerics, dates, categoricals, drops\n\n\ndef auto_categorical(\n df: pd.DataFrame,\n encoder: Optional[Union[OneHotEncoder, OrdinalEncoder]],\n categoricals: list,\n) -> pd.DataFrame:\n \"\"\"Automatic Categorical pre-processing function, Supports Ordinal and OHE.\n\n Args:\n df (pd.DataFrame): dataframe\n encoder (Optional[OneHotEncoder, OrdinalEncoder]): categorical encoder\n categoricals (list): list of columns of categorical type\n\n Returns:\n pd.DataFrame: Dataframe of transformed categorical features\n \"\"\"\n for category in categoricals:\n df[category] = df[category].fillna(\"no_category\").astype(str)\n\n transformed_df = pd.DataFrame(encoder.transform(df[categoricals]))\n\n # if ohe\n if isinstance(encoder, OneHotEncoder):\n print(\"OHE\")\n transformed_df.columns = encoder.get_feature_names()\n\n if isinstance(encoder, OrdinalEncoder):\n print(\"Ordinal Encoder\")\n transformed_df.columns = categoricals\n\n return transformed_df\n\n\ndef auto_numeric(\n df: pd.DataFrame, scaler: MinMaxScaler, numerics: list\n) -> pd.DataFrame:\n \"\"\"Automatic Number pre-processing function.\n For every number column performs MinMaxScaling.\n\n Args:\n df (pd.DataFrame): Dataframe\n scaler (MinMaxScaler): MinMaxScaler object\n numerics (list): List of columns of numeric type\n\n Returns:\n pd.DataFrame: Dataframe of Numeric scaled features\n \"\"\"\n numerics_df = pd.DataFrame(scaler.transform(df[numerics]))\n numerics_df.columns = numerics\n\n return numerics_df\n\n\ndef auto_dates(df: pd.DataFrame, dates: list) -> pd.DataFrame:\n \"\"\"\n Automatic Date pre-processing function, For every date column, gets the\n month, day, and day of week for new features.\n\n Args:\n df (pd.DataFrame): DataFrame\n dates (list): list of date columns\n\n Returns:\n pd.DataFrame: Original dataframe with new date features\n \"\"\"\n # DF with all the dates\n dates_df = pd.DataFrame()\n for date in dates:\n dates_df[f\"{date}_month\"] = pd.to_datetime(df[date]).dt.month\n dates_df[f\"{date}_day\"] = pd.to_datetime(df[date]).dt.day\n dates_df[f\"{date}_dayofweek\"] = pd.to_datetime(df[date]).dt.dayofweek\n\n return dates_df\n\n\ndef get_technical_features(\n df,\n date_col=\"base_date\",\n price_col=\"EndOfDayQuote ExchangeOfficialClose\",\n periods=[7, 14, 21],\n extra_feats=False,\n):\n \"\"\"\n Args:\n df (pd.DataFrame): DataFrame\n date_col (str): Date column in DataFrame\n price_col (str): Price column in DataFrame\n periods (list): List of periods to create technical features\n extra_feats (bool): If create extra features from Priceindices\n Returns:\n pd.DataFrame: Feature DataFrame\n \"\"\"\n data = df[[\"Local Code\", date_col, price_col]]\n data = data.sort_values(date_col)\n datas = []\n for code in data[\"Local Code\"].unique():\n feats = data[data[\"Local Code\"] == code]\n if extra_feats:\n 
feats = calculate_price_indices(\n feats, date_col=date_col, price_col=price_col\n )\n feats = feats.sort_values(date_col)\n feats[f\"EMA_{periods[1]}\"] = (\n feats[price_col].ewm(span=periods[2], adjust=False).mean()\n )\n feats[f\"return_{periods[0]}\"] = feats[price_col].pct_change(periods[0])\n feats[f\"return_{periods[1]}\"] = feats[price_col].pct_change(periods[1])\n feats[f\"return_{periods[2]}\"] = feats[price_col].pct_change(periods[2])\n feats[f\"volatility_{periods[0]}\"] = (\n np.log(feats[price_col]).diff().rolling(periods[0]).std()\n )\n feats[f\"volatility_{periods[1]}\"] = (\n np.log(feats[price_col]).diff().rolling(periods[2]).std()\n )\n feats[f\"volatility_{periods[2]}\"] = (\n np.log(feats[price_col]).diff().rolling(periods[2]).std()\n )\n feats[f\"MA_gap_{periods[0]}\"] = feats[price_col] / (\n feats[price_col].rolling(periods[0]).mean()\n )\n feats[f\"MA_gap_{periods[1]}\"] = feats[price_col] / (\n feats[price_col].rolling(periods[1]).mean()\n )\n feats[f\"MA_gap_{periods[2]}\"] = feats[price_col] / (\n feats[price_col].rolling(periods[2]).mean()\n )\n feats = feats.fillna(0)\n feats = feats.drop([price_col], axis=1)\n datas.append(feats)\n feats = pd.concat(datas, ignore_index=True)\n del datas, data\n return feats\n"
] | [
[
"pandas.merge",
"pandas.read_csv",
"pandas.to_datetime",
"pandas.concat",
"numpy.log",
"pandas.DataFrame",
"numpy.finfo",
"numpy.iinfo",
"scipy.stats.spearmanr"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
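`lgb_spearmanr` above follows LightGBM's custom-metric contract of returning `(name, value, is_higher_better)` (as written, `lgb_r2_score` in the row above would additionally need `from sklearn.metrics import r2_score` to run). The sketch below shows how such a function is passed through `feval` during training; the synthetic data and parameters are assumptions for illustration only, not the competition pipeline.

```python
# Minimal sketch of plugging a (name, value, is_higher_better) metric such as
# lgb_spearmanr into LightGBM via feval. Data, features and parameters are synthetic
# assumptions made only so the example runs on its own.
import lightgbm as lgb
import numpy as np
from scipy.stats import spearmanr


def lgb_spearmanr(preds, dtrain_data):
    labels = dtrain_data.get_label()
    return "lgb_corr", spearmanr(labels, preds)[0], True


rng = np.random.default_rng(42)
X = rng.random((500, 5))
y = X @ np.array([0.5, -0.2, 0.1, 0.0, 0.3]) + 0.01 * rng.standard_normal(500)

train_set = lgb.Dataset(X[:400], label=y[:400])
valid_set = lgb.Dataset(X[400:], label=y[400:], reference=train_set)

booster = lgb.train(
    {"objective": "regression", "verbosity": -1},
    train_set,
    num_boost_round=50,
    valid_sets=[valid_set],
    feval=lgb_spearmanr,             # custom metric evaluated on the validation set
)
```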
srnthsrdhrn/VehicleTrackingGUI | [
"a18d890176de7547d557dfe7cc18dd37afa37411"
] | [
"plot.py"
] | [
"import tkinter as tk\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport numpy as np\n\napp = tk.Tk()\napp.wm_title(\"Graphs\")\n\nfig = Figure(figsize=(6, 4), dpi=96)\na = np.array([1,2,3])\nax = fig.add_subplot(111)\n\nline, = ax.plot(a,np.array([0,0.5,2]))\nline2, = ax.plot(a,0.55*a)\n\ngraph = FigureCanvasTkAgg(fig, master=app)\ncanvas = graph.get_tk_widget()\ncanvas.grid(row=0, column=0, rowspan = 11, padx =10, pady =5)\n\ndef updateScale(value):\n print(\"scale is now %s\" % (value))\n b = float(value)*a\n # set new data to the line\n line2.set_data(a,b)\n # rescale the axes\n ax.relim()\n ax.autoscale()\n #draw canvas\n fig.canvas.draw_idle()\n\n\nvalue = tk.DoubleVar()\nscale = tk.Scale(app, variable=value, orient=\"horizontal\",length = 100, \n from_=0.55, to=2.75, resolution = 0.01,command=updateScale)\nscale.grid(row=0, column=1)\n\napp.mainloop()"
] | [
[
"numpy.array",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"matplotlib.figure.Figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
valerocar/geometry-blender | [
"3604f9b5b63c12b8d648ceb63719a71c3d16e055"
] | [
"demos/.ipynb_checkpoints/d2-checkpoint.py"
] | [
"import plotly.graph_objs as go\nimport numpy as np\nimport plotly\n\n\ndef triangles_xy(xs, ys, triangles_indices):\n xs_n = []\n ys_n = []\n count = int(len(triangles_indices) / 3)\n for i in range(count):\n ii = list(triangles_indices[3 * i:3 * i + 3]) + [triangles_indices[3 * i]]\n xs_n = xs_n + list(xs[ii]) + [None]\n ys_n = ys_n + list(ys[ii]) + [None]\n return go.Scatter(x=xs_n, y=ys_n, name=None, mode=\"lines\", line=dict(width=1))\n\n\ndef triangles(points, triangles_indices):\n xs = []\n ys = []\n for t in triangles_indices:\n p1, p2, p3 = points[t]\n xs = xs + [p1[0], p2[0], p3[0], p1[0], None]\n ys = ys + [p1[1], p2[1], p3[1], p1[1], None]\n trace = go.Scatter(x=xs, y=ys, mode='lines+markers', marker=dict(size=2, color='rgb(0,0,0)'),\n line=dict(width=2, color='rgb(0,0,0)'))\n return trace\n\n\ndef function_graph(f, domain=[-2, 2], x_res=1000, name=None, showlegend=True, color=None):\n x_min, x_max = domain\n xs = np.linspace(x_min, x_max, x_res)\n ys = f(xs)\n return go.Scatter(x=xs, y=ys, mode=\"lines\", name=name, showlegend=showlegend, line=dict(color=color))\n\n\ndef curve_trace(curve2d, t_res=250, name=None, showlegend=False, color=None):\n t_min, t_max = curve2d.domain\n ts = np.linspace(t_min, t_max, t_res)\n xs, ys = curve2d.eval_position(ts)\n return go.Scatter(x=xs, y=ys, mode=\"lines\", name=name, showlegend=showlegend, line=dict(color=color))\n\n\ndef xys_trace(xs, ys, name=None, showlegend=False, color=None, width=1):\n return go.Scatter(x=xs, y=ys, mode=\"lines\", name=name, showlegend=showlegend,\n line=dict(color=color, width=width))\n\n\ndef points_trace(ps, name=None, showlegend=False, color=None, width=1):\n xs, ys = ps\n return xys_trace(xs, ys, name=name, showlegend=showlegend, color=color)\n\n\ndef function_contour(f, domain=[-1, 1, -1, 1], level=0):\n x_min, x_max, y_min, y_max = domain\n x_res = y_res = 200\n xs, ys = np.meshgrid(np.linspace(x_min, x_max, x_res), np.linspace(y_min, y_max, y_res))\n zs = f(xs, ys)\n dx = (x_max - x_min) / (x_res - 1)\n dy = (y_max - y_min) / (y_res - 1)\n contour_out = go.Contour(z=zs, x0=x_min, y0=y_min, dx=dx, dy=dy, showscale=False,\n contours=dict(coloring='lines', start=level, end=level),\n line=dict(width=2, color='rgb(0,0,0)'))\n return contour_out\n\n\ndef get_basic_layout():\n layout2d = go.Layout(\n title=None,\n paper_bgcolor=\"#000\",\n autosize=True,\n width=800,\n height=800,\n margin=dict(l=60, r=60, b=60, t=60),\n showlegend=False,\n )\n return layout2d\n\n\ndef plot_data(data, frames=None, layout=None):\n if layout is None:\n layout = get_basic_layout()\n\n if frames is not None:\n fig = go.Figure(data=data, layout=layout, frames=frames)\n else:\n fig = go.Figure(data=data, layout=layout)\n\n plotly.offline.iplot(fig, )\n"
] | [
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
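The plotly helpers in the geometry-blender row draw implicit curves by restricting a `go.Contour` trace to a single level rendered as lines. A short usage sketch of that level-set trick follows; the function, domain and output file name are assumptions for illustration.

```python
# Sketch of the single-level Contour trick used by function_contour above: drawing
# only the f(x, y) = 0 level as lines renders the implicit curve. The unit circle,
# the domain and the output file are assumed purely for illustration.
import numpy as np
import plotly.graph_objs as go

f = lambda x, y: x ** 2 + y ** 2 - 1.0                       # assumed example: unit circle
xs, ys = np.meshgrid(np.linspace(-2, 2, 200), np.linspace(-2, 2, 200))
zs = f(xs, ys)

contour = go.Contour(
    z=zs, x0=-2.0, y0=-2.0, dx=4.0 / 199, dy=4.0 / 199, showscale=False,
    contours=dict(coloring="lines", start=0, end=0),         # keep only the level f = 0
    line=dict(width=2),
)
fig = go.Figure(data=[contour])
fig.write_html("circle.html")                                # or fig.show() in a notebook
```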
suvarchal/hecuba | [
"f52001effb0ad4f72367d632afefbe85f0e6b048"
] | [
"hecuba_py/tests/withcassandra/storagenumpy_tests.py"
] | [
"import gc\nimport unittest\n\nfrom hecuba import config, StorageNumpy\nimport uuid\nimport numpy as np\n\nfrom storage.api import getByID\n\nfrom time import time as timer\nimport random\n\nclass StorageNumpyTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.old = config.execution_name\n config.execution_name = \"StorageNumpyTest\".lower()\n\n @classmethod\n def tearDownClass(cls):\n config.session.execute(\"DROP KEYSPACE IF EXISTS {}\".format(config.execution_name), timeout=60)\n config.execution_name = cls.old\n\n # Create a new keyspace per test\n def setUp(self):\n self.ksp = config.execution_name\n pass\n\n def tearDown(self):\n pass\n\n table = 'numpy_test'\n\n def test_init_empty(self):\n tablename = None\n\n base_array = np.arange(4096).reshape((64, 64))\n storage_id = None\n\n basic_init = StorageNumpy(base_array)\n self.assertTrue(np.array_equal(basic_init, base_array))\n\n complete_init = StorageNumpy(base_array, storage_id, tablename)\n self.assertTrue(np.array_equal(complete_init, base_array))\n\n def test_types_in_memory(self):\n base_array = np.arange(256)\n\n for typecode in np.typecodes['Integer']:\n typed_array = StorageNumpy(base_array.astype(typecode))\n self.assertTrue(np.array_equal(typed_array, base_array.astype(typecode)))\n\n for typecode in np.typecodes['UnsignedInteger']:\n typed_array = StorageNumpy(base_array.astype(typecode))\n self.assertTrue(np.array_equal(typed_array, base_array.astype(typecode)))\n\n def test_reconstruct(self):\n base_array = np.arange(256)\n tablename = self.ksp + '.' + \"test_reconstruct\"\n typecode = 'mytype'\n niter = 2\n\n for _ in range(niter):\n # Build array and store\n typed_array = StorageNumpy(base_array, tablename)\n self.assertTrue(np.array_equal(typed_array, base_array))\n typed_array.sync() # Flush values to cassandra\n # Load array\n typed_array = StorageNumpy(None, tablename)\n self.assertTrue(np.allclose(typed_array, base_array))\n typed_array.delete_persistent()\n\n def test_types_persistence(self):\n base_array = np.arange(256)\n tablename = self.ksp + '.' 
+ \"test_types_persistence\"\n\n for typecode in np.typecodes['Integer']:\n if typecode == 'p':\n # TODO For now skip arrays made of pointers\n pass\n typed_array = StorageNumpy(base_array.astype(typecode), tablename)\n self.assertTrue(np.array_equal(typed_array, base_array.astype(typecode)))\n\n typed_array.sync() # Flush values to cassandra\n\n typed_array = StorageNumpy(None, tablename)\n self.assertTrue(np.allclose(typed_array, base_array.astype(typecode)))\n typed_array.delete_persistent()\n\n for typecode in np.typecodes['UnsignedInteger']:\n if typecode == 'P':\n # TODO For now skip arrays made of pointers\n pass\n typed_array = StorageNumpy(base_array.astype(typecode), tablename)\n self.assertTrue(np.allclose(typed_array, base_array.astype(typecode)))\n\n typed_array.sync() # Flush values to cassandra\n\n typed_array = StorageNumpy(None, tablename)\n self.assertTrue(np.allclose(typed_array, base_array.astype(typecode)))\n typed_array.delete_persistent()\n\n def test_read_all(self):\n nelem = 2 ** 21\n elem_dim = 2 ** 7\n\n base_array = np.arange(nelem).reshape((elem_dim, elem_dim, elem_dim))\n casted = StorageNumpy(input_array=base_array, name=\"test_read_all\")\n\n casted.sync() # Flush values to cassandra\n test_numpy = np.arange(nelem).reshape((elem_dim, elem_dim, elem_dim))\n casted = StorageNumpy(name=\"test_read_all\")\n chunk = casted[slice(None, None, None)]\n self.assertTrue(np.allclose(chunk.view(np.ndarray), test_numpy))\n casted.delete_persistent()\n\n def test_numpy_reserved_5d_read_all(self):\n\n nelem = 100000\n elem_dim = 10\n\n base_array = np.arange(nelem).reshape((elem_dim, elem_dim, elem_dim, elem_dim, elem_dim))\n casted = StorageNumpy(input_array=base_array, name=\"test_5d_read_all\")\n\n casted.sync() # Flush values to cassandra\n\n test_numpy = np.arange(nelem).reshape((elem_dim, elem_dim, elem_dim, elem_dim, elem_dim))\n casted = StorageNumpy(name=\"test_5d_read_all\")\n chunk = casted[slice(None, None, None)]\n self.assertTrue(np.allclose(chunk.view(np.ndarray), test_numpy))\n casted.delete_persistent()\n\n def test_explicit_construct(self):\n # From an explicit constructor - e.g. 
InfoArray():\n # obj is None\n # (we're in the middle of the InfoArray.__new__\n # constructor, and self.info will be set when we return to\n # InfoArray.__new__)\n\n basic_init = StorageNumpy()\n\n def test_view_cast(self):\n # From view casting - e.g arr.view(InfoArray):\n # obj is arr\n # (type(obj) can be InfoArray)\n\n base_array = np.arange(4096).reshape((64, 64))\n view_cast = base_array.view(StorageNumpy)\n\n def test_new_from_template(self):\n # From new-from-template - e.g infoarr[:3]\n # type(obj) is InfoArray\n base_array = np.arange(4096).reshape((64, 64))\n basic_init = StorageNumpy(base_array)\n new_from_template = basic_init[:32]\n\n def test_new2_from_template(self):\n # From new-from-template - e.g infoarr[:3]\n # type(obj) is InfoArray\n base_array = np.arange(4096).reshape((64, 64))\n basic_init = StorageNumpy(base_array)\n new_from_template = basic_init[32:]\n\n def test_get_subarray(self):\n base = np.arange(8 * 8 * 4).reshape((8, 8, 4))\n hecu_p = StorageNumpy(input_array=base, name='test_get_subarray')\n hecu_p.sync() # Flush values to cassandra\n hecu_r2 = StorageNumpy(name=\"test_get_subarray\")\n res = hecu_r2[:3, :2]\n sum = res.sum()\n res = hecu_r2[:3, :2]\n avg = res.mean()\n self.assertGreater(sum, 0)\n self.assertGreater(avg, 0)\n\n def test_slicing_3d(self):\n base = np.arange(8 * 8 * 4).reshape((8, 8, 4))\n hecu = StorageNumpy(input_array=base, name='test_slicing_3d')\n res_hecu = hecu[6:7, 4:]\n res = base[6:7, 4:]\n self.assertTrue(np.array_equal(res, res_hecu))\n\n hecu.sync() # Flush values to cassandra\n hecu = StorageNumpy(name=\"test_slicing_3d\")\n res_hecu = hecu[6:7, 4:]\n self.assertTrue(np.array_equal(res, res_hecu))\n\n hecu.delete_persistent()\n\n def test_slicing_ndims(self):\n import random\n ndims = 10\n max_elements = 2048\n for dims in range(1, ndims):\n elem_per_dim = int(max_elements ** (1 / dims))\n select = (slice(random.randint(0, elem_per_dim)),) * dims\n base = np.arange(elem_per_dim ** dims).reshape((elem_per_dim,) * dims)\n\n hecu = StorageNumpy(input_array=base, name='test_slicing_ndims')\n res_hecu = hecu[select]\n res = base[select]\n self.assertTrue(np.array_equal(res, res_hecu))\n\n hecu.sync() # Flush values to cassandra\n\n hecu = StorageNumpy(name=\"test_slicing_ndims\")\n res_hecu = hecu[select]\n res = base[select]\n self.assertTrue(np.array_equal(res, res_hecu))\n hecu.delete_persistent()\n del res_hecu\n del hecu\n\n def test_slice_ops(self):\n obj = np.arange(8 * 8 * 8).reshape((8, 8, 8))\n hecu = StorageNumpy(input_array=obj, name='test_slice_ops')\n hecu_sub = hecu[:2, 3:, 4:]\n sum = hecu_sub.sum()\n self.assertGreater(sum, 0)\n description = repr(hecu_sub)\n self.assertIsInstance(description, str)\n hecu.delete_persistent()\n\n def test_slice_ops2(self):\n obj = np.arange(8 * 8 * 8).reshape((8, 8, 8))\n hecu = StorageNumpy(input_array=obj, name='test_slice_ops2')\n hecu_sub = hecu[:2, 3:, 4:]\n hecu_sub2 = hecu_sub[:1, 2:, 3:]\n sum = hecu_sub2.sum()\n self.assertGreater(sum, 0)\n description = repr(hecu_sub2)\n self.assertIsInstance(description, str)\n hecu.delete_persistent()\n\n def test_slice_from_numpy_array(self):\n obj = np.arange(8 * 8 * 8).reshape((8, 8, 8))\n hecu = StorageNumpy(input_array=obj, name='test_slice_numpy')\n l = np.array((0,1))\n hecu_sub = hecu[l] #Access using an array of indexes\n# FIXME add more testing, currently if it does not segfault, then it works\n sum = hecu_sub.sum()\n self.assertEqual(sum, obj[l].sum())\n hecu.delete_persistent()\n\n def test_iter_numpy(self):\n obj = 
np.arange(8 * 8 * 8).reshape((8, 8, 8))\n hecu = StorageNumpy(input_array=obj, name='test_iter_numpy')\n acc = 0\n for i in hecu:\n acc = acc + 1\n\n hecu_sub = hecu[:2, 3:, 4:]\n\n acc2 = 0\n for i in hecu_sub:\n acc2 = acc2 + 1\n\n self.assertGreater(acc, acc2)\n hecu.delete_persistent()\n\n def test_assign_slice(self):\n base = np.arange(8 * 8 * 4).reshape((8, 8, 4))\n hecu_p = StorageNumpy(input_array=base, name='test_assign_slice')\n sub_hecu = hecu_p[:2, 3:]\n sub_hecu[0][2:] = 0\n hecu_p.sync() # Flush values to cassandra\n hecu_p_load = StorageNumpy(name=\"test_assign_slice\")\n rep = repr(hecu_p_load)\n self.assertIsInstance(rep, str)\n # StorageNumpy in memory and in database should share data\n load_sub_arr = hecu_p_load[:]\n self.assertFalse(np.array_equal(load_sub_arr, np.arange(8 * 8 * 4).reshape((8, 8, 4))))\n self.assertTrue(np.array_equal(sub_hecu, hecu_p_load[:2, 3:]))\n # Clean up\n hecu_p_load.delete_persistent()\n\n def test_assign_element(self):\n base = np.arange(8 * 8 * 4).reshape((8, 8, 4))\n hecu_p = StorageNumpy(input_array=base, name='test_assign_element')\n sub_hecu = hecu_p[:2, 3:]\n sub_hecu[0][1][0] = 0\n hecu_p.sync()\n hecu_p_load = StorageNumpy(name=\"test_assign_element\")\n rep = repr(hecu_p_load)\n self.assertIsInstance(rep, str)\n load_sub_arr = hecu_p_load[:]\n self.assertFalse(np.array_equal(load_sub_arr, np.arange(8 * 8 * 4).reshape((8, 8, 4))))\n sub_hecu_load = hecu_p_load[:2, 3:]\n self.assertTrue(sub_hecu_load[0][1][0] == 0)\n # Clean up\n hecu_p_load.delete_persistent()\n\n def test_load_2_dif_clusters_same_instance(self):\n base = np.arange(50 * 50).reshape((50, 50))\n hecu_p = StorageNumpy(input_array=base, name='load_2_clustrs_same_inst')\n hecu_p.sync() # Flush values to cassandra\n hecu_p_load = StorageNumpy(name=\"load_2_clustrs_same_inst\")\n hecu_p_load[0:1, 0:1]\n self.assertTrue(np.array_equal(hecu_p_load[40:50, 40:50], base[40:50, 40:50]))\n\n def test_split_by_rows(self):\n \"\"\"\n Tests iterating through the rows of the Hecuba array\n \"\"\"\n bn, bm = (1, 10)\n x = np.arange(100).reshape(10, -1)\n blocks = []\n for i in range(0, x.shape[0], bn):\n row = [x[i: i + bn, j: j + bm] for j in range(0, x.shape[1], bm)]\n blocks.append(row)\n\n data = StorageNumpy(input_array=x, name=\"test_split_by_rows\")\n\n data.sync() # Flush values to cassandra\n for i, chunk in enumerate(data.np_split(block_size=(bn, bm))):\n storage_id = chunk.storage_id\n chunk.sync() #Flush data\n del chunk\n chunk = getByID(storage_id)\n self.assertTrue(np.array_equal(list(chunk), blocks[i]))\n\n self.assertEqual(i + 1, len(blocks))\n\n def test_split_by_columns(self):\n \"\"\"\n Tests iterating through the columns of the Hecuba array\n \"\"\"\n bn, bm = (10, 1)\n x = np.arange(100).reshape(10, -1)\n blocks = []\n for i in range(0, x.shape[0], bn):\n row = [x[i: i + bn, j: j + bm] for j in range(0, x.shape[1], bm)]\n blocks.append(row)\n\n data = StorageNumpy(input_array=x, name=\"test_split_by_columns\")\n data.sync() # Flush values to cassandra\n for i, chunk in enumerate(data.np_split(block_size=(bn, bm))):\n storage_id = chunk.storage_id\n chunk.sync() #Flush data\n del chunk\n chunk = getByID(storage_id)\n self.assertTrue(np.array_equal(list(chunk), blocks[i]))\n\n self.assertEqual(i + 1, len(blocks))\n\n def test_split_rows_and_columns(self):\n\n bn, bm = (2, 1)\n x = np.arange(100).reshape(10, -1)\n blocks = []\n for i in range(0, x.shape[0], bn):\n row = [x[i: i + bn, j: j + bm] for j in range(0, x.shape[1], bm)]\n blocks.append(row)\n\n data = 
StorageNumpy(input_array=x, name=\"test_split_rows_and_columns\")\n\n data.sync() # Flush values to cassandra\n for i, chunk in enumerate(data.np_split(block_size=(bn, bm))):\n storage_id = chunk.storage_id\n chunk.sync() #Flush data\n del chunk\n chunk = getByID(storage_id)\n self.assertTrue(np.array_equal(list(chunk), blocks[i]))\n\n self.assertEqual(i + 1, len(blocks))\n\n def test_split_already_persistent(self):\n\n bn, bm = (2, 1)\n x = np.arange(100).reshape(10, -1)\n blocks = []\n for i in range(0, x.shape[0], bn):\n row = [x[i: i + bn, j: j + bm] for j in range(0, x.shape[1], bm)]\n blocks.append(row)\n\n data = StorageNumpy(input_array=x, name=\"test_split_already_persistent\")\n\n data.sync() # Flush values to cassandra\n for i, chunk in enumerate(data.np_split(block_size=(bn, bm))):\n storage_id = chunk.storage_id\n chunk.sync() #Flush data\n del chunk\n chunk = getByID(storage_id)\n self.assertTrue(np.array_equal(list(chunk), blocks[i]))\n\n del data\n gc.collect()\n\n data = StorageNumpy(name=\"test_split_already_persistent\")\n self.assertTrue(np.array_equal(list(data), x))\n\n for i, chunk in enumerate(data.np_split(block_size=(bn, bm))):\n storage_id = chunk.storage_id\n chunk.sync() #Flush data\n del chunk\n chunk = getByID(storage_id)\n self.assertTrue(np.array_equal(list(chunk), blocks[i]))\n\n self.assertEqual(i + 1, len(blocks))\n\n def test_storagenumpy_copy_memory(self):\n #'''\n #Check that the memory from a StorageNumpy does not share original array\n #'''\n n = np.arange(12).reshape(3,4)\n\n s1 = StorageNumpy(n, \"test_storagenumpy_copy_memory\")\n\n # StorageNumpy s1 and n should NOT share memory\n s1[0][0] = 42\n self.assertTrue(not np.array_equal(s1, n))\n s1[0][0] = n[0][0] # Undo\n\n n[2][2] = 666\n self.assertTrue(not np.array_equal(s1, n))\n # Clean up\n s1.delete_persistent()\n\n\n def test_storagenumpy_from_storagenumpy(self):\n #'''\n #Create a StorageNumpy from another StorageNumpy\n #'''\n\n n = np.arange(12).reshape(3,4)\n\n s1 = StorageNumpy(n, \"test_sn_from_sn\")\n\n s2 = StorageNumpy(s1) # Create a StorageNumpy from another StorageNumpy\n\n self.assertTrue(s2.storage_id is None)\n self.assertTrue(s2._get_name() is None)\n self.assertTrue(np.array_equal(s2, n))\n\n # StorageNumpy s1 and s2 should not share memory\n s1[0][0] = 42\n self.assertTrue(s2[0,0] != s1[0,0])\n\n s2[2][2] = 666\n self.assertTrue(s2[2,2] != s1[2,2])\n\n # Create a third StorageNumpy\n s3 = StorageNumpy(s2)\n\n self.assertTrue(s3.storage_id is None)\n self.assertTrue(s3._get_name() is None)\n self.assertTrue(np.array_equal(s3, s2))\n\n # Clean up\n s1.delete_persistent()\n\n def test_storagenumpy_reshape(self):\n #'''\n #Reshape a StorageNumpy\n #'''\n\n n = np.arange(12).reshape(3,4)\n\n s1 = StorageNumpy(n, \"test_storagenumpy_reshape\")\n\n try:\n r = s1.view()\n r.shape = (4,3)\n self.assertTrue(r.storage_id == s1.storage_id)\n except: # If this exception code is executed means that a COPY is needed and therefore the resulting object is VOLATILE!\n r = s1.reshape(4,3)\n self.assertTrue(r.storage_id is None)\n self.assertTrue(r._is_persistent == False)\n self.assertTrue(r.shape != s1.shape)\n self.assertTrue(r.strides != s1.strides)\n\n\n # Clean up\n s1.delete_persistent()\n\n def test_transpose(self):\n #'''\n #Test the transpose\n #'''\n n=np.arange(12).reshape(3,4)\n\n s=StorageNumpy(n,\"testTranspose\")\n\n t=s.transpose()\n self.assertTrue(t[0,1] == s [1,0])\n\n t[0,1]=42\n\n self.assertTrue(t[0,1] == s[1,0])\n\n # Clean up\n s.delete_persistent()\n\n def 
test_copy_storageNumpyPersist(self):\n #'''\n #Test that a copy of a StorageNumpy does not share memory (Persistent version)\n #'''\n n=np.arange(12).reshape(3,4)\n\n s=StorageNumpy(n,\"test_copy_storageNumpyPersist\")\n c=s.copy()\n\n self.assertTrue(c.storage_id is None)\n self.assertTrue(c._get_name() is None)\n self.assertTrue(c[0,0]==s[0,0])\n\n c[0,0]=42\n self.assertTrue(c[0,0]!=s[0,0])\n\n # Clean up\n s.delete_persistent()\n\n def test_copy_storageNumpyVolatile(self):\n #'''\n #Test that a copy of a StorageNumpy does not share memory (Volatile version)\n #'''\n n=np.arange(12).reshape(3,4)\n\n s=StorageNumpy(n)\n c=s.copy()\n\n self.assertTrue(s.storage_id is None)\n self.assertTrue(c.storage_id is None)\n\n self.assertTrue(c[0,0]==s[0,0])\n\n c[0,0]=42\n\n self.assertTrue(c[0,0]!=s[0,0])\n\n def test_columnar_access(self):\n # Test accessing a column that traverses different blocks in cassandra\n\n n = np.arange(2*180).reshape(2,180)\n s = StorageNumpy(n, \"test_columnar_access\")\n\n s.sync() # Flush values to cassandra\n del s\n\n s = StorageNumpy(None, \"test_columnar_access\")\n\n tmp=s[0,:]\n\n self.assertTrue(np.array_equal(tmp, n[0,:]))\n\n def test_row_access(self):\n n = np.arange(64*128).reshape(64,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_row_access\")\n s.sync() # Flush values to cassandra\n del s\n s = StorageNumpy(None, \"test_row_access\")\n for i in range(0,64):\n tmp = s[i,:] # Access a whole row\n self.assertTrue(np.array_equal(tmp, n[i,:]))\n\n def test_column_access(self):\n n = np.arange(2*128).reshape(2,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_column_access\")\n for i in range(0,127):\n tmp = s[:,i] # Access a whole column\n self.assertTrue(np.array_equal(tmp, n[:,i]))\n\n def test_slice_after_load(self):\n n = np.arange(2*128).reshape(2,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_slice_after_load\")\n s.sync() # Flush values to cassandra\n del s\n s = StorageNumpy(None, \"test_slice_after_load\")\n tmp = s[0,110:150] # Doing an slice on an unloaded numpy\n self.assertTrue(np.array_equal(tmp, n[0,110:150]))\n\n def test_get_cluster_ids(self):\n n = np.arange(2*128).reshape(2,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_get_cluster_ids\")\n if s._build_args.metas.partition_type != 0: #This test is only valid for ZORDER\n return\n x = s._hcache.get_block_ids(s._build_args.metas)\n # Assuming a BLOCK_SIZE of 4096!! FIXME use an environment variable!\n #print(x)\n self.assertTrue(len(x) == 6)\n #\n #Each element elt of x:\n # elt[0]==zorderix\n # elt[1]==cluster_id\n # elt[2]==block_id\n # elt[3]==block_coord\n goal=[(0, 0, 0, (0, 0)), (2, 0, 2, (0, 1)), (8, 2, 0, (0, 2)), (10, 2, 2, (0, 3)), (32, 8, 0, (0, 4)), (34, 8, 2, (0, 5))]\n for i,elt in enumerate(x):\n self.assertEqual(elt[0], goal[i][0])\n self.assertEqual(elt[1], goal[i][1])\n self.assertEqual(elt[2], goal[i][2])\n self.assertEqual(elt[3], goal[i][3])\n\n def test_split(self):\n n = np.arange(2*128).reshape(2,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_split\")\n splits = 0\n for i in s.split():\n # Assuming a BLOCK_SIZE of 4096!! 
FIXME use an environment variable!\n if splits <= 4:\n self.assertEqual(i.shape, (2,22))\n else:\n self.assertEqual(i.shape, (2,18))\n self.assertTrue(i[0,0] == splits*22)\n splits = splits + 1\n self.assertTrue(splits == 6)\n\n def test_split_access(self):\n n = np.arange(2*128).reshape(2,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_split_access\")\n if s._build_args.metas.partition_type != 0: #This test is only valid for ZORDER\n return\n splits = 0\n for i in s.split():\n # Assuming a BLOCK_SIZE of 4096!! FIXME use an environment variable!\n if splits <= 4:\n self.assertTrue(np.array_equal(i[:], n[0:22, 22*splits:22*(splits+1)]))\n else:\n self.assertTrue(np.array_equal(i[:], n[0:22, 22*splits:22*(splits)+18]))\n splits = splits + 1\n\n def test_split_nomem(self):\n n = np.arange(2*128).reshape(2,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_split_nomem\")\n splits = 0\n s.sync() # Flush values to cassandra\n for i in s.split():\n sid = i.storage_id\n i.getID() # Store split in hecuba.istorage\n del i\n i = StorageNumpy(None,None,sid)\n # Assuming a BLOCK_SIZE of 4096!! FIXME use an environment variable!\n if splits <= 4:\n self.assertEqual(i.shape, (2,22))\n else:\n self.assertEqual(i.shape, (2,18))\n self.assertTrue(i[0,0] == splits*22)\n splits = splits + 1\n self.assertTrue(splits == 6)\n\n def test_split_access_nomem(self):\n n = np.arange(2*128).reshape(2,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_split_access_nomem\")\n if s._build_args.metas.partition_type != 0: #This test is only valid for ZORDER\n return\n s.sync() # Flush values to cassandra\n u = s.storage_id\n splits = 0\n for i in s.split():\n sid = i.storage_id\n i.getID() # Store split in hecuba.istorage\n del i\n i = StorageNumpy(None,None,sid)\n # Assuming a BLOCK_SIZE of 4096!! 
FIXME use an environment variable!\n if splits <= 4:\n self.assertTrue(np.array_equal(i[:], n[0:22, 22*splits:22*(splits+1)]))\n else:\n self.assertTrue(np.array_equal(i[:], n[0:22, 22*splits:22*(splits)+18]))\n\n splits = splits + 1\n\n def test_split_content(self):\n n = np.arange(88*66).reshape(88,66)\n s = StorageNumpy(n,\"test_split_content\")\n s.sync() # Flush values to cassandra\n del s\n s = StorageNumpy(None,\"test_split_content\")\n rows = [i for i in s.split(cols=False)]\n self.assertTrue(len(rows)==4)\n columns = [ i for i in s.split(cols=True)]\n self.assertTrue(len(columns)==3)\n blocks = [i for i in s.split()]\n self.assertTrue(len(blocks)==12)\n for i in rows:\n self.assertTrue(i.shape == (22,66))\n for i in columns:\n self.assertTrue(i.shape == (88,22))\n for i in blocks:\n self.assertTrue(i.shape == (22,22))\n self.assertTrue(np.array_equal(rows[0],n[0:22,:]))\n self.assertTrue(np.array_equal(rows[1],n[22:44,:]))\n self.assertTrue(np.array_equal(rows[2],n[44:66,:]))\n self.assertTrue(np.array_equal(rows[3],n[66:,:]))\n self.assertTrue(np.array_equal(columns[0],n[:,0:22]))\n self.assertTrue(np.array_equal(columns[1],n[:,22:44]))\n self.assertTrue(np.array_equal(columns[2],n[:,44:]))\n\n def test_load_StorageNumpy(self):\n n = np.arange(2*128).reshape(2,128) # A matrix with \"some\" columns\n s = StorageNumpy(n, \"test_load_StorageNumpy\")\n s.sync() # Flush values to cassandra\n s2 = StorageNumpy(None, \"test_load_StorageNumpy\")\n self.assertTrue(s2._is_persistent)\n self.assertEqual(s.storage_id, s2.storage_id)\n\n def test_np_dot(self):\n n1 = np.arange(8*8).reshape(8,8)\n n2 = np.arange(8*8).reshape(8,8)\n s1 = StorageNumpy(n1, \"test_np_dot1\")\n s2 = StorageNumpy(n2, \"test_np_dot2\")\n res = np.dot(s1, s2)\n res.make_persistent(\"test_np_dots1xs2\")\n self.assertTrue(np.array_equal(res, np.dot(n1,n2)))\n\n @unittest.skip(\"Only execute for performance reasons\")\n def test_performance_storage_numpy_arrow(self):\n # Test the time to retrieve a column from Cassandra\n\n # Times to repeat the test\n TIMES = 10\n\n # Matrix sizes to test\n matrix_size = (100, 200, 300, 400, 500, 600, 700, 800, 900, 1000)\n n_cols = 3\n\n times = {}\n # Test 1 column\n for s in matrix_size:\n times[s] = [] # empty list for size 's'\n\n # Create a numpy\n n = np.arange(1000*s * n_cols).reshape(1000*s, n_cols)\n matrix_name = \"matrix{}x{}\".format(1000*s, n_cols)\n\n # Make it persistent\n o = StorageNumpy(n, matrix_name)\n\n o.sync() # Flush values to cassandra\n # Clean memory\n del o\n\n for i in range(TIMES):\n # Retrieve numpy from cassandra (NO data in memory)\n o = StorageNumpy(None, matrix_name)\n\n # LOAD_ON_DEMAND must be DISABLED!\n self.assertTrue(o.data.hex()[:40], '0' * 40)\n\n start = timer()\n\n # Load column\n column = random.randint(0, (n_cols-1))\n\n o[:, column]\n\n end = timer()\n\n # Store time\n times[s].append(end - start)\n del o\n\n # All tests done, print results\n print(\"\\nRESULTS:\")\n for s in matrix_size:\n print(\"Matrix size{}x{} = \".format(1000*s, n_cols), times[s])\n print(\"\\n\")\n\n def test_setitem_blocks(self):\n # Ensure that sets and gets on different blocks in cassandra are\n # updated and retrieved This test creates a matrix of 3x3 blocks,\n # modifies an element on each of the blocks and retrieves them.\n n = np.arange(66*66).reshape(66,66)\n s = StorageNumpy(n, \"test_setitem_blocks\")\n magic = [-660 - i for i in range(10)]\n pos = [ (0,0), (0,30), (0,64), (30,0), (30,30), (30,64), (64,0), (64,30), (64,64)]\n # Modify 's' in memory and 
disk and keep 'n' in the same condition as a baseline\n for i in range(len(pos)):\n s[pos[i]] = magic[i]\n n[pos[i]] = magic[i]\n # Check modified elements in memory\n for i in range(len(pos)):\n self.assertTrue( s[pos[i]] == magic[i] )\n # Check Rest of elements in memory\n self.assertTrue(np.array_equal(n,s))\n s.sync() # Flush values to cassandra\n del s\n s = StorageNumpy(None, \"test_setitem_blocks\")\n # Check modified elements in Cassandra\n for i in range(len(pos)):\n self.assertTrue( s[pos[i]] == magic[i] )\n # Check Rest of elements in Cassandra\n self.assertTrue(np.array_equal(n,s))\n\n del s\n s = StorageNumpy(None, \"test_setitem_blocks\")\n # Modify memory content (not loaded) with different magic values\n for i in range(len(pos)):\n s[pos[i]] = magic[len(pos)-1-i]\n n[pos[i]] = magic[len(pos)-1-i]\n\n for i in range(len(pos)):\n self.assertTrue( s[pos[i]] == magic[len(pos)-1-i] )\n self.assertTrue(np.array_equal(n,s))\n\n def test_store_in_view(self):\n n = np.arange(66*66).reshape(66,66)\n s = StorageNumpy(n, \"test_store_in_view\")\n\n s1 = s[1:65,1:65]\n s1[0,0] = 666\n\n self.assertTrue(s[1,1], 666) # original numpy is modified\n\n s.sync() # Flush values to cassandra\n del s\n\n s = StorageNumpy(None, \"test_store_in_view\")\n self.assertTrue(s[1,1], 666) # Ensure cassandra has been modified\n\n # Persistent Views\n def test_pv_slice_slice(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_slice_slice\")\n sn.sync() # Flush values to cassandra\n del sn\n sn = StorageNumpy(None,\"test_pv_slice_slice\")\n # Caso: slice, slice\n s1 = slice(1,65)\n n1 = sn[s1,s1]\n i=1\n j=1\n self.assertTrue(np.array_equal(n1[i,j], n[s1,s1][i,j]))\n\n def test_loaded(self):\n n = np.arange(88*66).reshape(88,66)\n s = StorageNumpy(n, \"test_loaded\")\n self.assertTrue(s._numpy_full_loaded is True)\n s.sync() # Flush values to cassandra\n del s\n s = StorageNumpy(None, \"test_loaded\")\n self.assertTrue(s._numpy_full_loaded is False)\n\n # The accessed element must be FULL loaded\n row = s[0,:]\n self.assertTrue(s._numpy_full_loaded is False)\n self.assertTrue(row._numpy_full_loaded is True)\n\n del s\n s = StorageNumpy(None, \"test_loaded\")\n col = s[:, 0]\n self.assertTrue(s._numpy_full_loaded is False)\n self.assertTrue(col._numpy_full_loaded is True)\n\n del s\n s = StorageNumpy(None, \"test_loaded\")\n block = s[22:44, 22:44]\n self.assertTrue(s._numpy_full_loaded is False)\n self.assertTrue(block._numpy_full_loaded is True)\n\n # Loading ALL elements must make the object full loaded\n del s\n s = StorageNumpy(None, \"test_loaded\")\n for i in range(s.shape[0]):\n x = s[i,:]\n self.assertTrue(s._numpy_full_loaded is True)\n\n del s\n s = StorageNumpy(None, \"test_loaded\")\n for i in range(s.shape[1]):\n x = s[:,i]\n self.assertTrue(s._numpy_full_loaded is True)\n\n # Split MUST NOT load the object\n del s\n s = StorageNumpy(None, \"test_loaded\")\n rows = [ i for i in s.split(cols=False) ]\n for i in rows:\n self.assertTrue(i._numpy_full_loaded is False)\n\n del s\n s = StorageNumpy(None, \"test_loaded\")\n columns = [ i for i in s.split(cols=True) ]\n for i in columns:\n self.assertTrue(i._numpy_full_loaded is False)\n\n del s\n s = StorageNumpy(None, \"test_loaded\")\n blocks = [ i for i in s.split() ]\n for i in blocks:\n self.assertTrue(i._numpy_full_loaded is False)\n\n\n # TODO: Tranform SNadaptcoords.py\n def test_out_of_bounds_in_numpy(self):\n n = np.arange(88*66).reshape(88,66)\n s = StorageNumpy(n, \"test_bounds_in_numpy\")\n del s\n s = 
StorageNumpy(None, \"test_bounds_in_numpy\")\n\n with self.assertRaises(IndexError):\n s[:, 100]\n\n with self.assertRaises(IndexError):\n s[100, :]\n\n v = s[1:10,22:50]\n with self.assertRaises(IndexError):\n v[11, :]\n with self.assertRaises(IndexError):\n v[:, 55]\n\n def views_with_steps(self):\n n = np.arange(88*66).reshape(88,66)\n s = StorageNumpy(n, \"views_with_steps\")\n\n self.assertEqual(self._row_elem, 22) # HARDCODED VALUE!\n\n self.assertEqual(s._n_blocks, 12)\n\n v1 = s[:,23:40]\n self.assertEqual(v1._n_blocks, 4)\n\n v = s[:,2:50:2]\n self.assertEqual(v._n_blocks, 12)\n\n v2 = s[:, 23:50:2] # 23/2 == 11 columns\n self.assertEqual(v2._n_blocks, 8)\n\n def test_sync(self):\n n = np.arange(22*22).reshape(22,22)\n s = StorageNumpy(n, \"test_sync\")\n\n del s\n s = StorageNumpy(None, \"test_sync\")\n s[0,0] = 666 # Asynchronous write\n x = StorageNumpy(None, None, s.storage_id)\n self.assertTrue(s[0,0] != x[0,0]) # Data is still in dirty\n self.assertTrue(x[0,0] == 0)\n s.sync()\n x = StorageNumpy(None, None, s.storage_id)\n self.assertTrue(s[0,0] == x[0,0])\n self.assertTrue(x[0,0] == 666)\n\n # Persistent Views: slice, int\n def test_pv_slice_int(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_slice_int\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_slice_int\")\n # Caso: slice, int\n s1 = slice(1,65)\n s2 = 30\n i=1\n n2 = sn[s1,s2]\n self.assertTrue(np.array_equal(n2[i], n[s1,s2][i]))\n\n # Persistent Views: int, slice\n def test_pv_int_slice(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_int_slice\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_int_slice\")\n # Caso: int, slice\n s1 = slice(1,65)\n s2 = 30\n i=1\n n2 = sn[s2,s1]\n self.assertTrue(np.array_equal(n2[i], n[s2,s1][i]))\n\n # Persistent Views: slice_step\n def test_pv_slice_step(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_slice_step\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_slice_step\")\n s1 = slice(1,65,2)\n n2 = sn[s1,s1]\n i=2\n j=30\n self.assertTrue(np.array_equal(n[s1,s1][i,j], n2[i,j]))\n\n # Persistent Views: slice_from_slice\n def test_pv_slice_from_slice(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_slice_from_slice\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_slice_from_slice\")\n s1 = slice(1,65)\n s2 = slice(1,20)\n n1 = sn[s1]\n n2 = n1[s2]\n self.assertTrue(np.array_equal(n2, n[s1][s2]))\n\n # Persistent Views: slice_from_slice_step\n def test_pv_slice_from_from_slice_step(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_slice_from_slice_step\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_slice_from_slice_step\")\n s1 = slice(1,65,2)\n s2 = slice(1,20,2)\n n1 = sn[s1][s2]\n self.assertTrue(np.array_equal(n1, n[s1][s2]))\n\n # Persistent Views: multiple slices\n def test_pv_slice_from_N_slice(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_slice_from_N_slice\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_slice_from_N_slice\")\n s1 = slice(1,65,2)\n s2 = slice(1,20,2)\n n1 = sn[s1,s1][s2,s2]\n self.assertTrue(np.array_equal(n1, n[s1,s1][s2,s2]))\n\n # Persistent Views: only_int\n def test_pv_only_int(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_only_int\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_only_int\")\n s1 = 1\n n1 = sn[s1]\n self.assertTrue(np.array_equal(n[1], n1))\n\n # Persistent Views: 
big_np\n def test_pv_big_np(self):\n n = np.arange(1000*1000).reshape(1000,1000)\n sn = StorageNumpy(n,\"test_pv_big_np\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_big_np\")\n s1 = (22,22)\n self.assertTrue(np.array_equal(sn[s1], n[s1]))\n\n # Persistent Views: one_dim\n def test_pv_one_dim(self):\n n = np.arange(66*66)\n sn = StorageNumpy(n,\"test_pv_one_dim\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_one_dim\")\n s1 = 30\n self.assertTrue(np.array_equal(sn[s1], n[s1]))\n\n # Persistent Views: negative_indexes\n def test_pv_negative_indexes(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_negative_indexes\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_negative_indexes\")\n s1 = -1\n self.assertTrue(np.array_equal(sn[s1], n[s1]))\n nn = np.arange(66*66)\n snn = StorageNumpy(nn,\"test_pv_negative_indexes_small\")\n del snn\n snn = StorageNumpy(None,\"test_pv_negative_indexes_small\")\n self.assertTrue(np.array_equal(snn[s1], nn[s1]))\n\n # Persistent Views: special_case \n def test_pv_special_case(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_special_case\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_special_case\")\n s1 = slice(1,65)\n s2 = sn[s1,s1]\n ssf=1\n self.assertTrue(np.array_equal(sn[s1,s1][1], n[s1,s1][1]))\n\n # Persistent Views: slice_single_row \n def test_pv_slice_single_row(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_slice_single_row\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_slice_single_row\")\n s1 = slice(1,65)\n s2 = slice(1, None, None)\n self.assertTrue(np.array_equal(sn[s1], n[s1]))\n self.assertTrue(np.array_equal(sn[s1][s2], n[s1][s2]))\n self.assertTrue(np.array_equal(sn[s1][s2][s2], n[s1][s2][s2]))\n\n # Persistent Views: load_correct_blocks \n def test_pv_load_correct_blocks(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_load_correct_blocks\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_load_correct_blocks\")\n s1 = (0, slice(None, None, None))\n x = sn[s1]\n self.assertTrue(len(sn._loaded_coordinates) == 3)\n\n # Persistent Views: slice_single_column \n def test_pv_slice_single_column(self):\n n = np.arange(66*66).reshape(66,66)\n sn = StorageNumpy(n,\"test_pv_slice_single_column\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_slice_single_column\")\n s1 = (slice(None, None, None), 30)\n s2 = slice(1, None, None)\n self.assertTrue(sn[s1].shape == n[s1].shape)\n self.assertTrue(np.array_equal(sn[s1], n[s1]))\n self.assertTrue(np.array_equal(sn[s1][s2], n[s1][s2]))\n self.assertTrue(np.array_equal(sn[s1][s2][s2], n[s1][s2][s2]))\n\n # Persistent Views: three_dimensions \n def test_pv_three_dimensions(self):\n n = np.arange(3*66*66).reshape(3,66,66)\n sn = StorageNumpy(n,\"test_pv_three_dimensions\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_three_dimensions\")\n s1 = (0, 1, slice(None, None, None))\n self.assertTrue(np.array_equal(sn[s1], n[s1]))\n s2 = slice(1,10,1)\n self.assertTrue(np.array_equal(sn[s1][s2], n[s1][s2]))\n\n # Persistent Views: three_dimensions_easy \n def test_pv_three_dimensions_easy(self):\n n = np.arange(4*4*4).reshape(4,4,4)\n sn = StorageNumpy(n,\"test_pv_three_dimensions_easy\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_three_dimensions_easy\")\n orig3 = (slice(None, None, None), slice(None, None, None), slice(None, None, None))\n s1 = (0, 1, slice(None, None, None))\n 
self.assertTrue(np.array_equal(sn[s1], n[s1]))\n s2 = slice(1, None, None)\n self.assertTrue(np.array_equal(sn[s1][s2], n[s1][s2]))\n self.assertTrue(np.array_equal(sn[s1][s2][s2], n[s1][s2][s2]))\n\n # Persistent Views: three_dimensions_all_coords \n def test_pv_three_dimensions_all_coords(self):\n n = np.arange(8*8*8).reshape(8,8,8)\n sn = StorageNumpy(n,\"test_pv_three_dimensions_all_coords\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_three_dimensions_all_coords\")\n orig3 = (slice(None, None, None), slice(None, None, None), slice(None, None, None))\n coords = []\n for i in sn.calculate_block_coords(orig3):\n coords.append(i)\n expected = [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]\n result = all(map(lambda x, y: x == y, expected, coords))\n self.assertTrue(result, True)\n\n # Persistent Views: three_dimensions_subslice_onedim_coords \n def test_pv_three_dimensions_slice_onedim(self):\n n = np.arange(8*8*8).reshape(8,8,8)\n sn = StorageNumpy(n,\"test_pv_three_dimensions_slice_onedim\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_three_dimensions_slice_onedim\")\n ss = sn[0,0,slice(None,None,None)]\n coords = []\n for i in ss.calculate_block_coords(ss._build_args.view_serialization):\n coords.append(i)\n expected = [(0, 0, 0), (0, 0, 1)]\n result = all(map(lambda x, y: x == y, expected, coords))\n self.assertTrue(result, True)\n\n # Persistent Views: three_dimensions_subslice_twodim_coords \n def test_pv_three_dimensions_slice_twodim(self):\n n = np.arange(8*8*8).reshape(8,8,8)\n sn = StorageNumpy(n,\"test_pv_three_dimensions_slice_twodim\")\n sn.sync()\n del sn\n sn = StorageNumpy(None,\"test_pv_three_dimensions_slice_twodim\")\n ss = sn[(slice(None,None,None), slice(None,None,None),0)]\n coords = []\n for i in ss.calculate_block_coords(ss._build_args.view_serialization):\n coords.append(i)\n expected = [(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0)]\n result = all(map(lambda x, y: x == y, expected, coords))\n self.assertTrue(result, True)\n\n # Persistent Views: two_dimensions_multiple_accesses \n def test_pv_two_dimensions_multiple_accesses(self):\n n = np.arange(66*66).reshape(66,66)\n s = StorageNumpy(n,\"test_pv_two_dimensions_multiple_accesses\")\n s.sync()\n del s\n s = StorageNumpy(None,\"test_pv_two_dimensions_multiple_accesses\")\n s1 = (slice(None, None, None), slice(2,50,2)) # slice with step != 1\n x = s[s1]\n res = x.shape == n[s1].shape\n self.assertTrue(np.array_equal( x, n[x._build_args.view_serialization] ))\n self.assertTrue(x[0,0] == n[x._build_args.view_serialization][0,0])\n self.assertTrue(x[0,1] == n[x._build_args.view_serialization][0,1])\n self.assertTrue(x[0,-2] == n[x._build_args.view_serialization][0,-2])\n self.assertTrue(x[0,-1] == n[x._build_args.view_serialization][0,-1])\n self.assertTrue(x[-2,0] == n[x._build_args.view_serialization][-2,0])\n self.assertTrue(x[-1,0] == n[x._build_args.view_serialization][-1,0])\n self.assertTrue(x[-2,-2] == n[x._build_args.view_serialization][-2,-2])\n self.assertTrue(x[-1,-1] == n[x._build_args.view_serialization][-1,-1])\n\n del s\n s = StorageNumpy(None,\"test_pv_two_dimensions_multiple_accesses\")\n s1 = (slice(None, None, None), slice(2,50,2))\n x = s[s1]\n s2 = (slice(None, None,None), slice(-10, -3, 1))\n self.assertTrue(np.array_equal(x[s2], n[x._build_args.view_serialization][s2]))\n\n del s\n s = StorageNumpy(None,\"test_pv_two_dimensions_multiple_accesses\")\n s1 = (slice(None, None, None), slice(2,50,2))\n x = s[s1]\n s2 = 
(slice(None,None,None),slice(-3, -10, 1))\n self.assertTrue(np.array_equal(x[s2], n[x._build_args.view_serialization][s2]))\n\n del s\n s = StorageNumpy(None,\"test_pv_two_dimensions_multiple_accesses\")\n s1 = (slice(None, None, None), slice(2,50,2))\n x = s[s1]\n s2 = (slice(None,None,None),slice(-10, 200, 1))\n self.assertTrue(np.array_equal(x[s2], n[x._build_args.view_serialization][s2]))\n\n del s\n s = StorageNumpy(None,\"test_pv_two_dimensions_multiple_accesses\")\n s1 = (slice(None, None, None), slice(2,50,2))\n x = s[s1]\n s2 = (slice(None,None,None),slice(2, -2, 1))\n self.assertTrue(np.array_equal(x[s2], n[x._build_args.view_serialization][s2]))\n\n # Simple_negative\n def test_simple_negative(self):\n n = np.arange(6)\n s = StorageNumpy(n,\"test_simple_negative\")\n ss = s[2::2]\n self.assertTrue(ss[-1] == n[2::2][-1])\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.dot",
"numpy.allclose",
"numpy.array_equal",
"numpy.arange",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
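The Hecuba entry above exercises one pattern over and over: build a NumPy array, persist it under a name with `StorageNumpy`, `sync()` it to Cassandra, rebuild it by name, and compare the round trip with `np.array_equal` / `np.allclose`. Running that verbatim needs a Hecuba + Cassandra deployment, so the sketch below is only an illustration of the same persist-reload-compare loop, using `np.save`/`np.load` on a temporary file as a stand-in for `StorageNumpy`; the stand-in, the file path, and the small array size are assumptions, not part of the dataset row.

```python
# Minimal, self-contained sketch of the persist / reload / compare pattern used by the
# StorageNumpy tests above. A temporary .npy file stands in for the Cassandra table;
# the dtype loop mirrors test_types_persistence.
import os
import tempfile
import numpy as np

base_array = np.arange(64).reshape(8, 8)

with tempfile.TemporaryDirectory() as tmp:
    for typecode in np.typecodes['Integer']:
        if typecode == 'p':
            continue  # actually skip pointer-sized codes (the embedded test's `pass` does not)
        typed = base_array.astype(typecode)
        path = os.path.join(tmp, f"persist_{typecode}.npy")
        np.save(path, typed)       # "flush" to storage (stand-in for sync())
        reloaded = np.load(path)   # stand-in for StorageNumpy(None, tablename)
        assert np.array_equal(reloaded, typed)  # exact match for integer dtypes
        assert np.allclose(reloaded, typed)     # tolerance-based check, as in the tests
```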
edgarsmdn/TS | [
"746e6d8220e62841eeb469b9462599e8986f5cba"
] | [
"stoch_optim_utilities.py"
] | [
"import numpy as np\r\nimport random as rnd\r\n'''\r\n\r\n Utilities for Stochastic Optimization Algorithms\r\n\r\n'''\r\ndef LS_f_v3(f, p_init, max_iter, bounds, radius, reduce_iter, reduce_frac):\r\n '''\r\n ---------------------\r\n LOCAL SEARCH ALGORITHM\r\n ---------------------\r\n --- input ---\r\n f: (function) Objective function\r\n p_init: (array) Initial point (where the funtion is going to be evaluated)\r\n max_iter: (integer) Maximum number of iterations\r\n bounds: (list) Bounds on the search domain\r\n radius: (float) Initial search radius\r\n reduce_iter: (integer) Number of iterations with the same optimum that will induce a search radius reduction\r\n reduce_frac: (float) Fraction to which the search radius is reduced. Must be between 0 and 1\r\n\r\n --- output ---\r\n best_value: (float) The best value found with the iteration using the best_position\r\n best_position: (array) The best position in which the final best_value was evaluated\r\n\r\n --- Notes ---\r\n 1. f stands for \"fast implementation\" which means it does not compile results\r\n '''\r\n #Initialization\r\n f = f\r\n p_init = p_init\r\n max_iter = max_iter\r\n bounds = bounds\r\n radius = radius\r\n reduce_iter = reduce_iter\r\n reduce_frac = reduce_frac\r\n # ----------------------------------------------\r\n best_position = p_init\r\n best_value = f(p_init)\r\n dim = len(p_init)\r\n fail_count = 0 \r\n # Iteration Loop\r\n for i_iter in range(max_iter):\r\n # Tries random values in the vecinity of the best position so far\r\n # Assure that every variable is within the bounds\r\n check = False\r\n while not check:\r\n temp_bound = np.array([rnd.uniform(bounds[i][0],bounds[i][1]) for i in range(dim)])\r\n p_trial = - best_position + radius * temp_bound\r\n check = check_bounds(bounds, p_trial)\r\n if not check:\r\n p_trial = best_position - radius * temp_bound\r\n check = check_bounds(bounds, p_trial)\r\n # If the modification of the complete set did not work. It will modify each variable individually\r\n if not check:\r\n p_trial = check_bounds_variable(bounds, p_trial, radius)\r\n check = True\r\n # If trial value is better than best value, this gets substituted\r\n trial_value = f(p_trial)\r\n if trial_value < best_value:\r\n best_position = p_trial\r\n best_value = trial_value\r\n else:\r\n fail_count += 1\r\n # Check whether it's time to set radius to smaller one. Resets failcount\r\n if fail_count == reduce_iter:\r\n radius *= reduce_frac\r\n fail_count = 0\r\n return best_value, best_position\r\n\r\ndef LS_p_v3(f, p_init, max_iter, bounds, radius, reduce_iter, reduce_frac):\r\n '''\r\n ---------------------\r\n LOCAL SEARCH ALGORITHM\r\n ---------------------\r\n --- input ---\r\n f: (function) Objective function\r\n p_init: (array) Initial point (where the funtion is going to be evaluated)\r\n max_iter: (integer) Maximum number of iterations\r\n bounds: (list) Bounds on the search domain\r\n radius: (float) Initial search radius\r\n reduce_iter: (integer) Number of iterations with the same optimum that will induce a search radius reduction\r\n reduce_frac: (float) Fraction to which the search radius is reduced. Must be between 0 and 1\r\n\r\n --- output ---\r\n best_value: (float) The best value found with the iteration using the best_position\r\n best_position: (array) The best position in which the final best_value was evaluated\r\n trajectory: (matrix) Column 0: Number of iteration. 
Column 1: Value for current iteration\r\n trajectory_x: (matrix) Positions visited during the iterations\r\n\r\n --- Notes ---\r\n 1. The \"p\" states for the \"plot\" version of the algorithm. It outputs all the iteration trajectory\r\n '''\r\n #Initialization\r\n f = f\r\n p_init = p_init\r\n max_iter = max_iter\r\n bounds = bounds\r\n radius = radius\r\n reduce_iter = reduce_iter\r\n reduce_frac = reduce_frac\r\n # ----------------------------------------------\r\n best_position = p_init\r\n best_value = f(p_init)\r\n dim = len(p_init)\r\n fail_count = 0\r\n \r\n trajectory = np.zeros((max_iter + 1, 2))\r\n trajectory[0][0] = 0\r\n trajectory[0][1] = best_value\r\n \r\n trajectory_x = np.zeros((max_iter + 1, dim))\r\n trajectory_x[0] = best_position\r\n \r\n # Iteration Loop\r\n for i_iter in range(max_iter):\r\n # Tries random values in the vecinity of the best position so far\r\n # Assure that every variable is within the bounds\r\n check = False\r\n while not check:\r\n temp_bound = np.array([rnd.uniform(bounds[i][0],bounds[i][1]) for i in range(dim)])\r\n p_trial = - best_position + radius * temp_bound\r\n check = check_bounds(bounds, p_trial)\r\n if not check:\r\n p_trial = best_position - radius * temp_bound\r\n check = check_bounds(bounds, p_trial)\r\n # If the modification of the complete set did not work. It will modify each variable individually\r\n if not check:\r\n p_trial = check_bounds_variable(bounds, p_trial, radius)\r\n check = True\r\n # If trial value is better than best value, this gets substituted\r\n trial_value = f(p_trial)\r\n if trial_value < best_value:\r\n best_position = p_trial\r\n best_value = trial_value\r\n else:\r\n fail_count += 1\r\n # Check whether it's time to set radius to smaller one. Resets failcount\r\n if fail_count == reduce_iter:\r\n radius *= reduce_frac\r\n fail_count = 0\r\n # Stores trajectory\r\n trajectory[i_iter + 1][0] = i_iter + 1\r\n trajectory[i_iter + 1][1] = best_value\r\n trajectory_x[i_iter + 1] = best_position\r\n \r\n \r\n return best_value, best_position, trajectory, trajectory_x\r\n\r\ndef RS_f_v2(f, p_best, max_iter, bounds):\r\n '''\r\n ---------------------\r\n RANDOM SEARCH ALGORITHM\r\n ---------------------\r\n --- input ---\r\n f: objective function\r\n p_best: hot-start a best point\r\n max_iter: maximum number of iterations\r\n bounds: bounds on the search domain\r\n \r\n --- output ---\r\n best_value: (float) The best value found with the iteration using the best_position\r\n best_position: (array) The best position in which the final best_value was evaluated\r\n\r\n --- Notes ---\r\n 1. p_best is used in case a good value is already known\r\n 2. The \"f\" states for the \"fast\" version of the algorithm. 
It only outputs the best values found\r\n '''\r\n # Initialization\r\n f = f\r\n p_best = p_best\r\n max_iter = max_iter\r\n bounds = bounds\r\n # ----------------------------------------------\r\n best_position = p_best\r\n best_value = f(p_best)\r\n dim = len(p_best)\r\n # Search loop\r\n for i_iter in range(max_iter):\r\n # Tries random values\r\n p_trial = np.array([rnd.uniform(bounds[i][0],bounds[i][1]) for i in range(dim)])\r\n trial_value = f(p_trial)\r\n # If trial values is better than best position, this gets substituted\r\n if trial_value < best_value:\r\n best_position = p_trial\r\n best_value = trial_value\r\n return best_value, best_position\r\n\r\ndef RS_p_v2(f, p_best, max_iter, bounds):\r\n '''\r\n ---------------------\r\n RANDOM SEARCH ALGORTHM\r\n ---------------------\r\n --- input ---\r\n f: objective function\r\n p_best: hot-start a best point\r\n max_iter: maximum number of iterations\r\n bounds: bounds on the search domain\r\n\r\n --- output ---\r\n best_value: (float) The best value found with the iteration using the best_position\r\n best_position: (array) The best position in which the final best_value was evaluated\r\n trajectory: (matrix) Column 0: Number of iteration. Column 1: Value for current iteration\r\n \r\n --- Notes ---\r\n\r\n 1. p_best is used in case a good value is already known.\r\n 2. The \"p\" states for the \"plot\" version of the algorithm. It outputs all the iteration trajectory\r\n\r\n '''\r\n # Initialization\r\n f = f\r\n p_best = p_best\r\n max_iter = max_iter\r\n bounds = bounds\r\n # ----------------------------------------------\r\n best_position = p_best\r\n best_value = f(p_best)\r\n dim = len(p_best)\r\n # Creating arrays for the plots\r\n all_results = np.zeros((max_iter,2))\r\n # Search loop\r\n for i_iter in range(max_iter):\r\n # Tries random values \r\n p_trial = np.array([rnd.uniform(bounds[i][0],bounds[i][1]) for i in range(dim)])\r\n trial_value = f(p_trial)\r\n # If trial values is better than best position, this gets substituted\r\n if trial_value < best_value:\r\n best_position = np.copy(p_trial)\r\n best_value = trial_value\r\n # Compiling results\r\n all_results[i_iter][0] = i_iter\r\n all_results[i_iter][1] = best_value\r\n return best_value, best_position, all_results\r\n\r\ndef check_bounds_variable(bounds, position, radius):\r\n '''\r\n ------------------------------\r\n CHECK BOUNDS VARIABLE BY VARIABLE AND ASSURES THEY ARE WITHIN BOUNDS CHANGING THEM WHEN NECESSARY\r\n ------------------------------\r\n --- input ---\r\n bounds: (list) Bounds on the search domain\r\n position: (array) Proposed current position of the particle\r\n \r\n --- output ---\r\n position: (array) Corrected array to be within bounds in each variable\r\n '''\r\n # Initialization\r\n bounds = bounds\r\n position = position\r\n radius = radius\r\n # ----------------------------------------------\r\n check = False\r\n while not check:\r\n check_var_count = 0 #To count variables which are within bounds\r\n for variable in range(len(position)):\r\n bounds_variable = [bounds[variable]] # Extracts the bounds for the specific variable\r\n check_variable = check_bounds(bounds_variable, np.array([position[variable]]))\r\n if not check_variable:\r\n r1 = variable - radius # Left limit radius \r\n r2 = variable + radius # Right limit radius \r\n \r\n if r2 < bounds_variable[0][0]: # O /------/\r\n position[variable] = bounds_variable[0][0]\r\n elif r1 > bounds_variable[0][1]: # /------/ O\r\n position[variable] = bounds_variable[0][1]\r\n elif r2 > 
bounds_variable[0][0] and r1 < bounds_variable[0][0]: # O----/\r\n position[variable] = rnd.uniform(bounds_variable[0][0], r2)\r\n elif r1 < bounds_variable[0][1] and r2 > bounds_variable[0][1]: # /----O\r\n position[variable] = rnd.uniform(r1, bounds_variable[0][1])\r\n elif r1 > bounds_variable[0][0] and r2 < bounds_variable[0][1]: # /--O--/\r\n position[variable] = rnd.uniform(r1, r2)\r\n \r\n check_variable = check_bounds(bounds_variable, np.array([position[variable]]))\r\n if check_variable:\r\n check_var_count += 1\r\n if check_var_count == len(position):\r\n check = True\r\n if check:\r\n return position\r\n\r\ndef check_bounds(bounds, position):\r\n '''\r\n ------------------------------\r\n CHECK BOUNDS ALGORITM\r\n ------------------------------\r\n --- input ---\r\n bounds: (list) Bounds on the search domain\r\n position: (array) Proposed current position of the particle\r\n \r\n --- output ---\r\n valid_position: (boolean) \"True\" if position is within the allowed bounds in every dimension and \"False\" otherwise\r\n '''\r\n # Initialization\r\n bounds = bounds\r\n position = position\r\n # ----------------------------------------------\r\n dim = len(bounds)\r\n count = 0\r\n for i in range(dim):\r\n if position[i] <= bounds[i][1] and position[i] >= bounds[i][0]:\r\n count += 1\r\n if count == dim:\r\n return True\r\n else:\r\n return False\r\n\r\ndef tabu_zone(tabu, continuos_radius):\r\n '''\r\n ------------------------------\r\n DEFINES TABU ZONE FOR EACH VARIABLE IN A POINT GIVEN A CONTINUOS RADIUS\r\n ------------------------------\r\n --- input ---\r\n tabu: (array) Point classified as Tabu\r\n continuos_radius: (list) Radius around each variable to define tabu zone for each variable in the tabu point\r\n \r\n --- output ---\r\n tabu_z: (list) It contains the tabu zone per each variable of the point\r\n '''\r\n # Initialization\r\n tabu = tabu\r\n continuos_radius\r\n # ----------------------------------------------\r\n tabu_z = [] # To store tabu zones for each variable\r\n # Defines tabu zone per each variable\r\n for i in range(len(tabu)):\r\n left = tabu[i] - continuos_radius[i] # Defines left bound of the tabu zone\r\n right = tabu[i] + continuos_radius[i] # Defines right bound of the tabu zone\r\n tabu_z.append((left, right))\r\n return tabu_z\r\n\r\ndef tabu_zones(tabuList, continuos_radius):\r\n '''\r\n ------------------------------\r\n GIVES TABU ZONES FOR EACH TABU IN THE LIST\r\n ------------------------------\r\n --- input ---\r\n tabuList: (list) Stores all the current tabu points\r\n continuos_radius: (list) Radius around each variable to define tabu zone for each variable in the tabu point\r\n \r\n --- output ---\r\n tabu_zs: (list) It contains the tabu zones per each tabu point\r\n '''\r\n # Initialization \r\n tabuList = tabuList\r\n continuos_radius = continuos_radius\r\n # ----------------------------------------------\r\n tabu_zs = []\r\n for tabu in tabuList:\r\n t_z = tabu_zone(tabu, continuos_radius)\r\n tabu_zs.append(t_z)\r\n return tabu_zs\r\n\r\ndef check_tabu(position, tabu_zs):\r\n '''\r\n ------------------------------\r\n CHECKS IF A POINT IS A TABU OR NOT\r\n ------------------------------\r\n --- input ---\r\n position: (array) Position within the search space\r\n tabu_zs: (list) It contains the tabu zones per each tabu point\r\n \r\n --- output ---\r\n tabu: (boolean) \"True\" if position is a tabu (it's within the tabu zone in each one of its variables) and \"False\" otherwise\r\n '''\r\n # Initialization\r\n position = position\r\n 
tabu_zs = tabu_zs\r\n # ----------------------------------------------\r\n for tabu_z in tabu_zs:\r\n check = check_bounds(tabu_z, position) # Checks if the position is within a tabu zone\r\n if check:\r\n return True\r\n if not check:\r\n return False\r\n\r\ndef p_outside_tabu(bounds, tabu_zs):\r\n '''\r\n ------------------------------\r\n GENENRATES A POSITION OUTSIDE TABU ZONES\r\n ------------------------------\r\n --- input ---\r\n bounds: (list) Bounds on the search domain\r\n tabu_zs: (list) It contains the tabu zones per each tabu point\r\n \r\n --- output ---\r\n p_out_tabu: (array) Random particle outside tabu zones and within bounds\r\n '''\r\n # Initialization\r\n bounds = bounds\r\n tabu_zs = tabu_zs\r\n # ----------------------------------------------\r\n p_out_tabu = np.zeros(len(bounds)) # Creates an array of zeros to store new position\r\n \r\n check = True\r\n while check:\r\n select_tabu_z = rnd.choice(tabu_zs) # Selects randomnly one of the sets of tabu zones\r\n for i in range(len(bounds)):\r\n # Selects valid limits\r\n if select_tabu_z[i][0] > bounds[i][0] and select_tabu_z[i][1] < bounds[i][1]: # /--O--/\r\n left1 = bounds[i][0]\r\n right1 = select_tabu_z[i][0]\r\n left2 = select_tabu_z[i][1]\r\n right2 = bounds[i][1]\r\n elif select_tabu_z[i][0] > bounds[i][0] and select_tabu_z[i][1] >= bounds[i][1]: # /----O\r\n left1 = bounds[i][0]\r\n right1 = select_tabu_z[i][0]\r\n left2 = left1\r\n right2 = right1\r\n elif select_tabu_z[i][0] <= bounds[i][0] and select_tabu_z[i][1] < bounds[i][1]: # O----/\r\n left1 = select_tabu_z[i][1]\r\n right1 = bounds[i][1]\r\n left2 = left1\r\n right2 = right1\r\n new_bounds = rnd.choice(((left1, right1),(left2, right2)))\r\n p_out_tabu[i] = rnd.uniform(new_bounds[0], new_bounds[1])\r\n check = check_tabu(p_out_tabu, tabu_zs) # Checks whether the new position is a tabu or not\r\n return p_out_tabu\r\n \r\ndef new_particle(bounds):\r\n '''\r\n ------------------------------\r\n GENERATES NEW PARTICLE (POINT) RANDOMLY\r\n ------------------------------\r\n --- input ---\r\n bounds: (list) Bounds on the search domain\r\n \r\n --- output ---\r\n particle: (array) Random particle within bounds\r\n '''\r\n #Initialization\r\n B = bounds\r\n # ----------------------------------------------\r\n dim = len(B)\r\n #Generate new random particle within the bounds\r\n particle = np.array([rnd.uniform(B[i][0],B[i][1]) for i in range(dim)])\r\n return particle\r\n\r\ndef first_generation(num_p, bounds):\r\n '''\r\n ------------------------------\r\n GENERATES FIRST GENERATION FOR GA\r\n ------------------------------\r\n --- input ---\r\n num_p: (integer) Number of particles in the new generation to be created\r\n bounds: (list) Bounds on the search domain\r\n \r\n --- output ---\r\n generation: (list) Set of new particles\r\n '''\r\n # Initialization\r\n S = num_p\r\n B = bounds\r\n # ----------------------------------------------\r\n generation = []\r\n # Generates a set of num_p new particles\r\n for point in range(S):\r\n particle = new_particle(B)\r\n generation.append(particle)\r\n return generation\r\n\r\ndef sort_standard(f, generation):\r\n '''\r\n ------------------------------\r\n STANDARD SORT (WALKS THROUGH EACH ELEMENT IN LIST)\r\n ------------------------------\r\n --- input ---\r\n f: (function) Objetive function\r\n generation: (list) Set of new particles\r\n \r\n --- output ---\r\n g_sorted: (matrix) Sorted set of new particles. 
Row: particle, Column: variable\r\n '''\r\n # Initialization\r\n F = f\r\n G = generation\r\n # ----------------------------------------------\r\n dim = len(G)\r\n num_var = len(G[0])\r\n g_sorted = np.reshape([(0.0) for i in range(num_var*dim)], (dim,num_var)) # Creates a matrix of zeros\r\n values = np.zeros((dim,2)) # Creates a matrix of zeros\r\n # Stores the points with their respective value(as a key)\r\n index = 0\r\n for particle in G:\r\n values[index][0] = F(particle)\r\n values[index][1] = index\r\n index += 1\r\n # Sorts values\r\n values_sorted = values[np.argsort(values[:,0])]\r\n # Stores sorted values in the previously created matrix\r\n ind_sorted = values_sorted[:,1]\r\n i = 0\r\n for ind in ind_sorted:\r\n g_sorted[i] = G[int(ind)]\r\n i += 1\r\n return g_sorted\r\n\r\ndef selection(g_sorted, best_num, random_num):\r\n '''\r\n ------------------------------\r\n SELECTION OF THE FITTEST POINTS AND SOME RANDOME ONES\r\n ------------------------------\r\n --- input ---\r\n g_sorted: (matrix) Sorted set of new particles. Row: particle, Column: variable.\r\n best_num: (integer) Number of best particles you want to select\r\n random_num: (integer) Number of random particles you want to select from the rest\r\n \r\n --- output ---\r\n selected: (matrix) Set of particles selected\r\n '''\r\n #Initialization\r\n g = g_sorted\r\n best = best_num\r\n random = random_num\r\n # ----------------------------------------------\r\n num_var = len(g[0])\r\n selected = np.reshape([(0.0) for i in range(num_var*(best + random))], ((best + random),num_var)) # Creates a matrix of zeros\r\n # Stores the best points to the matrix \"selected\"\r\n for i in range(best):\r\n selected[i] = g[i]\r\n # Stores points from the rest of the generation randomly \r\n for i in range(random):\r\n selected[i + best] = rnd.choice(g[best:])\r\n return selected\r\n\r\ndef define_parents(selected, parents_child):\r\n '''\r\n ------------------------------\r\n SELECTION OF POINTS WHICH ARE GONNA BE RECOMBINED AMONG THEM\r\n ------------------------------\r\n --- input ---\r\n selected: (matrix) Set of particles selected\r\n parents_child: (integer) Number of parents per child\r\n \r\n --- output ---\r\n groups_par: (list) Set of groups of parents that are gonna be recombined\r\n '''\r\n # Initialization\r\n parents = selected\r\n N = parents_child\r\n # ----------------------------------------------\r\n groups_par = []\r\n # Loop to define parents\r\n row_parent = 0 # Number of the row in the Matrix for the current parent in the next loop\r\n for parent in parents:\r\n group_repro = np.zeros((N,len(parent))) # Creates a matrix of zeros\r\n # Makes a matrix of the candidates to be reproduced with\r\n candidates = np.delete(parents, row_parent, 0)\r\n # Randomly select the parents from candidates to be later reproduce with.\r\n for i in range (N-1):\r\n cand = rnd.choice(candidates)\r\n group_repro[i] = parent\r\n group_repro[i+1] = cand\r\n index_row = np.where([cand] == [cand])[0][0] # Gives de row number of \"cand\"\r\n candidates = np.delete(candidates, index_row, 0) # Prevent repetition within the group of parents\r\n groups_par.append(group_repro)\r\n row_parent += 1\r\n return groups_par\r\n \r\ndef reproduction(groups_par, num_children):\r\n '''\r\n ------------------------------\r\n REPRODUCTION OF PARENTS BY RANDOM RECOMBINATION\r\n ------------------------------\r\n --- input ---\r\n groups_par: (list) Set of groups of parents that are gonna be recombined\r\n num_children: (integer) Number of children per 
group of parents\r\n \r\n --- output ---\r\n new_gener_r: (matrix) Set of children (points) produced by recombination of the groups of parents\r\n '''\r\n # Initialization\r\n groups = groups_par\r\n num_ch = num_children\r\n # ----------------------------------------------\r\n new_gener = []\r\n num_ac_child = 0 #Number of current children per group of parents \r\n # Produces the number of children specified per group of parents\r\n while num_ac_child < num_ch:\r\n # Loops per group of parents\r\n for group in groups:\r\n child = []\r\n #Loops per each variable in a point \r\n for variable in range(len(group[0])):\r\n can_var = []\r\n # Selects the variable randomly between the group of parents\r\n for i in range(len(group)):\r\n can_var.append(group[i][variable])\r\n child.append(rnd.choice(can_var))\r\n new_gener.append(child)\r\n num_ac_child += 1\r\n new_gener_r = np.asarray(new_gener)\r\n return new_gener_r\r\n\r\ndef mutation(new_gener_r, bounds, continuos_radius):\r\n '''\r\n ------------------------------\r\n MUTATION OF NEW CHILDREN WITH CERTAIN PROBABILITY\r\n ------------------------------\r\n --- input ---\r\n new_gener_r: (matrix) Set of children (points) produced by recombination of the groups of parents\r\n bounds: (list) Bounds on the search domain\r\n continuos_radius: (list) Radius around each variable to define prohibeted zone for each variable. Continuos numbers application\r\n \r\n --- output ---\r\n g_m: (matrix) Set of children (points) passed through random mutation with certain probability\r\n '''\r\n #Initialization\r\n g_r = new_gener_r\r\n B = bounds\r\n c_r = continuos_radius\r\n # ----------------------------------------------\r\n num_var = len(g_r[0])\r\n # Makes a random change in a random variable per child with certain probability of mutation\r\n i = 0\r\n for child in g_r:\r\n probability = 1/num_var # 1/num of decision variables according to Deb,K. (2001), Multi-Objective Optimization Using Evolutionary Algorithms\r\n if child in np.delete(g_r, i): # Prevents a child to be exactly the same as one of its parents\r\n probability = 1\r\n if rnd.random() < probability:\r\n random_index = np.where( child == (rnd.choice(child)))[0][0]\r\n c_r_v = [c_r[random_index]] # Selects the continuos radius to the chosen variable to be mutated and store it as need it for \"tabu_zones\"\r\n var_mut = [np.array([child[random_index]])] # Selects the chosen variable to be mutated and store it as need it for \"tabu_zones\"\r\n proh_zone = tabu_zones(var_mut, c_r_v) # Defines the zone within the continuos radius\r\n bounds_var = [B[random_index]] # Selects the bounds for the chosen variable to be mutated\r\n mutated_variable = p_outside_tabu(bounds_var, proh_zone) # Gives a random number for the chosen variable outside continuos radius and within bounds\r\n child[random_index] = mutated_variable # Replaces the mutated variable in place\r\n #child[random_index] = rnd.uniform(B[random_index][0],B[random_index][1])\r\n i += 1\r\n new_gener_m = g_r\r\n return new_gener_m\r\n"
] | [
[
"numpy.asarray",
"numpy.delete",
"numpy.copy",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
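The `stoch_optim_utilities.py` entry above implements random search (`RS_f_v2`), local search, and tabu/GA helpers. As a quick illustration of the core random-search loop it describes, here is a self-contained sketch: sample a trial point uniformly inside the bounds each iteration and keep the best value seen so far. The sphere objective, the bounds, the starting point, and the iteration count are illustrative assumptions, not taken from the file.

```python
# Minimal sketch of the random-search loop implemented by RS_f_v2 in the entry above.
import random as rnd
import numpy as np

def sphere(x):
    # Simple convex test objective (assumed for illustration); minimum is 0 at the origin.
    return float(np.sum(x ** 2))

def random_search(f, p_best, max_iter, bounds):
    best_position = np.asarray(p_best, dtype=float)
    best_value = f(best_position)
    for _ in range(max_iter):
        # Draw one trial point uniformly within the per-dimension bounds.
        trial = np.array([rnd.uniform(lo, hi) for lo, hi in bounds])
        value = f(trial)
        if value < best_value:  # keep the improvement, discard the rest
            best_value, best_position = value, trial
    return best_value, best_position

bounds = [(-5.0, 5.0), (-5.0, 5.0)]
val, pos = random_search(sphere, p_best=np.array([4.0, 4.0]), max_iter=2000, bounds=bounds)
print(val, pos)
```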
mjklemm/Kernels | [
"0131ab2c0451d3fdd4237ed74efc49b85f6ab475"
] | [
"PYTHON/stencil-numba.py"
] | [
"#!/usr/bin/env python3\n#\n# Copyright (c) 2015, Intel Corporation\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products\n# derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n#\n#\n# *******************************************************************\n#\n# NAME: Stencil\n#\n# PURPOSE: This program tests the efficiency with which a space-invariant,\n# linear, symmetric filter (stencil) can be applied to a square\n# grid or image.\n#\n# USAGE: The program takes as input the linear\n# dimension of the grid, and the number of iterations on the grid\n#\n# <progname> <iterations> <grid size>\n#\n# The output consists of diagnostics to make sure the\n# algorithm worked, and of timing statistics.\n#\n# HISTORY: - Written by Rob Van der Wijngaart, February 2009.\n# - RvdW: Removed unrolling pragmas for clarity;\n# added constant to array \"in\" at end of each iteration to force\n# refreshing of neighbor data in parallel versions; August 2013\n# - Converted to Python by Jeff Hammond, February 2016.\n#\n# *******************************************************************\n\nimport sys\n#from timeit import default_timer as timer\nfrom time import process_time as timer\nfrom numba import jit\nimport numpy\n\n@jit\ndef grid(n,r,W,A,B):\n if r>0:\n b = n-r\n for s in range(-r, r+1):\n for t in range(-r, r+1):\n B[r:b,r:b] += W[r+t,r+s] * A[r+t:b+t,r+s:b+s]\n\n@jit\ndef star2(n,W,A,B):\n B[2:n-2,2:n-2] += W[2,2] * A[2:n-2,2:n-2] \\\n + W[2,0] * A[2:n-2,0:n-4] \\\n + W[2,1] * A[2:n-2,1:n-3] \\\n + W[2,3] * A[2:n-2,3:n-1] \\\n + W[2,4] * A[2:n-2,4:n-0] \\\n + W[0,2] * A[0:n-4,2:n-2] \\\n + W[1,2] * A[1:n-3,2:n-2] \\\n + W[3,2] * A[3:n-1,2:n-2] \\\n + W[4,2] * A[4:n-0,2:n-2]\n\n@jit\ndef star(n,r,W,A,B):\n b = n-r\n B[r:b,r:b] += W[r,r] * A[r:b,r:b]\n for s in range(1,r+1):\n B[r:b,r:b] += W[r,r-s] * A[r:b,r-s:b-s] \\\n + W[r,r+s] * A[r:b,r+s:b+s] \\\n + W[r-s,r] * A[r-s:b-s,r:b] \\\n + W[r+s,r] * A[r+s:b+s,r:b]\n\ndef main():\n\n # ********************************************************************\n # read and test input parameters\n # ********************************************************************\n\n print('Parallel Research 
Kernels version ') #, PRKVERSION\n print('Python stencil execution on 2D grid')\n\n if len(sys.argv) < 3:\n print('argument count = ', len(sys.argv))\n sys.exit(\"Usage: ./stencil <# iterations> <array dimension> [<star/grid> <radius>]\")\n\n iterations = int(sys.argv[1])\n if iterations < 1:\n sys.exit(\"ERROR: iterations must be >= 1\")\n\n n = int(sys.argv[2])\n if n < 1:\n sys.exit(\"ERROR: array dimension must be >= 1\")\n\n if len(sys.argv) > 3:\n pattern = sys.argv[3]\n else:\n pattern = 'star'\n\n if len(sys.argv) > 4:\n r = int(sys.argv[4])\n if r < 1:\n sys.exit(\"ERROR: Stencil radius should be positive\")\n if (2*r+1) > n:\n sys.exit(\"ERROR: Stencil radius exceeds grid size\")\n else:\n r = 2 # radius=2 is what other impls use right now\n\n print('Grid size = ', n)\n print('Radius of stencil = ', r)\n if pattern == 'star':\n print('Type of stencil = ','star')\n else:\n print('Type of stencil = ','grid')\n\n print('Data type = double precision')\n print('Compact representation of stencil loop body')\n print('Number of iterations = ', iterations)\n\n # there is certainly a more Pythonic way to initialize W,\n # but it will have no impact on performance.\n W = numpy.zeros(((2*r+1),(2*r+1)))\n if pattern == 'star':\n stencil_size = 4*r+1\n for i in range(1,r+1):\n W[r,r+i] = +1./(2*i*r)\n W[r+i,r] = +1./(2*i*r)\n W[r,r-i] = -1./(2*i*r)\n W[r-i,r] = -1./(2*i*r)\n\n else:\n stencil_size = (2*r+1)**2\n for j in range(1,r+1):\n for i in range(-j+1,j):\n W[r+i,r+j] = +1./(4*j*(2*j-1)*r)\n W[r+i,r-j] = -1./(4*j*(2*j-1)*r)\n W[r+j,r+i] = +1./(4*j*(2*j-1)*r)\n W[r-j,r+i] = -1./(4*j*(2*j-1)*r)\n\n W[r+j,r+j] = +1./(4*j*r)\n W[r-j,r-j] = -1./(4*j*r)\n\n A = numpy.fromfunction(lambda i,j: i+j, (n,n), dtype=float)\n B = numpy.zeros((n,n))\n\n for k in range(iterations+1):\n # start timer after a warmup iteration\n if k<1: t0 = timer()\n\n if pattern == 'star':\n if r == 2:\n star2(n,W,A,B)\n else:\n star(n,r,W,A,B)\n\n else: # grid\n grid(n,r,W,A,B)\n A += 1.0\n\n t1 = timer()\n stencil_time = t1 - t0\n\n #******************************************************************************\n #* Analyze and output results.\n #******************************************************************************\n\n norm = numpy.linalg.norm(numpy.reshape(B,n*n),ord=1)\n active_points = (n-2*r)**2\n norm /= active_points\n\n epsilon=1.e-8\n\n # verify correctness\n reference_norm = 2*(iterations+1)\n if abs(norm-reference_norm) < epsilon:\n print('Solution validates')\n flops = (2*stencil_size+1) * active_points\n avgtime = stencil_time/iterations\n print('Rate (MFlops/s): ',1.e-6*flops/avgtime, ' Avg time (s): ',avgtime)\n else:\n print('ERROR: L1 norm = ', norm,' Reference L1 norm = ', reference_norm)\n sys.exit()\n\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"numpy.reshape",
"numpy.fromfunction",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
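The stencil entry above applies a star-shaped filter with NumPy slice arithmetic, optionally accelerated with Numba's `@jit`. The sketch below reproduces that slicing pattern on a small grid in plain NumPy (no Numba) so it can run anywhere; the grid size is an arbitrary choice, while the star weights follow the benchmark's own formula.

```python
# Minimal sketch of the slicing-based star stencil used by star()/star2() above,
# for radius r on an n x n grid (the original additionally wraps this in numba @jit).
import numpy as np

def apply_star(n, r, W, A, B):
    b = n - r
    B[r:b, r:b] += W[r, r] * A[r:b, r:b]
    for s in range(1, r + 1):
        # Accumulate the four arms of the star at offset s.
        B[r:b, r:b] += W[r, r - s] * A[r:b, r - s:b - s] \
                     + W[r, r + s] * A[r:b, r + s:b + s] \
                     + W[r - s, r] * A[r - s:b - s, r:b] \
                     + W[r + s, r] * A[r + s:b + s, r:b]

n, r = 16, 2
W = np.zeros((2 * r + 1, 2 * r + 1))
for i in range(1, r + 1):  # same star weights as the benchmark
    W[r, r + i] = +1.0 / (2 * i * r)
    W[r + i, r] = +1.0 / (2 * i * r)
    W[r, r - i] = -1.0 / (2 * i * r)
    W[r - i, r] = -1.0 / (2 * i * r)
A = np.fromfunction(lambda i, j: i + j, (n, n), dtype=float)
B = np.zeros((n, n))
apply_star(n, r, W, A, B)
print(B[r:n - r, r:n - r].mean())  # should print 2.0 for this A and W
```

For the benchmark's initial field A[i, j] = i + j, every interior point of B increases by exactly 2.0 per application, which is what the reference L1 norm of 2*(iterations+1) in the embedded code checks.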