repo_name stringlengths 6–130 | hexsha sequence | file_path sequence | code sequence | apis sequence | possible_versions list |
---|---|---|---|---|---|
vkso/FER | [
"b7207341139ff451753a4c4640530e915673fc7c"
] | [
"train.py"
] | [
"import myMethod as myMethod\nfrom datetime import datetime\nfrom customParameters import *\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport os\nimport argparse\n\n# python train.py --gpus 1 --model myModel --train_name fc1024\nparser = argparse.ArgumentParser(description='train args')\nparser.add_argument('--gpus', type=int, default=1)\nparser.add_argument('--model', type=str, default='myModel')\nparser.add_argument('--train_name', type=str, default='newTrain')\n\nargs = parser.parse_args()\n\n\ntrain_file_path = \"./data/FER2013/train.csv\"\ntest_public_path = \"./data/FER2013/public_test.csv\"\ntest_private_path = \"./data/FER2013/private_test.csv\"\n\ntrain_data = myMethod.get_dataset_train(train_file_path)\npublic_test_data = myMethod.get_dataset_test(test_public_path)\nprivate_test_data = myMethod.get_dataset_test(test_private_path)\n\ntrain_data = train_data.map(myMethod.preprocess_traindata)\npublic_test_data = public_test_data.map(myMethod.preprocess_testdata)\nprivate_test_data = private_test_data.map(myMethod.preprocess_testdata)\n\n# xxx = next(iter(public_test_data))\n# original = xxx[0][1]\n# original = tf.reshape(original, [1, 48, 48, 1])\n# print(original.shape)\n#\n# after = tf.image.random_crop(original, size=[1, 42, 42, 1])\n# print(after.shape)\n# original = tf.reshape(original, [48, 48])\n# after = tf.reshape(after, [42, 42])\n#\n# myMethod.visualize(original, after)\n# plt.show()\n\n\n# ------------------------------------------------------------------------------\n\n# ------------------------------------------------------------------------------\n\n\ndef tmpvisualize(lu, ru, ld, rd, original):\n fig = plt.figure()\n plt.subplot(3, 2, 1)\n plt.title('Original Image')\n plt.imshow(lu, cmap='gray')\n\n plt.subplot(3, 2, 2)\n plt.title('Augmented Image')\n plt.imshow(ru, cmap='gray')\n\n plt.subplot(3, 2, 3)\n plt.title('Augmented Image')\n plt.imshow(ld, cmap='gray')\n\n plt.subplot(3, 2, 4)\n plt.title('Augmented Image')\n plt.imshow(rd, cmap='gray')\n\n plt.subplot(3, 2, 5)\n plt.title('Augmented Image')\n plt.imshow(original, cmap='gray')\n\n\n\n# tmpvisualize(left_up, right_up, left_down, right_down, original)\n# plt.show()\n# myMethod.visualize(original, right_down)\n# plt.show()\n\n# ------------------------------------------------------------------------------\n# TensorBoard\nlogdir = \"./logs/\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)\n\n# Save checkpoints\ncheckpoint_path = \"./train_history/\" + args.train_name + '/' + \"cp-{epoch:04d}.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\ncp_callback = keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path,\n verbose=1,\n save_weights_only=True,\n # save_best_only=True)\n period=10)\n\n\n\ndef singleGPU():\n if args.model == 'myVGG':\n model = myMethod.create_myVGG()\n else:\n model = myMethod.create_myModel()\n\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=[\"accuracy\"])\n model.summary()\n\n model.fit(train_data, epochs=TOTAL_EPOCHS, steps_per_epoch=TOTAL_TRAIN // BATCH_SIZE_TRAIN,\n callbacks=[tensorboard_callback, cp_callback],\n validation_data=public_test_data,\n validation_steps=TOTAL_TEST // BATCH_SIZE_TRAIN)\n model.evaluate(private_test_data, steps=TOTAL_TEST // BATCH_SIZE_TRAIN)\n\ndef multiGPUs():\n GPUS = args.gpus\n strategy = tf.distribute.MirroredStrategy()\n\n with strategy.scope():\n if args.model == 'myVGG':\n model = 
myMethod.create_myVGG()\n else:\n model = myMethod.create_myModel()\n\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=[\"accuracy\"])\n\n model.fit(train_data, epochs=TOTAL_EPOCHS, steps_per_epoch=TOTAL_TRAIN // BATCH_SIZE_TRAIN // GPUS,\n callbacks=[tensorboard_callback, cp_callback],\n validation_data=public_test_data,\n validation_steps=TOTAL_TEST // BATCH_SIZE_TRAIN // GPUS)\n model.evaluate(private_test_data, steps=TOTAL_TEST // BATCH_SIZE_TRAIN // GPUS)\n\ndef trainModel():\n if args.gpus == 1:\n singleGPU()\n else:\n multiGPUs()\n\nif __name__ == '__main__':\n trainModel()"
] | [
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.title",
"tensorflow.distribute.MirroredStrategy",
"matplotlib.pyplot.subplot",
"tensorflow.keras.callbacks.TensorBoard",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
flavorfan/deep-learning-coursera | [
"6aa1274a450fcb7a57c04072fe3bcf416501bdc6"
] | [
"Sequence Models/Trigger word detection/rnn_model.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\n\nimport pyaudio\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.callbacks import TensorBoard\nfrom keras.models import Model, load_model, Sequential\nfrom keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D\nfrom keras.layers import GRU, Bidirectional, BatchNormalization, Reshape\nfrom keras.optimizers import Adam\n\nTx = 5511 # The number of time steps input to the model from the spectrogram\nn_freq = 101 # Number of frequencies input to the model at each time step of the spectrogram\nTy = 1375 # The number of time steps in the output of our model\n\ndef model(input_shape):\n \"\"\"\n Function creating the model's graph in Keras.\n\n Argument:\n input_shape -- shape of the model's input data (using Keras conventions)\n\n Returns:\n model -- Keras model instance\n \"\"\"\n X_input = Input(shape=input_shape)\n\n # Step 1: CONV layer (≈4 lines)\n X = Conv1D(256, kernel_size=15, strides=4)(X_input) # CONV1D\n\n X = BatchNormalization()(X) # Batch normalization\n X = Activation('relu')(X) # ReLu activation\n X = Dropout(0.8)(X) # dropout (use 0.8)\n\n # Step 2: First GRU Layer (≈4 lines)\n X = GRU(units=128, return_sequences=True)(X) # GRU (use 128 units and return the sequences)\n X = Dropout(0.8)(X) # dropout (use 0.8)\n X = BatchNormalization()(X) # Batch normalization\n\n # Step 3: Second GRU Layer (≈4 lines)\n X = GRU(units=128, return_sequences=True)(X) # GRU (use 128 units and return the sequences)\n X = Dropout(0.8)(X) # dropout (use 0.8)\n X = BatchNormalization()(X) # Batch normalization\n X = Dropout(0.8)(X) # dropout (use 0.8)\n\n # Step 4: Time-distributed dense layer (≈1 line)\n X = TimeDistributed(Dense(1, activation=\"sigmoid\"))(X) # time distributed (sigmoid)\n\n model = Model(inputs=X_input, outputs=X)\n return model\n\ntbCallBack = TensorBoard(log_dir='/data/train_dir/trigger_word', # log 目录\n histogram_freq=0, # 按照何等频率(epoch)来计算直方图,0为不计算\n# batch_size=32, # 用多大量的数据计算直方图\n write_graph=True, # 是否存储网络结构图\n write_grads=True, # 是否可视化梯度直方图\n write_images=True,# 是否可视化参数\n embeddings_freq=0,\n embeddings_layer_names=None,\n embeddings_metadata=None)\n\n# def detect_triggerword(filename):\n# plt.subplot(2, 1, 1)\n#\n# x = graph_spectrogram(filename)\n# # the spectogram outputs (freqs, Tx) and we want (Tx, freqs) to input into the model\n# x = x.swapaxes(0, 1)\n# x = np.expand_dims(x, axis=0)\n# predictions = model.predict(x)\n#\n# plt.subplot(2, 1, 2)\n# plt.plot(predictions[0, :, 0])\n# plt.ylabel('probability')\n# plt.show()\n# return predictions\n\n\n\n\nif __name__ == '__main__':\n model = model(input_shape=(Tx, n_freq))\n model.summary()\n\n opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.01)\n model.compile(loss='binary_crossentropy', optimizer=opt, metrics=[\"accuracy\"])\n\n # model.fit(X, Y, batch_size=5, epochs=1)\n X = np.load(\"./X.npy\")\n Y = np.load(\"./Y.npy\")\n\n X_dev = np.load(\"./X_test.npy\")\n Y_dev = np.load(\"./Y_test.npy\")\n\n model.fit(X, Y, batch_size=5, epochs=100, callbacks=[tbCallBack])\n model.save(\"./models/fan_trigger_word_model.h5\")\n\n # Dev set accuracy = 0.9359999895095825\n loss, acc = model.evaluate(X_dev, Y_dev)\n print(\"Dev set accuracy = \", acc)\n\n # filename = \"./raw_data/dev/1.wav\"\n # filename = \"./test_dir/test3.wav\"\n # filename = \"./train_dir/train476.wav\"\n # prediction = detect_triggerword(filename)\n\n# model compare\n# model = load_model('./models/tr_model.h5')\n# model = 
load_model('./models/fan_audio_wake_model_V2.h5')\n\n#######\n\n\n\n"
] | [
[
"numpy.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kortemaki/OpenNMT-py | [
"fa793257966c23280e5a72bd43e56a1e998e47f7"
] | [
"onmt/modules/GlobalAttention.py"
] | [
"import torch\nimport torch.nn as nn\n\nfrom onmt.Utils import aeq, sequence_mask\n\nclass GlobalAttention(nn.Module):\n \"\"\"\n Global attention takes a matrix and a query vector. It\n then computes a parameterized convex combination of the matrix\n based on the input query.\n\n Constructs a unit mapping a query `q` of size `dim`\n and a source matrix `H` of size `n x dim`, to an output\n of size `dim`.\n\n\n .. mermaid::\n\n graph BT\n A[Query]\n subgraph RNN\n C[H 1]\n D[H 2]\n E[H N]\n end\n F[Attn]\n G[Output]\n A --> F\n C --> F\n D --> F\n E --> F\n C -.-> G\n D -.-> G\n E -.-> G\n F --> G\n\n All models compute the output as\n :math:`c = \\sum_{j=1}^{SeqLength} a_j H_j` where\n :math:`a_j` is the softmax of a score function.\n Then then apply a projection layer to [q, c].\n\n However they\n differ on how they compute the attention score.\n\n * Luong Attention (dot, general):\n * dot: :math:`score(H_j,q) = H_j^T q`\n * general: :math:`score(H_j, q) = H_j^T W_a q`\n\n\n * Bahdanau Attention (mlp):\n * :math:`score(H_j, q) = v_a^T tanh(W_a q + U_a h_j)`\n\n\n Args:\n dim (int): dimensionality of query and key\n coverage (bool): use coverage term\n attn_type (str): type of attention to use, options [dot,general,mlp]\n\n \"\"\"\n def __init__(self, dim, coverage=False, attn_type=\"dot\"):\n super(GlobalAttention, self).__init__()\n\n self.dim = dim\n self.attn_type = attn_type\n assert (self.attn_type in [\"dot\", \"general\", \"mlp\"]), (\n \"Please select a valid attention type.\")\n\n if self.attn_type == \"general\":\n self.linear_in = nn.Linear(dim, dim, bias=False)\n elif self.attn_type == \"mlp\":\n self.linear_context = nn.Linear(dim, dim, bias=False)\n self.linear_query = nn.Linear(dim, dim, bias=True)\n self.v = nn.Linear(dim, 1, bias=False)\n # mlp wants it with bias\n out_bias = self.attn_type == \"mlp\"\n self.linear_out = nn.Linear(dim*2, dim, bias=out_bias)\n\n self.sm = nn.Softmax()\n self.tanh = nn.Tanh()\n\n if coverage:\n self.linear_cover = nn.Linear(1, dim, bias=False)\n\n def score(self, h_t, h_s):\n \"\"\"\n Args:\n h_t (`FloatTensor`): sequence of queries `[batch x tgt_len x dim]`\n h_s (`FloatTensor`): sequence of sources `[batch x src_len x dim]`\n\n Returns:\n :obj:`FloatTensor`:\n raw attention scores (unnormalized) for each src index\n `[batch x tgt_len x src_len]`\n\n \"\"\"\n\n # Check input sizes\n src_batch, src_len, src_dim = h_s.size()\n tgt_batch, tgt_len, tgt_dim = h_t.size()\n aeq(src_batch, tgt_batch)\n aeq(src_dim, tgt_dim)\n aeq(self.dim, src_dim)\n\n if self.attn_type in [\"general\", \"dot\"]:\n if self.attn_type == \"general\":\n h_t_ = h_t.view(tgt_batch*tgt_len, tgt_dim)\n h_t_ = self.linear_in(h_t_)\n h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)\n h_s_ = h_s.transpose(1, 2)\n # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)\n return torch.bmm(h_t, h_s_)\n else:\n dim = self.dim\n wq = self.linear_query(h_t.view(-1, dim))\n wq = wq.view(tgt_batch, tgt_len, 1, dim)\n wq = wq.expand(tgt_batch, tgt_len, src_len, dim)\n\n uh = self.linear_context(h_s.contiguous().view(-1, dim))\n uh = uh.view(src_batch, 1, src_len, dim)\n uh = uh.expand(src_batch, tgt_len, src_len, dim)\n\n # (batch, t_len, s_len, d)\n wquh = self.tanh(wq + uh)\n\n return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)\n\n def forward(self, input, memory_bank, memory_lengths=None, coverage=None):\n \"\"\"\n\n Args:\n input (`FloatTensor`): query vectors `[batch x tgt_len x dim]`\n memory_bank (`FloatTensor`): source vectors `[batch x src_len x 
dim]`\n memory_lengths (`LongTensor`): the source context lengths `[batch]`\n coverage (`FloatTensor`): None (not supported yet)\n\n Returns:\n (`FloatTensor`, `FloatTensor`):\n\n * Computed vector `[tgt_len x batch x dim]`\n * Attention distribtutions for each query\n `[tgt_len x batch x src_len]`\n \"\"\"\n\n # one step input\n if input.dim() == 2:\n one_step = True\n input = input.unsqueeze(1)\n else:\n one_step = False\n\n batch, sourceL, dim = memory_bank.size()\n batch_, targetL, dim_ = input.size()\n aeq(batch, batch_)\n aeq(dim, dim_)\n aeq(self.dim, dim)\n if coverage is not None:\n batch_, sourceL_ = coverage.size()\n aeq(batch, batch_)\n aeq(sourceL, sourceL_)\n\n if coverage is not None:\n cover = coverage.view(-1).unsqueeze(1)\n memory_bank += self.linear_cover(cover).view_as(memory_bank)\n memory_bank = self.tanh(memory_bank)\n\n # compute attention scores, as in Luong et al.\n align = self.score(input, memory_bank)\n\n if memory_lengths is not None:\n mask = sequence_mask(memory_lengths)\n mask = mask.unsqueeze(1) # Make it broadcastable.\n align.data.masked_fill_(1 - mask, -float('inf'))\n\n # Softmax to normalize attention weights\n align_vectors = self.sm(align.view(batch*targetL, sourceL))\n align_vectors = align_vectors.view(batch, targetL, sourceL)\n\n # each context vector c_t is the weighted average\n # over all the source hidden states\n c = torch.bmm(align_vectors, memory_bank)\n\n # concatenate\n concat_c = torch.cat([c, input], 2).view(batch*targetL, dim*2)\n attn_h = self.linear_out(concat_c).view(batch, targetL, dim)\n if self.attn_type in [\"general\", \"dot\"]:\n attn_h = self.tanh(attn_h)\n\n if one_step:\n attn_h = attn_h.squeeze(1)\n align_vectors = align_vectors.squeeze(1)\n\n # Check output sizes\n batch_, dim_ = attn_h.size()\n aeq(batch, batch_)\n aeq(dim, dim_)\n batch_, sourceL_ = align_vectors.size()\n aeq(batch, batch_)\n aeq(sourceL, sourceL_)\n else:\n attn_h = attn_h.transpose(0, 1).contiguous()\n align_vectors = align_vectors.transpose(0, 1).contiguous()\n\n # Check output sizes\n targetL_, batch_, dim_ = attn_h.size()\n aeq(targetL, targetL_)\n aeq(batch, batch_)\n aeq(dim, dim_)\n targetL_, batch_, sourceL_ = align_vectors.size()\n aeq(targetL, targetL_)\n aeq(batch, batch_)\n aeq(sourceL, sourceL_)\n\n return attn_h, align_vectors\n"
] | [
[
"torch.nn.Softmax",
"torch.cat",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.bmm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rkarp/polars | [
"106bf5802126702cee8bc5bc21f2392bd5eebe98"
] | [
"py-polars/tests/test_series.py"
] | [
"from polars import Series\nfrom polars.datatypes import *\nimport polars as pl\nimport numpy as np\nimport pytest\nimport pyarrow as pa\n\n\ndef create_series() -> \"Series\":\n return Series(\"a\", [1, 2])\n\n\ndef test_to_frame():\n assert create_series().to_frame().shape == (2, 1)\n\n\ndef test_bitwise_ops():\n a = Series([True, False, True])\n b = Series([False, True, True])\n assert a & b == [False, False, True]\n assert a | b == [True, True, True]\n\n\ndef test_equality():\n a = create_series()\n b = a\n\n cmp = a == b\n assert isinstance(cmp, Series)\n assert cmp.sum() == 2\n assert (a != b).sum() == 0\n assert (a >= b).sum() == 2\n assert (a <= b).sum() == 2\n assert (a > b).sum() == 0\n assert (a < b).sum() == 0\n assert a.sum() == 3\n assert a.series_equal(b)\n\n a = Series(\"name\", [\"ham\", \"foo\", \"bar\"])\n assert (a == \"ham\").to_list() == [True, False, False]\n\n\ndef test_agg():\n a = create_series()\n assert a.mean() == 1.5\n assert a.min() == 1\n assert a.max() == 2\n\n\ndef test_arithmetic():\n a = create_series()\n b = a\n\n assert ((a * b) == [1, 4]).sum() == 2\n assert ((a / b) == [1.0, 1.0]).sum() == 2\n assert ((a + b) == [2, 4]).sum() == 2\n assert ((a - b) == [0, 0]).sum() == 2\n assert ((a + 1) == [2, 3]).sum() == 2\n assert ((a - 1) == [0, 1]).sum() == 2\n assert ((a / 1) == [1.0, 2.0]).sum() == 2\n assert ((a // 2) == [0, 1]).sum() == 2\n assert ((a * 2) == [2, 4]).sum() == 2\n assert ((1 + a) == [2, 3]).sum() == 2\n assert ((1 - a) == [0, -1]).sum() == 2\n assert ((1 * a) == [1, 2]).sum() == 2\n # integer division\n assert ((1 / a) == [1.0, 0.5]).sum() == 2\n assert ((1 // a) == [1, 0]).sum() == 2\n\n\ndef test_various():\n a = create_series()\n\n assert a.is_null().sum() == 0\n assert a.name == \"a\"\n a.rename(\"b\")\n assert a.name == \"b\"\n assert a.len() == 2\n assert len(a) == 2\n b = a.slice(1, 1)\n assert b.len() == 1\n assert b.series_equal(Series(\"\", [2]))\n a.append(b)\n assert a.series_equal(Series(\"\", [1, 2, 2]))\n\n a = Series(\"a\", range(20))\n assert a.head(5).len() == 5\n assert a.tail(5).len() == 5\n assert a.head(5) != a.tail(5)\n\n a = Series(\"a\", [2, 1, 4])\n a.sort(in_place=True)\n assert a.series_equal(Series(\"\", [1, 2, 4]))\n a = Series(\"a\", [2, 1, 1, 4, 4, 4])\n assert a.arg_unique().to_list() == [0, 1, 3]\n\n assert a.take([2, 3]).series_equal(Series(\"\", [1, 4]))\n assert a.is_numeric()\n a = Series(\"bool\", [True, False])\n assert not a.is_numeric()\n\n\ndef test_filter():\n a = Series(\"a\", range(20))\n assert a[a > 1].len() == 18\n assert a[a < 1].len() == 1\n assert a[a <= 1].len() == 2\n assert a[a >= 1].len() == 19\n assert a[a == 1].len() == 1\n assert a[a != 1].len() == 19\n\n\ndef test_cast():\n a = Series(\"a\", range(20))\n\n assert a.cast(Float32).dtype == Float32\n assert a.cast(Float64).dtype == Float64\n assert a.cast(Int32).dtype == Int32\n assert a.cast(UInt32).dtype == UInt32\n assert a.cast(Date64).dtype == Date64\n assert a.cast(Date32).dtype == Date32\n\n\ndef test_to_python():\n a = Series(\"a\", range(20))\n b = a.to_list()\n assert isinstance(b, list)\n assert len(b) == 20\n\n a = Series(\"a\", [1, None, 2], nullable=True)\n assert a.null_count() == 1\n assert a.to_list() == [1, None, 2]\n\n\ndef test_sort():\n a = Series(\"a\", [2, 1, 3])\n assert a.sort().to_list() == [1, 2, 3]\n assert a.sort(reverse=True) == [3, 2, 1]\n\n\ndef test_rechunk():\n a = Series(\"a\", [1, 2, 3])\n b = Series(\"b\", [4, 5, 6])\n a.append(b)\n assert a.n_chunks() == 2\n assert 
a.rechunk(in_place=False).n_chunks() == 1\n a.rechunk(in_place=True)\n assert a.n_chunks() == 1\n\n\ndef test_arrow():\n a = Series(\"a\", [1, 2, 3, None])\n out = a.to_arrow()\n assert out == pa.array([1, 2, 3, None])\n\n\ndef test_view():\n a = Series(\"a\", [1.0, 2.0, 3.0])\n assert isinstance(a.view(), np.ndarray)\n assert np.all(a.view() == np.array([1, 2, 3]))\n\n\ndef test_ufunc():\n a = Series(\"a\", [1.0, 2.0, 3.0, 4.0])\n b = np.multiply(a, 4)\n assert isinstance(b, Series)\n assert b == [4, 8, 12, 16]\n\n # test if null bitmask is preserved\n a = Series(\"a\", [1.0, None, 3.0], nullable=True)\n b = np.exp(a)\n assert b.null_count() == 1\n\n\ndef test_get():\n a = Series(\"a\", [1, 2, 3])\n assert a[0] == 1\n assert a[:2] == [1, 2]\n\n\ndef test_set():\n a = Series(\"a\", [True, False, True])\n mask = Series(\"msk\", [True, False, True])\n a[mask] = False\n\n\ndef test_fill_none():\n a = Series(\"a\", [1, 2, None], nullable=True)\n b = a.fill_none(\"forward\")\n assert b == [1, 2, 2]\n\n\ndef test_apply():\n a = Series(\"a\", [1, 2, None], nullable=True)\n b = a.apply(lambda x: x ** 2)\n assert b == [1, 4, None]\n\n a = Series(\"a\", [\"foo\", \"bar\", None], nullable=True)\n b = a.apply(lambda x: x + \"py\")\n assert b == [\"foopy\", \"barpy\", None]\n\n b = a.apply(lambda x: len(x), dtype_out=Int32)\n assert b == [3, 3, None]\n\n b = a.apply(lambda x: len(x))\n assert b == [3, 3, None]\n\n\ndef test_shift():\n a = Series(\"a\", [1, 2, 3])\n assert a.shift(1) == [None, 1, 2]\n assert a.shift(-1) == [1, 2, None]\n assert a.shift(-2) == [1, None, None]\n\n\[email protected](\n \"dtype, fmt, null_values\", [(Date32, \"%d-%m-%Y\", 0), (Date32, \"%Y-%m-%d\", 3)]\n)\ndef test_parse_date(dtype, fmt, null_values):\n dates = [\"25-08-1988\", \"20-01-1993\", \"25-09-2020\"]\n result = Series.parse_date(\"dates\", dates, dtype, fmt)\n # Why results Date64 into `nan`?\n assert result.dtype == dtype\n assert result.is_null().sum() == null_values\n\n\ndef test_rolling():\n a = Series(\"a\", [1, 2, 3, 2, 1])\n assert a.rolling_min(2) == [None, 1, 2, 2, 1]\n assert a.rolling_max(2) == [None, 2, 3, 3, 2]\n assert a.rolling_sum(2) == [None, 3, 5, 5, 3]\n\n\ndef test_object():\n vals = [[12], \"foo\", 9]\n a = Series(\"a\", vals)\n assert a.dtype == Object\n assert a.to_list() == vals\n assert a[1] == \"foo\"\n\n\ndef test_repeat():\n s = pl.repeat(1, 10)\n assert s.dtype == pl.Int64\n assert s.len() == 10\n s = pl.repeat(\"foo\", 10)\n assert s.dtype == pl.Utf8\n assert s.len() == 10\n\n\ndef test_median():\n s = Series([1, 2, 3])\n assert s.median() == 2\n\n\ndef test_quantile():\n s = Series([1, 2, 3])\n assert s.quantile(0.5) == 2\n\n\ndef test_shape():\n s = Series([1, 2, 3])\n assert s.shape == (3,)\n\n\ndef test_create_list_series():\n pass\n # may Segfault: see https://github.com/ritchie46/polars/issues/518\n # a = [[1, 2], None, [None, 3]]\n # s = pl.Series(\"\", a)\n # assert s.to_list() == a\n\n\ndef test_iter():\n s = pl.Series(\"\", [1, 2, 3])\n\n iter = s.__iter__()\n assert iter.__next__() == 1\n assert iter.__next__() == 2\n assert iter.__next__() == 3\n assert sum(s) == 6\n"
] | [
[
"numpy.array",
"numpy.exp",
"numpy.multiply"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BrancoLab/LocomotionControl | [
"6dc16c29c13b31f6ad70af954a237e379ee10846"
] | [
"draw/tracking.py"
] | [
"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom typing import Union\n\nfrom myterial import grey_dark\n\n\nclass Tracking:\n \"\"\"\n Renders tracking as a 2D trace\n \"\"\"\n\n def __init__(\n self,\n x: Union[pd.Series, np.ndarray],\n y: Union[pd.Series, np.ndarray],\n ax: plt.Axes = None,\n **kwargs,\n ):\n ax = ax or plt.gca()\n if isinstance(x, pd.Series):\n for i in np.arange(len(x)):\n ax.plot(\n x[i],\n y[i],\n color=kwargs.pop(\"color\", grey_dark),\n solid_joinstyle=\"round\",\n **kwargs,\n )\n else:\n ax.plot(\n x,\n y,\n color=kwargs.pop(\"color\", grey_dark),\n solid_joinstyle=\"round\",\n **kwargs,\n )\n\n @classmethod\n def scatter(\n cls,\n x: np.ndarray,\n y: np.ndarray,\n s: np.ndarray = None,\n c: np.ndarray = None,\n ax: plt.Axes = None,\n **kwargs,\n ):\n ax = ax or plt.gca()\n\n if isinstance(x, pd.Series):\n for i in np.arange(len(x)):\n ax.scatter(x[i], y[i], s=s, c=c, **kwargs)\n else:\n ax.scatter(x, y, s=s, c=c, **kwargs)\n"
] | [
[
"matplotlib.pyplot.gca"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
fujitaushizu/ArknightsAutoHelper | [
"d0bb47b4a141c791369f89093cef27fa25d2cad2"
] | [
"imgreco/stage_ocr.py"
] | [
"from functools import lru_cache\nimport cv2\nimport numpy as np\nfrom . import resources\nimport zipfile\nfrom . import common\nfrom util.richlog import get_logger\nimport config\n\n\nidx2id = ['-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',\n 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\nprefer_svm = config.get('ocr/stage_prefer_svm', True)\nlogger = get_logger(__name__)\n\n\n@lru_cache(maxsize=1)\ndef _load_svm():\n with resources.open_file('stage_ocr/svm_data.zip') as f:\n zf = zipfile.ZipFile(f, 'r')\n ydoc = zf.read('svm_data.dat').decode('utf-8')\n fs = cv2.FileStorage(ydoc, cv2.FileStorage_READ | cv2.FileStorage_MEMORY)\n svm = cv2.ml.SVM_create()\n svm.read(fs.getFirstTopLevelNode())\n assert svm.isTrained()\n return svm\n\n\n@lru_cache(maxsize=1)\ndef _load_onnx_model():\n with resources.open_file('stage_ocr/chars.onnx') as f:\n data = f.read()\n net = cv2.dnn.readNetFromONNX(data)\n return net\n\n\ndef predict_cv(img):\n net = _load_onnx_model()\n char_imgs = crop_char_img(img)\n if not char_imgs:\n return ''\n roi_list = [np.expand_dims(resize_char(x), 2) for x in char_imgs]\n blob = cv2.dnn.blobFromImages(roi_list)\n net.setInput(blob)\n scores = net.forward()\n predicts = scores.argmax(1)\n # softmax = [common.softmax(score) for score in scores]\n # probs = [softmax[i][predicts[i]] for i in range(len(predicts))]\n # print(probs)\n return ''.join([idx2id[p] for p in predicts])\n\n\ndef get_img_feature(img):\n return resize_char(img).reshape((256, 1))\n\n\ndef resize_char(img):\n h, w = img.shape[:2]\n scale = 16 / max(h, w)\n h = int(h * scale)\n w = int(w * scale)\n img2 = np.zeros((16, 16)).astype(np.uint8)\n img = cv2.resize(img, (w, h))\n img2[0:h, 0:w] = ~img\n return img2\n\n\ndef predict(gray_img):\n svm = _load_svm()\n res = svm.predict(np.float32([get_img_feature(gray_img)]))\n return chr(res[1][0][0])\n\n\ndef crop_char_img(img):\n h, w = img.shape[:2]\n has_black = False\n last_x = None\n res = []\n for x in range(0, w):\n for y in range(0, h - 1):\n has_black = False\n if img[y][x] < 127 and img[y+1][x] < 127:\n has_black = True\n if not last_x:\n last_x = x\n break\n if not has_black and last_x:\n if x - last_x >= 3:\n min_y = None\n max_y = None\n for y1 in range(0, h):\n has_black = False\n for x1 in range(last_x, x):\n if img[y1][x1] < 127:\n has_black = True\n if min_y is None:\n min_y = y1\n break\n if not has_black and min_y is not None and max_y is None:\n max_y = y1\n break\n res.append(img[min_y:max_y, last_x:x])\n last_x = None\n return res\n\n\ndef thresholding(image):\n img = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n if img[0, 0] < 127:\n img = ~img\n return img\n\n\ndef pil_to_cv_gray_img(pil_img):\n arr = np.asarray(pil_img, dtype=np.uint8)\n return cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)\n\n\ndef invert_cv_gray_img_color(img):\n return ~img\n\n\ndef cut_tag(screen, w, pt):\n img_h, img_w = screen.shape[:2]\n tag_w = 130\n tag = thresholding(screen[pt[1] - 1:pt[1] + 40, pt[0] + w + 3:pt[0] + tag_w + w])\n # 130 像素不一定能将 tag 截全,所以再检查一次看是否需要拓宽 tag 长度\n for i in range(3):\n for j in range(40):\n if tag[j][tag_w - 4 - i] < 127:\n tag_w = 160\n if pt[0] + w + tag_w >= img_w:\n return None\n tag = thresholding(screen[pt[1] - 1:pt[1] + 40, pt[0] + w + 3:pt[0] + tag_w + w])\n break\n return tag\n\n\ndef remove_holes(img):\n # 去除小连通域\n # findContours 只能处理黑底白字的图像, 所以需要进行一下翻转\n contours, hierarchy = cv2.findContours(~img, cv2.RETR_EXTERNAL, 
cv2.CHAIN_APPROX_SIMPLE)\n for i in range(len(contours)):\n # 计算区块面积\n area = cv2.contourArea(contours[i])\n if area < 8:\n # 将面积较小的点涂成白色,以去除噪点\n cv2.drawContours(img, [contours[i]], 0, 255, -1)\n\n\ndef recognize_stage_tags(pil_screen, template, ccoeff_threshold=0.75):\n screen = pil_to_cv_gray_img(pil_screen)\n img_h, img_w = screen.shape[:2]\n ratio = 1080 / img_h\n if ratio != 1:\n ratio = 1080 / img_h\n screen = cv2.resize(screen, (int(img_w * ratio), 1080))\n result = cv2.matchTemplate(screen, template, cv2.TM_CCOEFF_NORMED)\n loc = np.where(result >= ccoeff_threshold)\n h, w = template.shape[:2]\n img_h, img_w = screen.shape[:2]\n tag_set = set()\n tag_set2 = set()\n res = []\n dbg_screen = None\n for pt in zip(*loc[::-1]):\n pos_key = (pt[0] // 100, pt[1] // 100)\n pos_key2 = (int(pt[0] / 100 + 0.5), int(pt[1] / 100 + 0.5))\n if pos_key in tag_set or pos_key2 in tag_set2:\n continue\n tag_set.add(pos_key)\n tag_set2.add(pos_key2)\n tag_w = 130\n # 检查边缘像素是否超出截图的范围\n if pt[0] + w + tag_w < img_w:\n tag = cut_tag(screen, w, pt)\n if tag is None:\n continue\n remove_holes(tag)\n tag_str = do_tag_ocr(tag)\n if len(tag_str) < 3:\n if dbg_screen is None:\n dbg_screen = screen.copy()\n cv2.rectangle(dbg_screen, pt, (pt[0] + w + tag_w, pt[1] + h), 0, 3)\n continue\n pos = (int((pt[0] + (tag_w / 2)) / ratio), int((pt[1] + 20) / ratio))\n # logger.logtext('pos: %s' % str(pos))\n # res.append({'tag_img': tag, 'pos': (pt[0] + (tag_w / 2), pt[1] + 20), 'tag_str': tag_str})\n res.append({'pos': pos, 'tag_str': tag_str})\n if dbg_screen is not None:\n logger.logimage(common.convert_to_pil(dbg_screen))\n return res\n\n\ndef do_tag_ocr(img):\n logger.logimage(common.convert_to_pil(img))\n res = do_tag_ocr_svm(img) if prefer_svm else do_tag_ocr_dnn(img)\n logger.logtext('%s, res: %s' % ('svm' if prefer_svm else 'dnn', res))\n return res\n\n\ndef do_tag_ocr_svm(img):\n char_imgs = crop_char_img(img)\n s = ''\n for char_img in char_imgs:\n c = predict(char_img)\n s += c\n return s\n\n\ndef do_tag_ocr_dnn(img):\n return predict_cv(img)\n\n\ndef do_img_ocr(pil_img):\n img = pil_to_cv_gray_img(pil_img)\n # cv2.imshow('test', img)\n # cv2.waitKey()\n img = thresholding(img)\n remove_holes(img)\n return do_tag_ocr(img)\n\n\nstage_icon1 = pil_to_cv_gray_img(resources.load_image('stage_ocr/stage_icon1.png'))\nstage_icon2 = pil_to_cv_gray_img(resources.load_image('stage_ocr/stage_icon2.png'))\n\n\ndef recognize_all_screen_stage_tags(pil_screen):\n tags_map = {}\n for tag in recognize_stage_tags(pil_screen, stage_icon1):\n tags_map[tag['tag_str']] = tag['pos']\n for tag in recognize_stage_tags(pil_screen, stage_icon2):\n tags_map[tag['tag_str']] = tag['pos']\n return tags_map\n"
] | [
[
"numpy.asarray",
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chqiwang/sa-nmt | [
"0793130c916483f2a93c85d73c6ed4831da05146"
] | [
"train_wkd.py"
] | [
"import os\nimport time\nimport logging\nfrom argparse import ArgumentParser\nimport tensorflow as tf\nimport yaml\n\nfrom evaluate import Evaluator\nfrom models import *\nfrom utils import DataReader, AttrDict, available_variables, expand_feed_dict\n\n\nclass BreakLoopException(Exception):\n pass\n\n\ndef wrap_scope(input_ckpt_path, output_ckpt_path, scope):\n with tf.Graph().as_default():\n with tf.Session() as sess:\n with tf.variable_scope(scope):\n var_list = tf.contrib.framework.list_variables(input_ckpt_path)\n var_names, var_shapes = zip(*var_list)\n reader = tf.contrib.framework.load_checkpoint(input_ckpt_path)\n var_values = [reader.get_tensor(name) for name in var_names]\n new_var_list = [tf.get_variable(name, initializer=value)\n for name, value in zip(var_names, var_values)]\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver(new_var_list)\n saver.save(sess, output_ckpt_path)\n\n\ndef train(config, teacher_config):\n \"\"\"Train a model with a config file.\"\"\"\n logger = logging.getLogger('')\n data_reader = DataReader(config=config)\n model = eval(config.model)(config=config, num_gpus=config.train.num_gpus)\n with tf.variable_scope('teacher'):\n teacher_model = eval(teacher_config.model)(config=teacher_config, num_gpus=0)\n model.build_train_model(test=config.train.eval_on_dev, teacher_model=teacher_model)\n\n train_op, loss_op = model.get_train_op(name=None)\n global_saver = tf.train.Saver([v for v in tf.global_variables() if not v.name.startswith('teacher')])\n\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n sess_config.allow_soft_placement = True\n\n summary_writer = tf.summary.FileWriter(config.model_dir)\n\n with tf.Session(config=sess_config) as sess:\n # Initialize all variables.\n sess.run(tf.global_variables_initializer())\n\n # Reload teacher variables from disk.\n logger.info('Load teacher model parameters...')\n teacher_vars = tf.global_variables('teacher')\n teacher_saver = tf.train.Saver(var_list=teacher_vars)\n tmp_ckpt = '/tmp/teacher-{}.ckpt'.format(os.getpid())\n wrap_scope(tf.train.latest_checkpoint(teacher_config.model_dir), tmp_ckpt, 'teacher')\n teacher_saver.restore(sess, tmp_ckpt)\n for v in teacher_vars:\n logger.info('Reload {} from disk.'.format(v.name))\n\n # Reload student variables from disk.\n logger.info('Load student model parameters...')\n if tf.train.latest_checkpoint(config.model_dir):\n available_vars = available_variables(config.model_dir)\n if available_vars:\n saver = tf.train.Saver(var_list=available_vars)\n saver.restore(sess, tf.train.latest_checkpoint(config.model_dir))\n for v in available_vars:\n logger.info('Reload {} from disk.'.format(v.name))\n else:\n logger.info('Nothing to be reload from disk.')\n else:\n logger.info('Nothing to be reload from disk.')\n\n evaluator = Evaluator()\n evaluator.init_from_existed(model, sess, data_reader)\n\n global dev_bleu, toleration\n dev_bleu = evaluator.evaluate(**config.dev) if config.train.eval_on_dev else 0\n toleration = config.train.toleration\n\n def train_one_step(batch, loss_op, train_op):\n feed_dict = expand_feed_dict({model.src_pls: batch[0], model.dst_pls: batch[1]})\n step, lr, loss, _ = sess.run(\n [model.global_step, model.learning_rate,\n loss_op, train_op],\n feed_dict=feed_dict)\n if step % config.train.summary_freq == 0:\n summary = sess.run(model.summary_op, feed_dict=feed_dict)\n summary_writer.add_summary(summary, global_step=step)\n return step, lr, loss\n\n def maybe_save_model():\n global dev_bleu, 
toleration\n\n def save():\n mp = config.model_dir + '/model_step_{}'.format(step)\n global_saver.save(sess, mp)\n logger.info('Save model in %s.' % mp)\n\n if config.train.eval_on_dev:\n new_dev_bleu = evaluator.evaluate(**config.dev)\n if config.train.toleration is None:\n save()\n else:\n if new_dev_bleu >= dev_bleu:\n save()\n toleration = config.train.toleration\n dev_bleu = new_dev_bleu\n else:\n toleration -= 1\n else:\n save()\n\n try:\n step = 0\n for epoch in range(1, config.train.num_epochs+1):\n for batch in data_reader.get_training_batches(epoches=1):\n\n # Train normal instances.\n start_time = time.time()\n step, lr, loss = train_one_step(batch, loss_op, train_op)\n logger.info(\n 'epoch: {0}\\tstep: {1}\\tlr: {2:.6f}\\tloss: {3:.4f}\\ttime: {4:.4f}'.\n format(epoch, step, lr, loss, time.time() - start_time))\n # Save model\n if config.train.save_freq > 0 \\\n and step > 0 \\\n and step % config.train.save_freq == 0:\n maybe_save_model()\n\n if config.train.num_steps is not None and step >= config.train.num_steps:\n raise BreakLoopException(\"BreakLoop\")\n\n if toleration is not None and toleration <= 0:\n raise BreakLoopException(\"BreakLoop\")\n\n # Save model per epoch if config.train.save_freq is less or equal than zero\n if config.train.save_freq <= 0:\n maybe_save_model()\n except BreakLoopException as e:\n logger.info(e)\n\n logger.info(\"Finish training.\")\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('-c', '--config', dest='config')\n parser.add_argument('-t', '--teacher_config', dest='teacher_config')\n args = parser.parse_args()\n # Read config\n config = AttrDict(yaml.load(open(args.config)))\n teacher_config = AttrDict(yaml.load(open(args.teacher_config)))\n # Logger\n if not os.path.exists(config.model_dir):\n os.makedirs(config.model_dir)\n logging.basicConfig(filename=config.model_dir + '/train.log', level=logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger('').addHandler(console)\n # Train\n train(config, teacher_config)\n"
] | [
[
"tensorflow.Graph",
"tensorflow.get_variable",
"tensorflow.summary.FileWriter",
"tensorflow.train.latest_checkpoint",
"tensorflow.contrib.framework.load_checkpoint",
"tensorflow.global_variables",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.variable_scope",
"tensorflow.train.Saver",
"tensorflow.contrib.framework.list_variables"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
skiran252/FARM | [
"8460d78910a20d19a5da12de6e9bff11f68332a7"
] | [
"farm/file_utils.py"
] | [
"\"\"\"\nUtilities for working with the local dataset cache.\nThis file is adapted from the AllenNLP library at https://github.com/allenai/allennlp\nCopyright by the AllenNLP authors.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport json\nimport logging\nimport os\nimport tempfile\nimport tarfile\nimport zipfile\n\nfrom functools import wraps\nfrom hashlib import sha256\nfrom io import open\nfrom pathlib import Path\n\nimport boto3\nimport numpy as np\nimport requests\nfrom botocore.exceptions import ClientError\nfrom dotmap import DotMap\nfrom tqdm import tqdm\nfrom transformers.file_utils import cached_path\n\ntry:\n from torch.hub import _get_torch_home\n\n torch_cache_home = Path(_get_torch_home())\nexcept ImportError:\n torch_cache_home = Path(os.path.expanduser(\n os.getenv(\n \"TORCH_HOME\", Path(os.getenv(\"XDG_CACHE_HOME\", \"~/.cache\")) / \"torch\"\n )\n ))\ndefault_cache_path = torch_cache_home / \"farm\"\n\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\ntry:\n from pathlib import Path\n\n FARM_CACHE = Path(os.getenv(\"FARM_CACHE\", default_cache_path))\nexcept (AttributeError, ImportError):\n FARM_CACHE = os.getenv(\"FARM_CACHE\", default_cache_path)\n\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\n\ndef url_to_filename(url, etag=None):\n \"\"\"\n Convert `url` into a hashed filename in a repeatable way.\n If `etag` is specified, append its hash to the url's, delimited\n by a period.\n \"\"\"\n url_bytes = url.encode(\"utf-8\")\n url_hash = sha256(url_bytes)\n filename = url_hash.hexdigest()\n\n if etag:\n etag_bytes = etag.encode(\"utf-8\")\n etag_hash = sha256(etag_bytes)\n filename += \".\" + etag_hash.hexdigest()\n\n return filename\n\n\ndef filename_to_url(filename, cache_dir=None):\n \"\"\"\n Return the url and etag (which may be ``None``) stored for `filename`.\n Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.\n \"\"\"\n if cache_dir is None:\n cache_dir = FARM_CACHE\n\n cache_path = cache_dir / filename\n if not os.path.exists(cache_path):\n raise EnvironmentError(\"file {} not found\".format(cache_path))\n\n meta_path = cache_path + \".json\"\n if not os.path.exists(meta_path):\n raise EnvironmentError(\"file {} not found\".format(meta_path))\n\n with open(meta_path, encoding=\"utf-8\") as meta_file:\n metadata = json.load(meta_file)\n url = metadata[\"url\"]\n etag = metadata[\"etag\"]\n\n return url, etag\n\n\ndef download_from_s3(s3_url: str, cache_dir: str = None, access_key: str = None,\n secret_access_key: str = None, region_name: str = None):\n \"\"\"\n Download a \"folder\" from s3 to local. Skip already existing files. Useful for downloading all files of one model\n The default and recommended authentication follows boto3's trajectory of checking for ENV variables,\n .aws/credentials etc. (see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html).\n However, there's also the option to pass `access_key`, `secret_access_key` and `region_name` directly\n as this is needed in some enterprise enviroments with local s3 deployments.\n\n :param s3_url: Url of the \"folder\" in s3 (e.g. 
s3://mybucket/my_modelname)\n :param cache_dir: Optional local directory where the files shall be stored.\n If not supplied, we'll use a subfolder in torch's cache dir (~/.cache/torch/farm)\n :param access_key: Optional S3 Access Key\n :param secret_access_key: Optional S3 Secret Access Key\n :param region_name: Optional Region Name\n :return: local path of the folder\n \"\"\"\n\n if cache_dir is None:\n cache_dir = FARM_CACHE\n\n logger.info(f\"Downloading from {s3_url} to {cache_dir}\")\n\n if access_key or secret_access_key:\n assert secret_access_key and access_key, \"You only supplied one of secret_access_key and access_key. We need both.\"\n\n session = boto3.Session(\n aws_access_key_id=access_key,\n aws_secret_access_key=secret_access_key,\n region_name=region_name\n )\n s3_resource = session.resource('s3')\n else:\n s3_resource = boto3.resource('s3')\n\n bucket_name, s3_path = split_s3_path(s3_url)\n bucket = s3_resource.Bucket(bucket_name)\n objects = bucket.objects.filter(Prefix=s3_path)\n if not objects:\n raise ValueError(\"Could not find s3_url: {s3_url}\")\n\n for obj in objects:\n path, filename = os.path.split(obj.key)\n path = os.path.join(cache_dir, path)\n # Create local folder\n if not os.path.exists(path):\n os.makedirs(path)\n # Download file if not present locally\n if filename:\n filepath = os.path.join(path, filename)\n if os.path.exists(filepath):\n logger.info(f\"Skipping {obj.key} (exists locally)\")\n else:\n logger.info(f\"Downloading {obj.key} to {filepath} (size: {obj.size/1000000} MB)\")\n bucket.download_file(obj.key, filepath)\n return path\n\ndef split_s3_path(url):\n \"\"\"Split a full s3 path into the bucket name and path.\"\"\"\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path\n\n\ndef s3_request(func):\n \"\"\"\n Wrapper function for s3 requests in order to create more helpful error\n messages.\n \"\"\"\n\n @wraps(func)\n def wrapper(url, *args, **kwargs):\n try:\n return func(url, *args, **kwargs)\n except ClientError as exc:\n if int(exc.response[\"Error\"][\"Code\"]) == 404:\n raise EnvironmentError(\"file {} not found\".format(url))\n else:\n raise\n\n return wrapper\n\n\n@s3_request\ndef s3_etag(url):\n \"\"\"Check ETag on S3 object.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_object = s3_resource.Object(bucket_name, s3_path)\n return s3_object.e_tag\n\n\n@s3_request\ndef s3_get(url, temp_file):\n \"\"\"Pull a file directly from S3.\"\"\"\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)\n\n\ndef http_get(url, temp_file, proxies=None):\n req = requests.get(url, stream=True, proxies=proxies)\n content_length = req.headers.get(\"Content-Length\")\n total = int(content_length) if content_length is not None else None\n progress = tqdm(unit=\"B\", total=total)\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n progress.update(len(chunk))\n temp_file.write(chunk)\n progress.close()\n\ndef fetch_archive_from_http(url, output_dir, proxies=None):\n \"\"\"\n Fetch an archive (zip or tar.gz) from a url via http and extract content to an output directory.\n\n :param url: http address\n :type url: str\n :param 
output_dir: local path\n :type output_dir: str\n :param proxies: proxies details as required by requests library\n :type proxies: dict\n :return: bool if anything got fetched\n \"\"\"\n # verify & prepare local directory\n path = Path(output_dir)\n if not path.exists():\n path.mkdir(parents=True)\n\n is_not_empty = len(list(Path(path).rglob(\"*\"))) > 0\n if is_not_empty:\n logger.info(\n f\"Found data stored in `{output_dir}`. Delete this first if you really want to fetch new data.\"\n )\n return False\n else:\n logger.info(f\"Fetching from {url} to `{output_dir}`\")\n\n # download & extract\n with tempfile.NamedTemporaryFile() as temp_file:\n http_get(url, temp_file, proxies=proxies)\n temp_file.flush()\n temp_file.seek(0) # making tempfile accessible\n # extract\n if url[-4:] == \".zip\":\n archive = zipfile.ZipFile(temp_file.name)\n archive.extractall(output_dir)\n elif url[-7:] == \".tar.gz\":\n archive = tarfile.open(temp_file.name)\n archive.extractall(output_dir)\n # temp_file gets deleted here\n return True\n\ndef load_from_cache(pretrained_model_name_or_path, s3_dict, **kwargs):\n # Adjusted from HF Transformers to fit loading WordEmbeddings from deepsets s3\n # Load from URL or cache if already cached\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n\n s3_file = s3_dict[pretrained_model_name_or_path]\n try:\n resolved_file = cached_path(\n s3_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n )\n\n if resolved_file is None:\n raise EnvironmentError\n\n except EnvironmentError:\n if pretrained_model_name_or_path in s3_dict:\n msg = \"Couldn't reach server at '{}' to download data.\".format(\n s3_file\n )\n else:\n msg = (\n \"Model name '{}' was not found in model name list. 
\"\n \"We assumed '{}' was a path, a model identifier, or url to a configuration file or \"\n \"a directory containing such a file but couldn't find any such file at this path or url.\".format(\n pretrained_model_name_or_path, s3_file,\n )\n )\n raise EnvironmentError(msg)\n\n if resolved_file == s3_file:\n logger.info(\"loading file {}\".format(s3_file))\n else:\n logger.info(\"loading file {} from cache at {}\".format(s3_file, resolved_file))\n\n return resolved_file\n\n\ndef read_set_from_file(filename):\n \"\"\"\n Extract a de-duped collection (set) of text from a file.\n Expected file format is one item per line.\n \"\"\"\n collection = set()\n with open(filename, \"r\", encoding=\"utf-8\") as file_:\n for line in file_:\n collection.add(line.rstrip())\n return collection\n\n\ndef get_file_extension(path, dot=True, lower=True):\n ext = os.path.splitext(path)[1]\n ext = ext if dot else ext[1:]\n return ext.lower() if lower else ext\n\n\ndef read_config(path):\n if path:\n with open(path) as json_data_file:\n conf_args = json.load(json_data_file)\n else:\n raise ValueError(\"No config provided for classifier\")\n\n # flatten last part of config, take either value or default as value\n for gk, gv in conf_args.items():\n for k, v in gv.items():\n conf_args[gk][k] = v[\"value\"] if (v[\"value\"] is not None) else v[\"default\"]\n\n # DotMap for making nested dictionary accessible through dot notation\n args = DotMap(conf_args, _dynamic=False)\n\n return args\n\n\ndef unnestConfig(config):\n \"\"\"\n This function creates a list of config files for evaluating parameters with different values. If a config parameter\n is of type list this list is iterated over and a config object without lists is returned. Can handle lists inside any\n number of parameters.\n\n Can handle nested (one level) configs\n \"\"\"\n nestedKeys = []\n nestedVals = []\n\n for gk, gv in config.items():\n if(gk != \"task\"):\n for k, v in gv.items():\n if isinstance(v, list):\n if (\n k != \"layer_dims\"\n ): # exclude layer dims, since it is already a list\n nestedKeys.append([gk, k])\n nestedVals.append(v)\n elif isinstance(v, dict):\n logger.warning(\"Config too deep! Working on %s\" %(str(v)))\n\n if len(nestedKeys) == 0:\n unnestedConfig = [config]\n else:\n logger.info(\n \"Nested config at parameters: %s\"\n % (\", \".join(\".\".join(x) for x in nestedKeys))\n )\n unnestedConfig = []\n mesh = np.meshgrid(\n *nestedVals\n ) # get all combinations, each dimension corresponds to one parameter type\n # flatten mesh into shape: [num_parameters, num_combinations] so we can iterate in 2d over any paramter combinations\n mesh = [x.flatten() for x in mesh]\n\n # loop over all combinations\n for i in range(len(mesh[0])):\n tempconfig = config.copy()\n for j, k in enumerate(nestedKeys):\n if isinstance(k, str):\n tempconfig[k] = mesh[j][\n i\n ] # get ith val of correct param value and overwrite original config\n elif len(k) == 2:\n tempconfig[k[0]][k[1]] = mesh[j][i] # set nested dictionary keys\n else:\n logger.warning(\"Config too deep! Working on %s\" %(str(k)))\n unnestedConfig.append(tempconfig)\n\n return unnestedConfig\n"
] | [
[
"torch.hub._get_torch_home",
"numpy.meshgrid"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NrctcV/BigQuery-Antifraud-reporting | [
"6167a44159f939c5993423b1196992d6c6ecc34f"
] | [
"main.py"
] | [
"\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport google.oauth2.credentials\nimport pandas_gbq\n\n\n# Setup gcloud SDK\n# login to gcloud\n# gcloud auth application-default login\n# print token\n# fill oauth2 creds and project name\n# install tqdm\n# install xlxswriter\n\n\ndef create_date():\n year = int(input('Enter a year'))\n month = int(input('Enter a month'))\n day = int(input('Enter a day'))\n try:\n return datetime.date(year, month, day)\n except Exception as error:\n print(error)\n return None\n\n\ndef main():\n\n #connect to BQ via default-login token\n credentials = google.oauth2.credentials.Credentials(\n )\n project_id = \"al-bi-bq-prod\"\n\n\n date1 = create_date()\n date2 = create_date()\n #affiliate_id = [int(x) for x in input(\"Enter affiliate_ids\\n\").split(',')]\n advertiser_id = int(input('Enter an adv_id\\n'))\n offer_ids = [x for x in input(\"Enter offer_ids\\n\").split(',')]\n\n\n #query the data\n sql = f\"\"\"\n select\n MP, event_name, round(100*(state_count / total),2) as percentage \n from (\n SELECT a.network_affiliate_id as MP, event_name, count(*) AS state_count, sum(count(*)) OVER(partition by a.network_affiliate_id) AS total\n \n FROM \n `al-bi-bq-prod.dwh.fact_conversions_and_events` a left join `al-bi-bq-prod.dwh.dim_affiliates` b on a.network_affiliate_id = b.affiliate_id\n \n WHERE\n event_name != 'app_open' and event_name != 'rejected_install'\n AND network_offer_id in {'(%s)' % ', '.join([str(i) for i in offer_ids])}\n AND network_advertiser_id = {advertiser_id} \n AND _partitiondate between '{date1}' and '{date2}'\n GROUP by 2,1\n order by MP) s\n \"\"\"\n\n sql2 = f\"\"\"\n SELECT\n conversion_id,transaction_id, network_offer_id, conversion_status, event_name, network_affiliate_id, affiliate_name, source_id, click_date, date,\n timestamp_diff (date,click_date, second) as timediff,\n CASE \n WHEN timestamp_diff (date,click_date, second) >= 0 \n AND timestamp_diff (date,click_date, second) < 10 THEN '1_<10secs'\n WHEN timestamp_diff (date,click_date, second) >= 10 \n AND timestamp_diff (date,click_date, second) < 60 THEN '2_<1min'\n WHEN timestamp_diff (date,click_date, second) >= 60 \n AND timestamp_diff (date,click_date, second) < 300 THEN '3_<5min'\n WHEN timestamp_diff (date,click_date, second) >= 300 \n AND timestamp_diff (date,click_date, second) < 600 THEN '4_<10min'\n WHEN timestamp_diff (date,click_date, second) >= 600 \n AND timestamp_diff (date,click_date, second) < 1800 THEN '5_<30min'\n WHEN timestamp_diff (date,click_date, second) >= 1800 \n AND timestamp_diff (date,click_date, second) < 3600 THEN '6_<1hr'\n WHEN timestamp_diff (date,click_date, second) >= 3600\n AND timestamp_diff (date,click_date, second) < 86400 THEN '7_<24hr'\n ELSE 'more than 24h' END as buckets, \n session_user_ip, conversion_user_ip, payout, revenue, query_parameters_device_detail, email, carrier, browser,brand, device_type, os_version, language, region, country, city, android_id, adv1, adv2, adv3, adv4, adv5, is_view_through, is_scrub, is_cookie_based, idfa, google_ad_id, sub1\n \n FROM \n `al-bi-bq-prod.dwh.fact_conversions_and_events` a left join `al-bi-bq-prod.dwh.dim_affiliates` b on a.network_affiliate_id = b.affiliate_id\n \n WHERE\n _partitionDATE BETWEEN '{date1}' and '{date2}'\n AND network_advertiser_id = {advertiser_id}\n AND network_offer_id in {'(%s)' % ', '.join([str(i) for i in offer_ids])}\n AND conversion_status = 'approved' \n AND event_name != 'app_open'\n AND event_name != 'rejected_install'\n \"\"\"\n\n\n sql3 = f\"\"\"\n 
#standardSQL\n SELECT\n affiliate_id,\n round(b.IP_dups_percentage,2) as IP_dups_percentage, \n round(b.Wifi_percentage,2) as wifi_percentage,\n sum(approved_installs)/sum(clicks) as Click_to_installs,\n sum(conversions)/sum(approved_installs) as Install_to_action_rate,\n sum(mmp_rejected_installs)/sum(total_installs) as rejection_rate,\n sum(clicks) as total_clicks, \n sum(approved_installs) as approved_installs,\n sum(actions) as App_and_pend_conv,\n sum(conversions) as approved_conversions,\n sum(a.payout) as total_payout, \n sum(a.revenue) as total_revenue\n FROM\n `al-bi-bq-prod.dwh.fact_daily_stats` a \n left join\n (select network_affiliate_id,\n (count(session_user_ip) - count(distinct(session_user_ip)))/count(session_user_ip) as IP_dups_percentage,\n SUM( CASE WHEN carrier ='' THEN 1 ELSE 0 END )/count(carrier) as Wifi_percentage \n \n FROM \n `al-bi-bq-prod.dwh.fact_conversions_and_events`\n where conversion_status = 'approved' AND payout != 0\n group by 1) b on a.affiliate_id = b.network_affiliate_id\n \n WHERE\n _partitionDATE BETWEEN '{date1}' and '{date2}'\n AND advertiser_id = {advertiser_id}\n AND offer_id in {'(%s)' % ', '.join([str(i) for i in offer_ids])}\n group by 1,2,3\n having approved_installs >0 and total_clicks >0 and total_payout > 100\n order by total_payout desc\n \"\"\"\n\n sql4 = f\"\"\"\n SELECT\n conversion_id,transaction_id, network_offer_id, conversion_status, event_name, network_affiliate_id, affiliate_name, source_id, click_date, date,\n timestamp_diff (date,click_date, second) as timediff,\n CASE \n WHEN timestamp_diff (date,click_date, second) >= 0 \n AND timestamp_diff (date,click_date, second) < 10 THEN '1_<10secs'\n WHEN timestamp_diff (date,click_date, second) >= 10 \n AND timestamp_diff (date,click_date, second) < 60 THEN '2_<1min'\n WHEN timestamp_diff (date,click_date, second) >= 60 \n AND timestamp_diff (date,click_date, second) < 300 THEN '3_<5min'\n WHEN timestamp_diff (date,click_date, second) >= 300 \n AND timestamp_diff (date,click_date, second) < 600 THEN '4_<10min'\n WHEN timestamp_diff (date,click_date, second) >= 600 \n AND timestamp_diff (date,click_date, second) < 1800 THEN '5_<30min'\n WHEN timestamp_diff (date,click_date, second) >= 1800 \n AND timestamp_diff (date,click_date, second) < 3600 THEN '6_<1hr'\n WHEN timestamp_diff (date,click_date, second) >= 3600\n AND timestamp_diff (date,click_date, second) < 86400 THEN '7_<24hr'\n ELSE 'more than 24h' END as buckets, \n session_user_ip, conversion_user_ip, payout, revenue, query_parameters_device_detail, email, carrier, browser,brand, device_type, os_version, language, region, country, city, android_id, adv1, adv2, adv3, adv4, adv5, is_view_through, is_scrub, is_cookie_based, idfa, google_ad_id, sub1\n \n FROM \n `al-bi-bq-prod.dwh.fact_conversions_and_events` a left join `al-bi-bq-prod.dwh.dim_affiliates` b on a.network_affiliate_id = b.affiliate_id\n\n WHERE\n _partitionDATE BETWEEN '{date1}' and '{date2}'\n AND network_advertiser_id = {advertiser_id}\n AND network_offer_id in {'(%s)' % ', '.join([str(i) for i in offer_ids])}\n AND conversion_status = 'approved' \n AND network_offer_payout_revenue_id = 0\n \"\"\"\n\n sql5 = f\"\"\"\n SELECT\n conversion_id,transaction_id, network_offer_id, conversion_status, event_name, network_affiliate_id, affiliate_name, source_id, click_date, date,\n timestamp_diff (date,click_date, second) as timediff,\n CASE \n WHEN timestamp_diff (date,click_date, second) >= 0 \n AND timestamp_diff (date,click_date, second) < 10 THEN '1_<10secs'\n WHEN 
timestamp_diff (date,click_date, second) >= 10 \n AND timestamp_diff (date,click_date, second) < 60 THEN '2_<1min'\n WHEN timestamp_diff (date,click_date, second) >= 60 \n AND timestamp_diff (date,click_date, second) < 300 THEN '3_<5min'\n WHEN timestamp_diff (date,click_date, second) >= 300 \n AND timestamp_diff (date,click_date, second) < 600 THEN '4_<10min'\n WHEN timestamp_diff (date,click_date, second) >= 600 \n AND timestamp_diff (date,click_date, second) < 1800 THEN '5_<30min'\n WHEN timestamp_diff (date,click_date, second) >= 1800 \n AND timestamp_diff (date,click_date, second) < 3600 THEN '6_<1hr'\n WHEN timestamp_diff (date,click_date, second) >= 3600\n AND timestamp_diff (date,click_date, second) < 86400 THEN '7_<24hr'\n ELSE 'more than 24h' END as buckets, \n session_user_ip, conversion_user_ip, payout, revenue, query_parameters_device_detail, email, carrier, browser,brand, device_type, os_version, language, region, country, city, android_id, adv1, adv2, adv3, adv4, adv5, is_view_through, is_scrub, is_cookie_based, idfa, google_ad_id, sub1\n \n FROM \n `al-bi-bq-prod.dwh.fact_conversions_and_events` a left join `al-bi-bq-prod.dwh.dim_affiliates` b on a.network_affiliate_id = b.affiliate_id\n\n WHERE\n _partitionDATE BETWEEN '{date1}' and '{date2}'\n AND network_advertiser_id = {advertiser_id}\n AND network_offer_id in {'(%s)' % ', '.join([str(i) for i in offer_ids])}\n AND conversion_status = 'approved' \n AND payout != 0\n \"\"\"\n\n #examples for testing\n #(76771, 76772)\n #10415\n #'2020-10-25' and '2020-10-30'\n\n df1 = pandas_gbq.read_gbq(sql, project_id=project_id)\n\n df2 = pandas_gbq.read_gbq(sql2, project_id=project_id)\n df2['click_date'] = df2['click_date'].dt.tz_localize(None)\n df2['date'] = df2['date'].dt.tz_localize(None)\n\n df3 = pandas_gbq.read_gbq(sql3, project_id=project_id)\n\n df4 = pandas_gbq.read_gbq(sql4, project_id=project_id)\n df4['click_date'] = df4['click_date'].dt.tz_localize(None)\n df4['date'] = df4['date'].dt.tz_localize(None)\n\n df5 = pandas_gbq.read_gbq(sql5, project_id=project_id)\n df5['click_date'] = df5['click_date'].dt.tz_localize(None)\n df5['date'] = df5['date'].dt.tz_localize(None)\n\n df6 = pd.pivot_table(df4, values = 'timediff', index=['network_affiliate_id'], columns=['buckets'], aggfunc=np.count_nonzero)\n\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter('/Users/user/Downloads/report 2020.xlsx', engine='xlsxwriter')\n\n # Write each dataframe to a different worksheet.\n df5.to_excel(writer, sheet_name='Conversions')\n df4.to_excel(writer, sheet_name='Installs')\n df3.to_excel(writer, sheet_name='metrics_table')\n df6.to_excel(writer, sheet_name='CTIT')\n df2.to_excel(writer, sheet_name='full_report')\n df1.to_excel(writer, sheet_name='In-app_activity_behavior')\n\n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n\n\nif __name__ == \"__main__\":\n main()\n\n"
] | [
[
"pandas.ExcelWriter",
"pandas.pivot_table"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
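Illustrative sketch (not part of the dataset row above, and invented sample values throughout): the script in this record buckets click-to-install time ("CTIT") with a SQL CASE expression and then pivots counts per affiliate with pandas.pivot_table (the df6 step). Assuming the same bucket edges as the query, the bucketing can be reproduced locally with pd.cut:

import numpy as np
import pandas as pd

installs = pd.DataFrame({
    "network_affiliate_id": [1, 1, 2, 2, 2],   # invented sample data
    "timediff": [4, 45, 200, 4000, 90000],     # seconds from click to install
})

# Same bucket edges as the SQL CASE expression (right-open intervals).
edges = [0, 10, 60, 300, 600, 1800, 3600, 86400, np.inf]
labels = ["1_<10secs", "2_<1min", "3_<5min", "4_<10min",
          "5_<30min", "6_<1hr", "7_<24hr", "more than 24h"]
installs["buckets"] = pd.cut(installs["timediff"], bins=edges, labels=labels, right=False)

# Equivalent of the df6 pivot: installs per affiliate per CTIT bucket.
ctit = pd.pivot_table(installs, values="timediff", index="network_affiliate_id",
                      columns="buckets", aggfunc="count")
print(ctit)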
EleutherAGI/summarisation | [
"d432873e1ba171f47371b8b0df7235478b52ca99"
] | [
"preprocess_tldr_dataset.py"
] | [
"import json\nfrom collections import OrderedDict, Counter\nfrom transformers import GPT2TokenizerFast\nfrom tqdm import tqdm\nfrom sklearn.model_selection import train_test_split\n\ndata = []\nwith open('./data/tldr-training-data.jsonl') as f:\n for line in f:\n data.append(json.loads(line))\n\ndef view_sample(idx):\n print(f\"content: {data[idx]['content']} \\n\")\n print(f\"summary: {data[idx]['summary']} \\n\")\n print(f\"subreddit: {data[idx]['subreddit']}\")\n \nview_sample(5)\n\nlen(data)\n\ncatagories = ['relationships',\n 'AskReddit',\n 'relationship_advice',\n 'tifu',\n 'dating_advice' ,\n 'personalfinance',\n 'Advice',\n 'legaladvice',\n 'offmychest',\n 'loseit',\n 'jobs',\n 'self',\n 'BreakUps',\n 'askwomenadvice',\n 'dogs',\n 'running',\n 'pettyrevenge',\n 'needadvice',\n 'travel',\n 'Parenting',\n 'weddingplanning',\n 'Pets',\n 'Dogtraining',\n 'cats',\n 'AskDocs',\n 'college',\n 'GetMotivated',\n 'books',\n 'Cooking']\n\n#Whitelist catagories\nwhitelist = [item for item in data if 'subreddit' in item and item[\"subreddit\"] in catagories]\n\n#Remove duplicates\nod = OrderedDict((hash(item['body']), item) for item in whitelist)\nwhitelist = list(od.values())\n\n#Remove items whose body is longer than 512\n#probably could just be done in training\n\ntokenizer = GPT2TokenizerFast.from_pretrained(\"gpt2\")\n\ntrimmed = []\nfor item in tqdm(whitelist):\n if len(tokenizer(item['content'] + ' TLDR:' + item['summary'])['input_ids']) <= 512:\n trimmed.append(item)\n\nsubreddits = [item['subreddit'] for item in trimmed]\nkeys = list(Counter(subreddits).keys())\nvals = list(Counter(subreddits).values())\ntot = 0\nfor key, val in zip(keys, vals):\n tot += val\n print(f'{key}: {val}')\nprint(f'\\ntotal items: {tot}')\n\ntrain, test = train_test_split(trimmed, test_size = .05)\n\nwith open('.data/tldr-filtered-test.json', 'w') as outfile:\n json.dump({'data': test}, outfile)\n \nwith open('.data/tldr-filtered-train.json', 'w') as outfile:\n json.dump({'data': train}, outfile)\n\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
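Illustrative sketch (not part of the dataset row above; the sample records are invented): the preprocessing script above dedupes by keying an OrderedDict on hash(item['body']). Python's built-in str hash is salted per process, so those keys differ between runs; a stable digest gives the same dedup behavior reproducibly:

import hashlib
from collections import OrderedDict

records = [{"body": "a post"}, {"body": "another post"}, {"body": "a post"}]  # invented

def stable_key(text):
    # sha1 of the text is stable across processes, unlike hash() on str.
    return hashlib.sha1(text.encode("utf-8")).hexdigest()

# Later duplicates overwrite earlier ones, keeping one record per unique body.
deduped = list(OrderedDict((stable_key(r["body"]), r) for r in records).values())
print(len(deduped))  # 2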
KantiCodes/flatland-rl | [
"fcc10e83d2548470ebaa5540b967db0940eb30dd"
] | [
"baselines/reinforcement_learning/multi_agent_training.py"
] | [
"from datetime import datetime\nimport os\nimport random\nimport sys\nfrom argparse import ArgumentParser, Namespace\nfrom pathlib import Path\nfrom pprint import pprint\n\nimport psutil\nfrom flatland.utils.rendertools import RenderTool\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\nimport torch\n\nfrom flatland.envs.rail_env import RailEnv, RailEnvActions\nfrom flatland.envs.rail_generators import sparse_rail_generator\nfrom flatland.envs.line_generators import sparse_line_generator\nfrom flatland.envs.observations import TreeObsForRailEnv\n\nfrom flatland.envs.malfunction_generators import malfunction_from_params, MalfunctionParameters\nfrom flatland.envs.predictions import ShortestPathPredictorForRailEnv\n\nbase_dir = Path(__file__).resolve().parent.parent\nsys.path.append(str(base_dir))\n\nfrom utils.timer import Timer\nfrom utils.observation_utils import normalize_observation\nfrom reinforcement_learning.dddqn_policy import DDDQNPolicy\n\n\"\"\"\nThis file shows how to train multiple agents using a reinforcement learning approach.\nAfter training an agent, you can submit it straight away to the NeurIPS 2020 Flatland challenge!\n\nAgent documentation: https://flatland.aicrowd.com/getting-started/rl/multi-agent.html\nSubmission documentation: https://flatland.aicrowd.com/getting-started/first-submission.html\n\"\"\"\n\n\ndef create_rail_env(env_params, tree_observation):\n n_agents = env_params.n_agents\n x_dim = env_params.x_dim\n y_dim = env_params.y_dim\n n_cities = env_params.n_cities\n max_rails_between_cities = env_params.max_rails_between_cities\n max_rails_in_city = env_params.max_rails_in_city\n seed = env_params.seed\n\n # Break agents from time to time\n malfunction_parameters = MalfunctionParameters(\n malfunction_rate=env_params.malfunction_rate,\n min_duration=20,\n max_duration=50\n )\n\n return RailEnv(\n width=x_dim, height=y_dim,\n rail_generator=sparse_rail_generator(\n max_num_cities=n_cities,\n grid_mode=False,\n max_rails_between_cities=max_rails_between_cities,\n max_rail_pairs_in_city=max_rails_in_city//2\n ),\n line_generator=sparse_line_generator(),\n number_of_agents=n_agents,\n malfunction_generator_and_process_data=malfunction_from_params(malfunction_parameters),\n obs_builder_object=tree_observation,\n random_seed=seed\n )\n\n\ndef train_agent(train_params, train_env_params, eval_env_params, obs_params):\n # Environment parameters\n n_agents = train_env_params.n_agents\n x_dim = train_env_params.x_dim\n y_dim = train_env_params.y_dim\n n_cities = train_env_params.n_cities\n max_rails_between_cities = train_env_params.max_rails_between_cities\n max_rails_in_city = train_env_params.max_rails_in_city\n seed = train_env_params.seed\n\n # Unique ID for this training\n now = datetime.now()\n training_id = now.strftime('%y%m%d%H%M%S')\n\n # Observation parameters\n observation_tree_depth = obs_params.observation_tree_depth\n observation_radius = obs_params.observation_radius\n observation_max_path_depth = obs_params.observation_max_path_depth\n\n # Training parameters\n eps_start = train_params.eps_start\n eps_end = train_params.eps_end\n eps_decay = train_params.eps_decay\n n_episodes = train_params.n_episodes\n checkpoint_interval = train_params.checkpoint_interval\n n_eval_episodes = train_params.n_evaluation_episodes\n restore_replay_buffer = train_params.restore_replay_buffer\n save_replay_buffer = train_params.save_replay_buffer\n\n # Set the seeds\n random.seed(seed)\n np.random.seed(seed)\n\n # Observation builder\n predictor = 
ShortestPathPredictorForRailEnv(observation_max_path_depth)\n tree_observation = TreeObsForRailEnv(max_depth=observation_tree_depth, predictor=predictor)\n\n # Setup the environments\n train_env = create_rail_env(train_env_params, tree_observation)\n train_env.reset(regenerate_schedule=True, regenerate_rail=True)\n eval_env = create_rail_env(eval_env_params, tree_observation)\n eval_env.reset(regenerate_schedule=True, regenerate_rail=True)\n\n # Setup renderer\n if train_params.render:\n env_renderer = RenderTool(train_env, gl=\"PGL\")\n\n # Calculate the state size given the depth of the tree observation and the number of features\n n_features_per_node = train_env.obs_builder.observation_dim\n n_nodes = sum([np.power(4, i) for i in range(observation_tree_depth + 1)])\n state_size = n_features_per_node * n_nodes\n\n # The action space of flatland is 5 discrete actions\n action_size = 5\n\n # Max number of steps per episode\n # This is the official formula used during evaluations\n # See details in flatland.envs.line_generators.sparse_line_generator\n # max_steps = int(4 * 2 * (env.height + env.width + (n_agents / n_cities)))\n max_steps = train_env._max_episode_steps\n\n action_count = [0] * action_size\n action_dict = dict()\n agent_obs = [None] * n_agents\n agent_prev_obs = [None] * n_agents\n agent_prev_action = [2] * n_agents\n update_values = [False] * n_agents\n\n # Smoothed values used as target for hyperparameter tuning\n smoothed_normalized_score = -1.0\n smoothed_eval_normalized_score = -1.0\n smoothed_completion = 0.0\n smoothed_eval_completion = 0.0\n\n # Double Dueling DQN policy\n policy = DDDQNPolicy(state_size, action_size, train_params)\n\n # Loads existing replay buffer\n if restore_replay_buffer:\n try:\n policy.load_replay_buffer(restore_replay_buffer)\n policy.test()\n except RuntimeError as e:\n print(\"\\n🛑 Could't load replay buffer, were the experiences generated using the same tree depth?\")\n print(e)\n exit(1)\n\n print(\"\\n💾 Replay buffer status: {}/{} experiences\".format(len(policy.memory.memory), train_params.buffer_size))\n\n hdd = psutil.disk_usage('/')\n if save_replay_buffer and (hdd.free / (2 ** 30)) < 500.0:\n print(\"⚠️ Careful! Saving replay buffers will quickly consume a lot of disk space. You have {:.2f}gb left.\".format(hdd.free / (2 ** 30)))\n\n # TensorBoard writer\n writer = SummaryWriter()\n writer.add_hparams(vars(train_params), {})\n writer.add_hparams(vars(train_env_params), {})\n writer.add_hparams(vars(obs_params), {})\n\n training_timer = Timer()\n training_timer.start()\n\n print(\"\\n🚉 Training {} trains on {}x{} grid for {} episodes, evaluating on {} episodes every {} episodes. 
Training id '{}'.\\n\".format(\n train_env.get_num_agents(),\n x_dim, y_dim,\n n_episodes,\n n_eval_episodes,\n checkpoint_interval,\n training_id\n ))\n\n for episode_idx in range(n_episodes + 1):\n step_timer = Timer()\n reset_timer = Timer()\n learn_timer = Timer()\n preproc_timer = Timer()\n inference_timer = Timer()\n\n # Reset environment\n reset_timer.start()\n obs, info = train_env.reset(regenerate_rail=True, regenerate_schedule=True)\n reset_timer.end()\n\n if train_params.render:\n env_renderer.set_new_rail()\n\n score = 0\n nb_steps = 0\n actions_taken = []\n\n # Build initial agent-specific observations\n for agent in train_env.get_agent_handles():\n if obs[agent]:\n agent_obs[agent] = normalize_observation(obs[agent], observation_tree_depth, observation_radius=observation_radius)\n agent_prev_obs[agent] = agent_obs[agent].copy()\n\n # Run episode\n for step in range(max_steps - 1):\n inference_timer.start()\n for agent in train_env.get_agent_handles():\n if info['action_required'][agent]:\n update_values[agent] = True\n action = policy.act(agent_obs[agent], eps=eps_start)\n\n action_count[action] += 1\n actions_taken.append(action)\n else:\n # An action is not required if the train hasn't joined the railway network,\n # if it already reached its target, or if is currently malfunctioning.\n update_values[agent] = False\n action = 0\n action_dict.update({agent: action})\n inference_timer.end()\n\n # Environment step\n step_timer.start()\n next_obs, all_rewards, done, info = train_env.step(action_dict, reward_shaping=True)\n # next_obs, all_rewards, done, info = train_env.step(action_dict)\n\n step_timer.end()\n\n # Render an episode at some interval\n if train_params.render and episode_idx % checkpoint_interval == 0:\n env_renderer.render_env(\n show=True,\n frames=False,\n show_observations=False,\n show_predictions=False\n )\n\n # Update replay buffer and train agent\n for agent in train_env.get_agent_handles():\n if update_values[agent] or done['__all__']:\n # Only learn from timesteps where somethings happened\n learn_timer.start()\n policy.step(agent_prev_obs[agent], agent_prev_action[agent], all_rewards[agent], agent_obs[agent], done[agent])\n learn_timer.end()\n\n agent_prev_obs[agent] = agent_obs[agent].copy()\n agent_prev_action[agent] = action_dict[agent]\n\n # Preprocess the new observations\n if next_obs[agent]:\n preproc_timer.start()\n agent_obs[agent] = normalize_observation(next_obs[agent], observation_tree_depth, observation_radius=observation_radius)\n preproc_timer.end()\n\n score += all_rewards[agent]\n\n nb_steps = step\n\n if done['__all__']:\n break\n\n # Epsilon decay\n eps_start = max(eps_end, eps_decay * eps_start)\n\n # Collect information about training\n tasks_finished = sum(done[idx] for idx in train_env.get_agent_handles())\n completion = tasks_finished / max(1, train_env.get_num_agents())\n normalized_score = score / (max_steps * train_env.get_num_agents())\n action_probs = action_count / np.sum(action_count)\n action_count = [1] * action_size\n\n smoothing = 0.99\n smoothed_normalized_score = smoothed_normalized_score * smoothing + normalized_score * (1.0 - smoothing)\n smoothed_completion = smoothed_completion * smoothing + completion * (1.0 - smoothing)\n\n # Print logs\n if episode_idx % checkpoint_interval == 0:\n torch.save(policy.qnetwork_local, './baselines/checkpoints/multi-' + training_id + '-' + str(episode_idx) + '.pth')\n\n if save_replay_buffer:\n policy.save_replay_buffer('./baselines/checkpoints/multi-' + training_id + '-' + 
str(episode_idx) + '.pkl')\n\n if train_params.render:\n env_renderer.close_window()\n\n print(\n '\\r🚂 Episode {}'\n '\\t 🏆 Score: {:.3f}'\n ' Avg: {:.3f}'\n '\\t 💯 Done: {:.2f}%'\n ' Avg: {:.2f}%'\n '\\t 🎲 Epsilon: {:.3f} '\n '\\t 🔀 Action Probs: {}'.format(\n episode_idx,\n normalized_score,\n smoothed_normalized_score,\n 100 * completion,\n 100 * smoothed_completion,\n eps_start,\n format_action_prob(action_probs)\n ), end=\" \")\n\n # Evaluate policy and log results at some interval\n if episode_idx % checkpoint_interval == 0 and n_eval_episodes > 0:\n scores, completions, nb_steps_eval = eval_policy(eval_env, policy, train_params, obs_params)\n\n writer.add_scalar(\"evaluation/scores_min\", np.min(scores), episode_idx)\n writer.add_scalar(\"evaluation/scores_max\", np.max(scores), episode_idx)\n writer.add_scalar(\"evaluation/scores_mean\", np.mean(scores), episode_idx)\n writer.add_scalar(\"evaluation/scores_std\", np.std(scores), episode_idx)\n writer.add_histogram(\"evaluation/scores\", np.array(scores), episode_idx)\n writer.add_scalar(\"evaluation/completions_min\", np.min(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_max\", np.max(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_mean\", np.mean(completions), episode_idx)\n writer.add_scalar(\"evaluation/completions_std\", np.std(completions), episode_idx)\n writer.add_histogram(\"evaluation/completions\", np.array(completions), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_min\", np.min(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_max\", np.max(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_mean\", np.mean(nb_steps_eval), episode_idx)\n writer.add_scalar(\"evaluation/nb_steps_std\", np.std(nb_steps_eval), episode_idx)\n writer.add_histogram(\"evaluation/nb_steps\", np.array(nb_steps_eval), episode_idx)\n\n smoothing = 0.9\n smoothed_eval_normalized_score = smoothed_eval_normalized_score * smoothing + np.mean(scores) * (1.0 - smoothing)\n smoothed_eval_completion = smoothed_eval_completion * smoothing + np.mean(completions) * (1.0 - smoothing)\n writer.add_scalar(\"evaluation/smoothed_score\", smoothed_eval_normalized_score, episode_idx)\n writer.add_scalar(\"evaluation/smoothed_completion\", smoothed_eval_completion, episode_idx)\n\n # Save logs to tensorboard\n writer.add_scalar(\"training/score\", normalized_score, episode_idx)\n writer.add_scalar(\"training/smoothed_score\", smoothed_normalized_score, episode_idx)\n writer.add_scalar(\"training/completion\", np.mean(completion), episode_idx)\n writer.add_scalar(\"training/smoothed_completion\", np.mean(smoothed_completion), episode_idx)\n writer.add_scalar(\"training/nb_steps\", nb_steps, episode_idx)\n writer.add_histogram(\"actions/distribution\", np.array(actions_taken), episode_idx)\n writer.add_scalar(\"actions/nothing\", action_probs[RailEnvActions.DO_NOTHING], episode_idx)\n writer.add_scalar(\"actions/left\", action_probs[RailEnvActions.MOVE_LEFT], episode_idx)\n writer.add_scalar(\"actions/forward\", action_probs[RailEnvActions.MOVE_FORWARD], episode_idx)\n writer.add_scalar(\"actions/right\", action_probs[RailEnvActions.MOVE_RIGHT], episode_idx)\n writer.add_scalar(\"actions/stop\", action_probs[RailEnvActions.STOP_MOVING], episode_idx)\n writer.add_scalar(\"training/epsilon\", eps_start, episode_idx)\n writer.add_scalar(\"training/buffer_size\", len(policy.memory), episode_idx)\n writer.add_scalar(\"training/loss\", policy.loss, episode_idx)\n 
writer.add_scalar(\"timer/reset\", reset_timer.get(), episode_idx)\n writer.add_scalar(\"timer/step\", step_timer.get(), episode_idx)\n writer.add_scalar(\"timer/learn\", learn_timer.get(), episode_idx)\n writer.add_scalar(\"timer/preproc\", preproc_timer.get(), episode_idx)\n writer.add_scalar(\"timer/total\", training_timer.get_current(), episode_idx)\n\n\ndef format_action_prob(action_probs):\n action_probs = np.round(action_probs, 3)\n actions = [\"↻\", \"←\", \"↑\", \"→\", \"◼\"]\n\n buffer = \"\"\n for action, action_prob in zip(actions, action_probs):\n buffer += action + \" \" + \"{:.3f}\".format(action_prob) + \" \"\n\n return buffer\n\n\ndef eval_policy(env:RailEnv, policy, train_params, obs_params):\n n_eval_episodes = train_params.n_evaluation_episodes\n max_steps = env._max_episode_steps\n tree_depth = obs_params.observation_tree_depth\n observation_radius = obs_params.observation_radius\n\n action_dict = dict()\n scores = []\n completions = []\n nb_steps = []\n\n for episode_idx in range(n_eval_episodes):\n agent_obs = [None] * env.get_num_agents()\n score = 0.0\n\n obs, info = env.reset(regenerate_rail=True, regenerate_schedule=True)\n\n final_step = 0\n\n for step in range(max_steps - 1):\n for agent in env.get_agent_handles():\n if obs[agent]:\n agent_obs[agent] = normalize_observation(obs[agent], tree_depth=tree_depth, observation_radius=observation_radius)\n\n action = 0\n if info['action_required'][agent]:\n action = policy.act(agent_obs[agent], eps=0.0)\n action_dict.update({agent: action})\n\n # obs, all_rewards, done, info = env.step(action_dict)\n obs, all_rewards, done, info = env.step(action_dict, reward_shaping=False)\n\n\n for agent in env.get_agent_handles():\n score += all_rewards[agent]\n # if step%1 == 0:\n # print(f\"Agent: {agent} has the score: {score}\")\n\n final_step = step\n\n if done['__all__']:\n break\n\n normalized_score = score / (max_steps * env.get_num_agents())\n scores.append(normalized_score)\n\n tasks_finished = sum(done[idx] for idx in env.get_agent_handles())\n completion = tasks_finished / max(1, env.get_num_agents())\n completions.append(completion)\n\n nb_steps.append(final_step)\n\n print(\"\\t✅ Eval: score {:.3f} done {:.1f}%\".format(np.mean(scores), np.mean(completions) * 100.0))\n\n return scores, completions, nb_steps\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"-n\", \"--n_episodes\", help=\"number of episodes to run\", default=2500, type=int)\n parser.add_argument(\"-t\", \"--training_env_config\", help=\"training config id (eg 0 for Test_0)\", default=0, type=int)\n parser.add_argument(\"-e\", \"--evaluation_env_config\", help=\"evaluation config id (eg 0 for Test_0)\", default=0, type=int)\n parser.add_argument(\"--n_evaluation_episodes\", help=\"number of evaluation episodes\", default=25, type=int)\n parser.add_argument(\"--checkpoint_interval\", help=\"checkpoint interval\", default=50, type=int)\n parser.add_argument(\"--eps_start\", help=\"max exploration\", default=1.0, type=float) # max exploration\n parser.add_argument(\"--eps_end\", help=\"min exploration\", default=0.01, type=float) # min exploration\n parser.add_argument(\"--eps_decay\", help=\"exploration decay\", default=0.99, type=float) # the decay of the exploration\n parser.add_argument(\"--buffer_size\", help=\"replay buffer size\", default=int(1e5), type=int)\n parser.add_argument(\"--buffer_min_size\", help=\"min buffer size to start training\", default=0, type=int)\n parser.add_argument(\"--restore_replay_buffer\", 
help=\"replay buffer to restore\", default=\"\", type=str)\n parser.add_argument(\"--save_replay_buffer\", help=\"save replay buffer at each evaluation interval\", default=False, type=bool)\n parser.add_argument(\"--batch_size\", help=\"minibatch size\", default=128, type=int)\n parser.add_argument(\"--gamma\", help=\"discount factor\", default=0.99, type=float) # multiplier over the targets \n parser.add_argument(\"--tau\", help=\"soft update of target parameters\", default=1e-3, type=float) # we don't know X\n parser.add_argument(\"--learning_rate\", help=\"learning rate\", default=0.5e-4, type=float)\n parser.add_argument(\"--hidden_size\", help=\"hidden size (2 fc layers)\", default=128, type=int)\n parser.add_argument(\"--update_every\", help=\"how often to update the network\", default=8, type=int)\n parser.add_argument(\"--use_gpu\", help=\"use GPU if available\", default=True, type=bool)\n parser.add_argument(\"--num_threads\", help=\"number of threads PyTorch can use\", default=2, type=int)\n parser.add_argument(\"--render\", help=\"render 1 episode in 100\", default=False, type=bool)\n training_params = parser.parse_args()\n\n env_params = [\n {\n # Test_-1\n \"n_agents\": 3,\n \"x_dim\": 25,\n \"y_dim\": 25,\n \"n_cities\": 3,\n \"max_rails_between_cities\": 3,\n \"max_rails_in_city\": 3,\n \"malfunction_rate\": 1 / 100,\n \"seed\": 0\n },\n {\n # Test_0\n \"n_agents\": 4,\n \"x_dim\": 30,\n \"y_dim\": 30,\n \"n_cities\": 4,\n \"max_rails_between_cities\": 3,\n \"max_rails_in_city\": 4,\n \"malfunction_rate\": 1 / 100,\n \"seed\": 0\n },\n {\n # Test_1\n \"n_agents\": 7,\n \"x_dim\": 30,\n \"y_dim\": 30,\n \"n_cities\": 6,\n \"max_rails_between_cities\": 5,\n \"max_rails_in_city\": 6,\n \"malfunction_rate\": 1 / 100,\n \"seed\": 0\n },\n {\n # Test_2\n \"n_agents\": 20,\n \"x_dim\": 30,\n \"y_dim\": 30,\n \"n_cities\": 3,\n \"max_rails_between_cities\": 2,\n \"max_rails_in_city\": 3,\n \"malfunction_rate\": 1 / 200,\n \"seed\": 0\n },\n ]\n\n obs_params = {\n \"observation_tree_depth\": 2,\n \"observation_radius\": 10,\n \"observation_max_path_depth\": 30\n }\n\n def check_env_config(id):\n if id >= len(env_params) or id < 0:\n print(\"\\n🛑 Invalid environment configuration, only Test_0 to Test_{} are supported.\".format(len(env_params) - 1))\n exit(1)\n\n\n check_env_config(training_params.training_env_config)\n check_env_config(training_params.evaluation_env_config)\n\n training_env_params = env_params[training_params.training_env_config]\n evaluation_env_params = env_params[training_params.evaluation_env_config]\n\n print(\"\\nTraining parameters:\")\n pprint(vars(training_params))\n print(\"\\nTraining environment parameters (Test_{}):\".format(training_params.training_env_config))\n pprint(training_env_params)\n print(\"\\nEvaluation environment parameters (Test_{}):\".format(training_params.evaluation_env_config))\n pprint(evaluation_env_params)\n print(\"\\nObservation parameters:\")\n pprint(obs_params)\n\n os.environ[\"OMP_NUM_THREADS\"] = str(training_params.num_threads)\n train_agent(training_params, Namespace(**training_env_params), Namespace(**evaluation_env_params), Namespace(**obs_params))\n\n"
] | [
[
"numpy.random.seed",
"numpy.power",
"numpy.min",
"numpy.round",
"numpy.max",
"numpy.std",
"numpy.mean",
"torch.utils.tensorboard.SummaryWriter",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
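Illustrative sketch (not part of the dataset row above): the training loop in this record anneals epsilon-greedy exploration with eps = max(eps_end, eps_decay * eps) once per episode and tracks scores with an exponential moving average (smoothing = 0.99). The epsilon constants below are the script's argparse defaults; the episode scores are invented:

eps, eps_end, eps_decay = 1.0, 0.01, 0.99   # script defaults
smoothed_score, smoothing = -1.0, 0.99

for episode, score in enumerate([-0.9, -0.7, -0.6]):   # invented normalized scores
    eps = max(eps_end, eps_decay * eps)                # exploration decay per episode
    smoothed_score = smoothed_score * smoothing + score * (1.0 - smoothing)
    print(f"episode {episode}: eps={eps:.3f}, smoothed score={smoothed_score:.4f}")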
lumbric/pyam | [
"a73fc6a78871988cd842e52111c00879cf90882b"
] | [
"pyam/plotting.py"
] | [
"import itertools\nimport warnings\n\ntry:\n import cartopy\n cartopy_message = 'all good!'\nexcept ImportError as e:\n cartopy = None\n cartopy_message = str(e)\n\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\nimport matplotlib.patches as mpatches\nimport numpy as np\nimport pandas as pd\n\ntry:\n import geopandas as gpd\n gpd_message = 'all good!'\nexcept ImportError as e:\n gpd = None\n gpd_message = str(e)\n\nfrom collections import defaultdict, Iterable\nfrom contextlib import contextmanager\n\ntry:\n from functools import lru_cache\nexcept ImportError:\n from functools32 import lru_cache\n\nfrom pyam.logger import logger\nfrom pyam.run_control import run_control\nfrom pyam.utils import requires_package, IAMC_IDX, SORT_IDX, isstr\n\nfrom pandas.plotting._style import _get_standard_colors\n\n# line colors, markers, and styles that are cycled through when not\n# explicitly declared\n_DEFAULT_PROPS = None\n\n# maximum number of labels after which do not show legends by default\nMAX_LEGEND_LABELS = 13\n\n# default legend kwargs for putting legends outside of plots\nOUTSIDE_LEGEND = {\n 'right': dict(loc='center left', bbox_to_anchor=(1.0, 0.5)),\n 'bottom': dict(loc='upper center', bbox_to_anchor=(0.5, -0.2), ncol=3),\n}\n\nPYAM_COLORS = {\n 'AR6-SSP1-1.9': \"#00AAD0\",\n 'AR6-SSP1-2.6': \"#003466\",\n 'AR6-SSP2-4.5': \"#EF550F\",\n 'AR6-SSP3-7.0': \"#E00000\",\n 'AR6-SSP3-LowNTCF': \"#E00000\",\n 'AR6-SSP4-3.4': \"#FFA900\",\n 'AR6-SSP4-6.0': \"#C47900\",\n 'AR6-SSP5-3.4-OS': \"#7F006E\",\n 'AR6-SSP5-8.5': \"#990002\",\n 'AR6-RCP-2.6': \"#003466\",\n 'AR6-RCP-4.5': \"#5492CD\",\n 'AR6-RCP-6.0': \"#C47900\",\n 'AR6-RCP-8.5': \"#990002\",\n # AR5 colours from\n # https://tdaviesbarnard.co.uk/1202/ipcc-official-colors-rcp/\n 'AR5-RCP-2.6': \"#0000FF\",\n 'AR5-RCP-4.5': \"#79BCFF\",\n 'AR5-RCP-6.0': \"#FF822D\",\n 'AR5-RCP-8.5': \"#FF0000\",\n}\n\n\ndef reset_default_props(**kwargs):\n \"\"\"Reset properties to initial cycle point\"\"\"\n global _DEFAULT_PROPS\n pcycle = plt.rcParams['axes.prop_cycle']\n _DEFAULT_PROPS = {\n 'color': itertools.cycle(_get_standard_colors(**kwargs))\n if len(kwargs) > 0 else itertools.cycle([x['color'] for x in pcycle]),\n 'marker': itertools.cycle(['o', 'x', '.', '+', '*']),\n 'linestyle': itertools.cycle(['-', '--', '-.', ':']),\n }\n\n\ndef default_props(reset=False, **kwargs):\n \"\"\"Return current default properties\n\n Parameters\n ----------\n reset : bool\n if True, reset properties and return\n default: False\n \"\"\"\n global _DEFAULT_PROPS\n if _DEFAULT_PROPS is None or reset:\n reset_default_props(**kwargs)\n return _DEFAULT_PROPS\n\n\ndef assign_style_props(df, color=None, marker=None, linestyle=None,\n cmap=None):\n \"\"\"Assign the style properties for a plot\n\n Parameters\n ----------\n df : pd.DataFrame\n data to be used for style properties\n \"\"\"\n if color is None and cmap is not None:\n raise ValueError('`cmap` must be provided with the `color` argument')\n\n # determine color, marker, and linestyle for each line\n n = len(df[color].unique()) if color in df.columns else \\\n len(df[list(set(df.columns) & set(IAMC_IDX))].drop_duplicates())\n defaults = default_props(reset=True, num_colors=n, colormap=cmap)\n\n props = {}\n rc = run_control()\n\n kinds = [('color', color), ('marker', marker), ('linestyle', linestyle)]\n\n for kind, var in kinds:\n rc_has_kind = kind in rc\n if var in df.columns:\n rc_has_var = rc_has_kind and var in rc[kind]\n props_for_kind = {}\n\n for val in 
df[var].unique():\n if rc_has_var and val in rc[kind][var]:\n props_for_kind[val] = rc[kind][var][val]\n # cycle any way to keep defaults the same\n next(defaults[kind])\n else:\n props_for_kind[val] = next(defaults[kind])\n props[kind] = props_for_kind\n\n # update for special properties only if they exist in props\n if 'color' in props:\n d = props['color']\n values = list(d.values())\n # find if any colors in our properties corresponds with special colors\n # we know about\n overlap_idx = np.in1d(values, list(PYAM_COLORS.keys()))\n if overlap_idx.any(): # some exist in our special set\n keys = np.array(list(d.keys()))[overlap_idx]\n values = np.array(values)[overlap_idx]\n # translate each from pyam name, like AR6-SSP2-45 to proper color\n # designation\n for k, v in zip(keys, values):\n d[k] = PYAM_COLORS[v]\n # replace props with updated dict without special colors\n props['color'] = d\n return props\n\n\ndef reshape_line_plot(df, x, y):\n \"\"\"Reshape data from long form to \"line plot form\".\n\n Line plot form has x value as the index with one column for each line.\n Each column has data points as values and all metadata as column headers.\n \"\"\"\n idx = list(df.columns.drop(y))\n if df.duplicated(idx).any():\n warnings.warn('Duplicated index found.')\n df = df.drop_duplicates(idx, keep='last')\n df = df.set_index(idx)[y].unstack(x).T\n return df\n\n\ndef reshape_bar_plot(df, x, y, bars):\n \"\"\"Reshape data from long form to \"bar plot form\".\n\n Bar plot form has x value as the index with one column for bar grouping.\n Table values come from y values.\n \"\"\"\n idx = [bars, x]\n if df.duplicated(idx).any():\n warnings.warn('Duplicated index found.')\n df = df.drop_duplicates(idx, keep='last')\n df = df.set_index(idx)[y].unstack(x).T\n return df\n\n\n@requires_package(gpd, 'Requires geopandas: ' + gpd_message)\n@lru_cache()\ndef read_shapefile(fname, region_col=None, **kwargs):\n \"\"\"Read a shapefile for use in regional plots. Shapefiles must have a\n column denoted as \"region\".\n\n Parameters\n ----------\n fname : string\n path to shapefile to be read by geopandas\n region_col : string, default None\n if provided, rename a column in the shapefile to \"region\"\n \"\"\"\n gdf = gpd.read_file(fname, **kwargs)\n if region_col is not None:\n gdf = gdf.rename(columns={region_col: 'region'})\n if 'region' not in gdf.columns:\n raise IOError('Must provide a region column')\n gdf['region'] = gdf['region'].str.upper()\n return gdf\n\n\n@requires_package(gpd, 'Requires geopandas: ' + gpd_message)\n@requires_package(cartopy, 'Requires cartopy: ' + cartopy_message)\ndef region_plot(df, column='value', ax=None, crs=None, gdf=None,\n add_features=True, vmin=None, vmax=None, cmap=None,\n cbar=True, legend=False, title=True):\n \"\"\"Plot data on a map.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n column : string, optional, default: 'value'\n The column to use for plotting values\n ax : matplotlib.Axes, optional\n crs : cartopy.crs, optional\n The crs to plot, PlateCarree is used by default.\n gdf : geopandas.GeoDataFrame, optional\n The geometries to plot. The gdf must have a \"region\" column.\n add_features : bool, optional, default: True\n If true, add land, ocean, coastline, and border features.\n vmin : numeric, optional\n The minimum value to plot.\n vmax : numeric, optional\n The maximum value to plot.\n cmap : string, optional\n The colormap to use.\n cbar : bool or dictionary, optional, default: True\n Add a colorbar. 
If a dictionary is provided, it will be used as keyword\n arguments in creating the colorbar.\n legend : bool or dictionary, optional, default: False\n Add a legend. If a dictionary is provided, it will be used as keyword\n arguments in creating the legend.\n title : bool or string, optional\n Display a default or custom title.\n \"\"\"\n for col in ['model', 'scenario', 'year', 'variable']:\n if len(df[col].unique()) > 1:\n msg = 'Can not plot multiple {}s in region_plot'\n raise ValueError(msg.format(col))\n\n crs = crs or cartopy.crs.PlateCarree()\n if ax is None:\n fig, ax = plt.subplots(subplot_kw=dict(projection=crs))\n elif not isinstance(ax, cartopy.mpl.geoaxes.GeoAxesSubplot):\n msg = 'Must provide a cartopy axes object, not: {}'\n raise ValueError(msg.format(type(ax)))\n\n gdf = gdf or read_shapefile(gpd.datasets.get_path('naturalearth_lowres'),\n region_col='iso_a3')\n data = gdf.merge(df, on='region', how='inner').to_crs(crs.proj4_init)\n if data.empty: # help users with iso codes\n df['region'] = df['region'].str.upper()\n data = gdf.merge(df, on='region', how='inner').to_crs(crs.proj4_init)\n if data.empty:\n raise ValueError('No data to plot')\n\n if add_features:\n ax.add_feature(cartopy.feature.LAND)\n ax.add_feature(cartopy.feature.OCEAN)\n ax.add_feature(cartopy.feature.COASTLINE)\n ax.add_feature(cartopy.feature.BORDERS)\n\n vmin = vmin if vmin is not None else data['value'].min()\n vmax = vmax if vmax is not None else data['value'].max()\n norm = colors.Normalize(vmin=vmin, vmax=vmax)\n cmap = plt.get_cmap(cmap)\n scalar_map = cmx.ScalarMappable(norm=norm, cmap=cmap)\n labels = []\n handles = []\n for _, row in data.iterrows():\n label = row['label'] if 'label' in row else row['region']\n color = scalar_map.to_rgba(row['value'])\n ax.add_geometries(\n [row['geometry']],\n crs,\n facecolor=color,\n label=label,\n )\n if label not in labels:\n labels.append(label)\n handle = mpatches.Rectangle((0, 0), 5, 5, facecolor=color)\n handles.append(handle)\n\n if cbar:\n scalar_map._A = [] # for some reason you have to clear this\n if cbar is True: # use some defaults\n cbar = dict(\n fraction=0.022, # these are magic numbers\n pad=0.02, # that just seem to \"work\"\n )\n plt.colorbar(scalar_map, ax=ax, **cbar)\n\n if legend is not False:\n if legend is True: # use some defaults\n legend = dict(\n bbox_to_anchor=(1.32, 0.5) if cbar else (1.2, 0.5),\n loc='right',\n )\n _add_legend(ax, handles, labels, legend)\n\n if title:\n var = df['variable'].unique()[0]\n unit = df['unit'].unique()[0]\n year = df['year'].unique()[0]\n default_title = '{} ({}) in {}'.format(var, unit, year)\n title = default_title if title is True else title\n ax.set_title(title)\n\n return ax\n\n\ndef pie_plot(df, value='value', category='variable',\n ax=None, legend=False, title=True, cmap=None,\n **kwargs):\n \"\"\"Plot data as a bar chart.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n value : string, optional\n The column to use for data values\n default: value\n category : string, optional\n The column to use for labels\n default: variable\n ax : matplotlib.Axes, optional\n legend : bool, optional\n Include a legend\n default: False\n title : bool or string, optional\n Display a default or custom title.\n cmap : string, optional\n A colormap to use.\n default: None\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n \"\"\"\n for col in set(SORT_IDX) - set([category]):\n if len(df[col].unique()) > 1:\n msg = 'Can not plot multiple {}s 
in pie_plot with value={},' +\\\n ' category={}'\n raise ValueError(msg.format(col, value, category))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # get data, set negative values to explode\n _df = df.groupby(category)[value].sum()\n where = _df > 0\n explode = tuple(0 if _ else 0.2 for _ in where)\n _df = _df.abs()\n\n # explicitly get colors\n defaults = default_props(reset=True, num_colors=len(_df.index),\n colormap=cmap)['color']\n rc = run_control()\n color = []\n for key, c in zip(_df.index, defaults):\n if 'color' in rc and \\\n category in rc['color'] and \\\n key in rc['color'][category]:\n c = rc['color'][category][key]\n color.append(c)\n\n # plot data\n _df.plot(kind='pie', colors=color, ax=ax, explode=explode, **kwargs)\n\n # add legend\n ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5), labels=_df.index)\n if not legend:\n ax.legend_.remove()\n\n # remove label\n ax.set_ylabel('')\n\n return ax\n\n\ndef stack_plot(df, x='year', y='value', stack='variable',\n ax=None, legend=True, title=True, cmap=None, total=None,\n **kwargs):\n \"\"\"Plot data as a stack chart.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n x : string, optional\n The column to use for x-axis values\n default: year\n y : string, optional\n The column to use for y-axis values\n default: value\n stack: string, optional\n The column to use for stack groupings\n default: variable\n ax : matplotlib.Axes, optional\n legend : bool, optional\n Include a legend\n default: False\n title : bool or string, optional\n Display a default or custom title.\n cmap : string, optional\n A colormap to use.\n default: None\n total : bool or dict, optional\n If True, plot a total line with default pyam settings. If a dict, then\n plot the total line using the dict key-value pairs as keyword arguments\n to ax.plot(). If None, do not plot the total line.\n default : None\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n \"\"\"\n for col in set(SORT_IDX) - set([x, stack]):\n if len(df[col].unique()) > 1:\n msg = 'Can not plot multiple {}s in stack_plot with x={}, stack={}'\n raise ValueError(msg.format(col, x, stack))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # long form to one column per bar group\n _df = reshape_bar_plot(df, x, y, stack)\n\n # Line below is for interpolation. 
On datetimes I think you'd downcast to\n # seconds first and then cast back to datetime at the end..?\n _df.index = _df.index.astype(float)\n\n time_original = _df.index.values\n first_zero_times = pd.DataFrame(index=[\"first_zero_time\"])\n\n both_positive_and_negative = _df.apply(\n lambda x: (x >= 0).any() and (x < 0).any()\n )\n for col in _df.loc[:, both_positive_and_negative]:\n values = _df[col].dropna().values\n positive = (values >= 0)\n negative = (values < 0)\n pos_to_neg = positive[:-1] & negative[1:]\n neg_to_pos = positive[1:] & negative[:-1]\n crosses = np.argwhere(pos_to_neg | neg_to_pos)\n for i, cross in enumerate(crosses):\n cross = cross[0] # get location\n x_1 = time_original[cross]\n x_2 = time_original[cross + 1]\n y_1 = values[cross]\n y_2 = values[cross + 1]\n\n zero_time = x_1 - y_1 * (x_2 - x_1) / (y_2 - y_1)\n if i == 0:\n first_zero_times.loc[:, col] = zero_time\n if zero_time not in _df.index.values:\n _df.loc[zero_time, :] = np.nan\n\n first_zero_times = first_zero_times.sort_values(\n by=\"first_zero_time\",\n axis=1,\n )\n _df = _df.reindex(sorted(_df.index)).interpolate(method=\"values\")\n\n # Sort lines so that negative timeseries are on the right, positive\n # timeseries are on the left and timeseries which go from positive to\n # negative are ordered such that the timeseries which goes negative first\n # is on the right (case of timeseries which go from negative to positive\n # is an edge case we haven't thought about as it's unlikely to apply to\n # us).\n pos_cols = [c for c in _df if (_df[c] > 0).all()]\n cross_cols = first_zero_times.columns[::-1].tolist()\n neg_cols = [c for c in _df if (_df[c] < 0).all()]\n col_order = pos_cols + cross_cols + neg_cols\n _df = _df[col_order]\n\n # explicitly get colors\n defaults = default_props(reset=True, num_colors=len(_df.columns),\n colormap=cmap)['color']\n rc = run_control()\n colors = {}\n for key in _df.columns:\n c = next(defaults)\n c_in_rc = 'color' in rc\n if c_in_rc and stack in rc['color'] and key in rc['color'][stack]:\n c = rc['color'][stack][key]\n colors[key] = c\n\n # plot stacks, starting from the top and working our way down to the bottom\n negative_only_cumulative = _df.applymap(\n lambda x: x if x < 0 else 0\n ).cumsum(axis=1)\n positive_only_cumulative = _df.applymap(lambda x: x if x >= 0 else 0)[\n col_order[::-1]\n ].cumsum(axis=1)[\n col_order\n ]\n time = _df.index.values\n upper = positive_only_cumulative.iloc[:, 0].values\n for j, col in enumerate(_df):\n noc_tr = negative_only_cumulative.iloc[:, j].values\n try:\n poc_nr = positive_only_cumulative.iloc[:, j + 1].values\n except IndexError:\n poc_nr = np.zeros_like(upper)\n lower = poc_nr.copy()\n if (noc_tr < 0).any():\n lower[np.where(poc_nr == 0)] = noc_tr[np.where(poc_nr == 0)]\n\n ax.fill_between(time, lower, upper, label=col,\n color=colors[col], **kwargs)\n upper = lower.copy()\n\n # add total\n if (total is not None) and total: # cover case where total=False\n if isinstance(total, bool): # can now assume total=True\n total = {}\n total.setdefault(\"label\", \"Total\")\n total.setdefault(\"color\", \"black\")\n total.setdefault(\"lw\", 4.0)\n ax.plot(time, _df.sum(axis=1), **total)\n\n # add legend\n ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n if not legend:\n ax.legend_.remove()\n\n # add default labels if possible\n ax.set_xlabel(x.capitalize())\n units = df['unit'].unique()\n if len(units) == 1:\n ax.set_ylabel(units[0])\n\n # build a default title if possible\n _title = []\n for var in ['model', 'scenario', 
'region', 'variable']:\n values = df[var].unique()\n if len(values) == 1:\n _title.append('{}: {}'.format(var, values[0]))\n if title and _title:\n title = ' '.join(_title) if title is True else title\n ax.set_title(title)\n\n return ax\n\n\ndef bar_plot(df, x='year', y='value', bars='variable',\n ax=None, orient='v', legend=True, title=True, cmap=None,\n **kwargs):\n \"\"\"Plot data as a bar chart.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n x : string, optional\n The column to use for x-axis values\n default: year\n y : string, optional\n The column to use for y-axis values\n default: value\n bars: string, optional\n The column to use for bar groupings\n default: variable\n ax : matplotlib.Axes, optional\n orient : string, optional\n Vertical or horizontal orientation.\n default: variable\n legend : bool, optional\n Include a legend\n default: False\n title : bool or string, optional\n Display a default or custom title.\n cmap : string, optional\n A colormap to use.\n default: None\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n \"\"\"\n for col in set(SORT_IDX) - set([x, bars]):\n if len(df[col].unique()) > 1:\n msg = 'Can not plot multiple {}s in bar_plot with x={}, bars={}'\n raise ValueError(msg.format(col, x, bars))\n\n if ax is None:\n fig, ax = plt.subplots()\n\n # long form to one column per bar group\n _df = reshape_bar_plot(df, x, y, bars)\n\n # explicitly get colors\n defaults = default_props(reset=True, num_colors=len(_df.columns),\n colormap=cmap)['color']\n rc = run_control()\n color = []\n for key in _df.columns:\n c = next(defaults)\n if 'color' in rc and bars in rc['color'] and key in rc['color'][bars]:\n c = rc['color'][bars][key]\n color.append(c)\n\n # plot data\n kind = 'bar' if orient.startswith('v') else 'barh'\n _df.plot(kind=kind, color=color, ax=ax, **kwargs)\n\n # add legend\n ax.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n if not legend:\n ax.legend_.remove()\n\n # add default labels if possible\n if orient == 'v':\n ax.set_xlabel(x.capitalize())\n else:\n ax.set_ylabel(x.capitalize())\n units = df['unit'].unique()\n if len(units) == 1 and y == 'value':\n if orient == 'v':\n ax.set_ylabel(units[0])\n else:\n ax.set_xlabel(units[0])\n\n # build a default title if possible\n _title = []\n for var in ['model', 'scenario', 'region', 'variable']:\n values = df[var].unique()\n if len(values) == 1:\n _title.append('{}: {}'.format(var, values[0]))\n if title and _title:\n title = ' '.join(_title) if title is True else title\n ax.set_title(title)\n\n return ax\n\n\ndef _get_boxes(ax, xoffset=0.05, width_weight=0.1):\n xys = {}\n widths = {}\n heights = defaultdict(list)\n for b in ax.get_children():\n if isinstance(b, mpatches.Rectangle) and b.xy != (0, 0):\n x, y = b.xy\n heights[x].append(b.get_height())\n widths[x] = b.get_width() * width_weight\n xys[x] = ((x + b.get_width()) + xoffset, 0)\n return {x: (xys[x], widths[x], sum(heights[x])) for x in xys.keys()}\n\n\ndef add_net_values_to_bar_plot(axs, color='k'):\n \"\"\"Add net values next to an existing vertical stacked bar chart\n\n Parameters\n ----------\n axs : matplotlib.Axes or list thereof\n color : str, optional, default: black\n the color of the bars to add\n \"\"\"\n axs = axs if isinstance(axs, Iterable) else [axs]\n for ax in axs:\n box_args = _get_boxes(ax)\n for x, args in box_args.items():\n rect = mpatches.Rectangle(*args, color=color)\n ax.add_patch(rect)\n\n\ndef scatter(df, x, y, ax=None, legend=None, 
title=None,\n color=None, marker='o', linestyle=None, cmap=None,\n groupby=['model', 'scenario'], with_lines=False, **kwargs):\n \"\"\"Plot data as a scatter chart.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n x : str\n column to be plotted on the x-axis\n y : str\n column to be plotted on the y-axis\n ax : matplotlib.Axes, optional\n legend : bool, optional\n Include a legend (`None` displays legend only if less than 13 entries)\n default: None\n title : bool or string, optional\n Display a custom title.\n color : string, optional\n A valid matplotlib color or column name. If a column name, common\n values will be provided the same color.\n default: None\n marker : string\n A valid matplotlib marker or column name. If a column name, common\n values will be provided the same marker.\n default: 'o'\n linestyle : string, optional\n A valid matplotlib linestyle or column name. If a column name, common\n values will be provided the same linestyle.\n default: None\n cmap : string, optional\n A colormap to use.\n default: None\n groupby : list-like, optional\n Data grouping for plotting.\n default: ['model', 'scenario']\n with_lines : bool, optional\n Make the scatter plot with lines connecting common data.\n default: False\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n\n # assign styling properties\n props = assign_style_props(df, color=color, marker=marker,\n linestyle=linestyle, cmap=cmap)\n\n # group data\n groups = df.groupby(groupby)\n\n # loop over grouped dataframe, plot data\n legend_data = []\n for name, group in groups:\n pargs = {}\n labels = []\n for key, kind, var in [('c', 'color', color),\n ('marker', 'marker', marker),\n ('linestyle', 'linestyle', linestyle)]:\n if kind in props:\n label = group[var].values[0]\n pargs[key] = props[kind][group[var].values[0]]\n labels.append(repr(label).lstrip(\"u'\").strip(\"'\"))\n else:\n pargs[key] = var\n\n if len(labels) > 0:\n legend_data.append(' '.join(labels))\n else:\n legend_data.append(' '.join(name))\n kwargs.update(pargs)\n if with_lines:\n ax.plot(group[x], group[y], **kwargs)\n else:\n kwargs.pop('linestyle') # scatter() can't take a linestyle\n ax.scatter(group[x], group[y], **kwargs)\n\n # build legend handles and labels\n handles, labels = ax.get_legend_handles_labels()\n if legend_data != [''] * len(legend_data):\n labels = sorted(list(set(tuple(legend_data))))\n idxs = [legend_data.index(d) for d in labels]\n handles = [handles[i] for i in idxs]\n if legend is None and len(labels) < 13 or legend is not False:\n _add_legend(ax, handles, labels, legend)\n\n # add labels and title\n ax.set_xlabel(x)\n ax.set_ylabel(y)\n if title:\n ax.set_title(title)\n\n return ax\n\n\ndef line_plot(df, x='year', y='value', ax=None, legend=None, title=True,\n color=None, marker=None, linestyle=None, cmap=None,\n fill_between=None, final_ranges=None,\n rm_legend_label=[], **kwargs):\n \"\"\"Plot data as lines with or without markers.\n\n Parameters\n ----------\n df : pd.DataFrame\n Data to plot as a long-form data frame\n x : string, optional\n The column to use for x-axis values\n default: year\n y : string, optional\n The column to use for y-axis values\n default: value\n ax : matplotlib.Axes, optional\n legend : bool or dictionary, optional\n Add a legend. 
If a dictionary is provided, it will be used as keyword\n arguments in creating the legend.\n default: None (displays legend only if less than 13 entries)\n title : bool or string, optional\n Display a default or custom title.\n color : string, optional\n A valid matplotlib color or column name. If a column name, common\n values will be provided the same color.\n default: None\n marker : string, optional\n A valid matplotlib marker or column name. If a column name, common\n values will be provided the same marker.\n default: None\n linestyle : string, optional\n A valid matplotlib linestyle or column name. If a column name, common\n values will be provided the same linestyle.\n default: None\n cmap : string, optional\n A colormap to use.\n default: None\n fill_between : boolean or dict, optional\n Fill lines between minima/maxima of the 'color' argument. This can only\n be used if also providing a 'color' argument. If this is True, then\n default arguments will be provided to `ax.fill_between()`. If this is a\n dictionary, those arguments will be provided instead of defaults.\n default: None\n final_ranges : boolean or dict, optional\n Add vertical line between minima/maxima of the 'color' argument in the\n last period plotted. This can only be used if also providing a 'color'\n argument. If this is True, then default arguments will be provided to\n `ax.axvline()`. If this is a dictionary, those arguments will be\n provided instead of defaults.\n default: None\n rm_legend_label : string, list, optional\n Remove the color, marker, or linestyle label in the legend.\n default: []\n kwargs : Additional arguments to pass to the pd.DataFrame.plot() function\n \"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n\n # assign styling properties\n props = assign_style_props(df, color=color, marker=marker,\n linestyle=linestyle, cmap=cmap)\n\n if fill_between and 'color' not in props:\n raise ValueError('Must use `color` kwarg if using `fill_between`')\n if final_ranges and 'color' not in props:\n raise ValueError('Must use `color` kwarg if using `final_ranges`')\n\n # reshape data for use in line_plot\n df = reshape_line_plot(df, x, y) # long form to one column per line\n\n # determine index of column name in reshaped dataframe\n prop_idx = {}\n for kind, var in [('color', color), ('marker', marker),\n ('linestyle', linestyle)]:\n if var is not None and var in df.columns.names:\n prop_idx[kind] = df.columns.names.index(var)\n\n # plot data, keeping track of which legend labels to apply\n no_label = [rm_legend_label] if isstr(rm_legend_label) else rm_legend_label\n\n for col, data in df.iteritems():\n pargs = {}\n labels = []\n # build plotting args and line legend labels\n for key, kind, var in [('c', 'color', color),\n ('marker', 'marker', marker),\n ('linestyle', 'linestyle', linestyle)]:\n if kind in props:\n label = col[prop_idx[kind]]\n pargs[key] = props[kind][label]\n if kind not in no_label:\n labels.append(repr(label).lstrip(\"u'\").strip(\"'\"))\n else:\n pargs[key] = var\n kwargs.update(pargs)\n data = data.dropna()\n data.plot(ax=ax, **kwargs)\n if labels:\n ax.lines[-1].set_label(' '.join(labels))\n\n if fill_between:\n _kwargs = {'alpha': 0.25} if fill_between in [True, None] \\\n else fill_between\n data = df.T\n columns = data.columns\n # get outer boundary mins and maxes\n allmins = data.groupby(color).min()\n intermins = (\n data.dropna(axis=1).groupby(color).min() # nonan data\n .reindex(columns=columns) # refill with nans\n .T.interpolate(method='index').T # interpolate\n )\n mins 
= pd.concat([allmins, intermins]).min(level=0)\n allmaxs = data.groupby(color).max()\n intermaxs = (\n data.dropna(axis=1).groupby(color).max() # nonan data\n .reindex(columns=columns) # refill with nans\n .T.interpolate(method='index').T # interpolate\n )\n maxs = pd.concat([allmaxs, intermaxs]).max(level=0)\n # do the fill\n for idx in mins.index:\n ymin = mins.loc[idx]\n ymax = maxs.loc[idx]\n ax.fill_between(ymin.index, ymin, ymax,\n facecolor=props['color'][idx], **_kwargs)\n\n # add bars to the end of the plot showing range\n if final_ranges:\n # have to explicitly draw it to get the tick labels (these change once\n # you add the vlines)\n plt.gcf().canvas.draw()\n _kwargs = {'linewidth': 2} if final_ranges in [True, None] \\\n else final_ranges\n first = df.index[0]\n final = df.index[-1]\n mins = df.T.groupby(color).min()[final]\n maxs = df.T.groupby(color).max()[final]\n ymin, ymax = ax.get_ylim()\n ydiff = ymax - ymin\n xmin, xmax = ax.get_xlim()\n xdiff = xmax - xmin\n xticks = ax.get_xticks()\n xlabels = ax.get_xticklabels()\n # 1.5% increase seems to be ok per extra line\n extra_space = 0.015\n for i, idx in enumerate(mins.index):\n xpos = final + xdiff * extra_space * (i + 1)\n _ymin = (mins[idx] - ymin) / ydiff\n _ymax = (maxs[idx] - ymin) / ydiff\n ax.axvline(xpos, ymin=_ymin, ymax=_ymax,\n color=props['color'][idx], **_kwargs)\n # for equal spacing between xmin and first datapoint and xmax and last\n # line\n ax.set_xlim(xmin, xpos + first - xmin)\n ax.set_xticks(xticks)\n ax.set_xticklabels(xlabels)\n\n # build unique legend handles and labels\n handles, labels = ax.get_legend_handles_labels()\n handles, labels = np.array(handles), np.array(labels)\n _, idx = np.unique(labels, return_index=True)\n handles, labels = handles[idx], labels[idx]\n if legend is not False:\n _add_legend(ax, handles, labels, legend)\n\n # add default labels if possible\n ax.set_xlabel(x.title())\n units = df.columns.get_level_values('unit').unique()\n units_for_ylabel = len(units) == 1 and x == 'year' and y == 'value'\n ylabel = units[0] if units_for_ylabel else y.title()\n ax.set_ylabel(ylabel)\n\n # build a default title if possible\n if title:\n default_title = []\n for var in ['model', 'scenario', 'region', 'variable']:\n if var in df.columns.names:\n values = df.columns.get_level_values(var).unique()\n if len(values) == 1:\n default_title.append('{}: {}'.format(var, values[0]))\n title = ' '.join(default_title) if title is True else title\n ax.set_title(title)\n\n return ax, handles, labels\n\n\ndef _add_legend(ax, handles, labels, legend):\n if legend is None and len(labels) >= MAX_LEGEND_LABELS:\n logger().info('>={} labels, not applying legend'.format(\n MAX_LEGEND_LABELS))\n else:\n legend = {} if legend in [True, None] else legend\n loc = legend.pop('loc', 'best')\n outside = loc.split(' ')[1] if loc.startswith('outside ') else False\n _legend = OUTSIDE_LEGEND[outside] if outside else dict(loc=loc)\n _legend.update(legend)\n ax.legend(handles, labels, **_legend)\n\n\ndef set_panel_label(label, ax=None, x=0.05, y=0.9):\n \"\"\"Add a panel label to the figure/axes, by default in the top-left corner\n\n Parameters\n ----------\n label : str\n text to be added as panel label\n ax : matplotlib.Axes, optional\n panel to which to add the panel label\n x : number, default 0.05\n relative location of label to x-axis\n y : number, default 0.9\n relative location of label to y-axis\n \"\"\"\n def _lim_loc(lim, loc):\n return lim[0] + (lim[1] - lim[0]) * loc\n\n if ax is not None:\n 
ax.text(_lim_loc(ax.get_xlim(), x), _lim_loc(ax.get_ylim(), y), label)\n else:\n plt.text(_lim_loc(plt.xlim(), x), _lim_loc(plt.ylim(), y), label)\n"
] | [
[
"pandas.concat",
"numpy.unique",
"matplotlib.pyplot.ylim",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.get_cmap",
"matplotlib.colors.Normalize",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.colorbar",
"numpy.argwhere",
"matplotlib.pyplot.xlim",
"matplotlib.cm.ScalarMappable",
"numpy.zeros_like",
"matplotlib.pyplot.gcf",
"pandas.plotting._style._get_standard_colors",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
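Illustrative sketch (not part of the dataset row above; the long-form data is invented): pyam's reshape_line_plot in this record converts long-form data into "line plot form" (x values as the index, one column per line) by setting all non-y columns as the index and unstacking x. A standalone version of the same reshape:

import pandas as pd

df = pd.DataFrame({
    "scenario": ["a", "a", "b", "b"],   # invented long-form data
    "year": [2020, 2030, 2020, 2030],
    "value": [1.0, 2.0, 1.5, 2.5],
})

def reshape_line_plot(df, x="year", y="value"):
    idx = list(df.columns.drop(y))
    df = df.drop_duplicates(idx, keep="last")   # mirrors the duplicate handling above
    return df.set_index(idx)[y].unstack(x).T    # index: x values; one column per line

print(reshape_line_plot(df))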
Shriyam-Avasthi/Virtual-Whiteboard | [
"9c0fa39319d360094a380f3d5faf3a6f26531256"
] | [
"GUI.py"
] | [
"from ui_GUI import *\r\n# from PySide2 import *\r\nimport sys \r\nimport cv2\r\nfrom PySide2.QtGui import QPixmap\r\nfrom PySide2 import QtGui\r\nfrom functools import partial\r\nimport numpy as np\r\nimport mouse\r\nfrom MultiThreading import MainThread\r\nfrom Whiteboard import WhiteBoard\r\nfrom Tools import ToolsManager\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self):\r\n QMainWindow.__init__(self)\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self) \r\n\r\n #Basic Window setup\r\n self.setWindowFlags(Qt.FramelessWindowHint) \r\n\r\n self.setAttribute(Qt.WA_TranslucentBackground)\r\n \r\n self.shadow = QGraphicsDropShadowEffect(self)\r\n self.shadow.setBlurRadius(50)\r\n self.shadow.setXOffset(0)\r\n self.shadow.setYOffset(0)\r\n self.shadow.setColor(QColor(0, 92, 157, 550))\r\n \r\n self.ui.centralwidget.setGraphicsEffect(self.shadow)\r\n\r\n self.ui.minimizeButton.clicked.connect(lambda: self.showMinimized())\r\n\r\n self.ui.closeButton.clicked.connect(lambda: self.close())\r\n\r\n self.ui.minimizeButton.clicked.connect(lambda: self.showMinimized())\r\n\r\n self.ui.closeButton.clicked.connect(lambda: self.close())\r\n\r\n # Add click event/Mouse move event/drag event to the top header to move the window\r\n self.ui.mainHeader.mouseMoveEvent = self.moveWindow\r\n\r\n #Left Menu toggle button\r\n self.ui.menuButton.clicked.connect(lambda: self.slideLeftMenu())\r\n \r\n ui = self.ui\r\n\r\n self.buttons = [ui.penButton, ui.eraserButton, ui.shapesButton, ui.textButton, ui.laserButton]\r\n self.shapeButtons = [ui.CircleButton , ui.LineButton , ui.RectangleButton]\r\n self.sliders = [ui.penSizeSlider , ui.eraserSizeSlider , ui.shapesThicknessSlider , ui.textSizeSlider , ui.laserSizeSlider]\r\n self.dials = [ui.penColorDial, QDial() , ui.shapesColorDial,ui.textColorDial, ui.laserColorDial]\r\n self.lcds = [ui.penSizeLCD , ui.eraserSizeLCD , ui.shapesThicknessLCD , ui.textSizeLCD , ui.laserSizeLCD]\r\n self.displayModeIcons = [u\":/icons/Icons/video.svg\" , u\":/icons/Icons/cast.svg\" , u\":/icons/Icons/video-off.svg\"]\r\n \r\n\r\n for i,j in enumerate(self.buttons):\r\n j.clicked.connect(partial(self.SelectTool , i ))\r\n for i,j in enumerate(self.shapeButtons):\r\n j.clicked.connect(partial(self.SelectShape , i))\r\n for i,j in enumerate(self.sliders):\r\n j.valueChanged.connect(partial(self.SetToolSize , i))\r\n for i,j in enumerate(self.dials):\r\n j.valueChanged.connect(partial(self.SetToolColor , i))\r\n\r\n self.ui.broadcastButton.clicked.connect( self.ChangeDisplayMode )\r\n self.ui.eraseAllButton.clicked.connect( self.EraseOnScreen)\r\n\r\n #Initialize MainThread class to run the main code of the application\r\n self.mainThread = MainThread()\r\n self.mainThread.start()\r\n self.mainThread.signals.change_pixmap_signal.connect(self.ShowFrame)\r\n self.mainThread.signals.changeToolSize_signal.connect(self.ChangeSliderPos)\r\n self.mainThread.signals.moveMouse_signal.connect(self.MoveMouse)\r\n self.mainThread.signals.changeStatus_signal.connect(self.ChangeUseStatus)\r\n img = self.mainThread.videoGet.frame\r\n self.windowWidth = img.shape[1]\r\n self.windowHeight = img.shape[0] + 200\r\n self.setFixedSize(self.windowWidth , self.windowHeight)\r\n\r\n # Slide left menu function\r\n def slideLeftMenu(self):\r\n width = self.ui.menu.width()\r\n # If minimized\r\n if width == 0:\r\n # Expand menu\r\n newWidth = 400\r\n newWinWidth = self.windowWidth + newWidth\r\n self.setFixedSize(newWinWidth , self.windowHeight)\r\n self.slideAnimation = 
QPropertyAnimation(self.ui.menu, b\"minimumWidth\") #Animate minimumWidht\r\n self.slideAnimation.setDuration(250)\r\n self.slideAnimation.setStartValue(width) #Start value is the current menu width\r\n self.slideAnimation.setEndValue(newWidth) #end value is the new menu width\r\n self.slideAnimation.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.slideAnimation.start()\r\n self.ui.menuButton.setIcon(QtGui.QIcon(u\":/icons/Icons/chevron-left.svg\"))\r\n # If maximized\r\n else:\r\n # Restore menu\r\n newWidth = 0\r\n newWinWidth = self.windowWidth\r\n self.slideAnimation = QPropertyAnimation(self.ui.menu, b\"minimumWidth\") #Animate minimumWidht\r\n self.slideAnimation.setDuration(250)\r\n self.slideAnimation.setStartValue(width) #Start value is the current menu width\r\n self.slideAnimation.setEndValue(newWidth) #end value is the new menu width\r\n self.slideAnimation.setEasingCurve(QEasingCurve.InOutQuart)\r\n self.slideAnimation.start()\r\n self.slideAnimation.finished.connect(lambda : self.setFixedSize(newWinWidth , self.windowHeight))\r\n self.ui.menuButton.setIcon(QtGui.QIcon(u\":/icons/Icons/align-left.svg\"))\r\n\r\n def mousePressEvent(self, event):\r\n self.clickPosition = event.globalPos()\r\n\r\n\r\n def moveWindow(self,e):\r\n\r\n if self.isMaximized() == False: #Not maximized\r\n\r\n if e.buttons() == Qt.LeftButton: \r\n #Move window \r\n self.move(self.pos() + e.globalPos() - self.clickPosition),\r\n self.clickPosition = e.globalPos()\r\n e.accept()\r\n\r\n def SelectTool(self,toolID):\r\n # Iterate through all the buttons of the tools\r\n for i,j in enumerate(self.buttons):\r\n if i == toolID: # Tool to be selected\r\n j.setChecked(True)\r\n ToolsManager.GetInstance().SetCurrentTool(i)\r\n self.ui.stackedToolsMenu.setCurrentIndex(i)\r\n self.ui.useStatus.setText(ToolsManager.GetInstance().GetUseStatus())\r\n self.ui.bottomCurrentSizeLCD.display(str(ToolsManager.GetInstance().GetSize()))\r\n else:\r\n j.setChecked(False)\r\n \r\n # Set Color of the bottomCurrentColorButton of Bottom Status bar to show current color of the selected tool \r\n color = ToolsManager.GetInstance().GetCurrentToolColor()\r\n style = f\"background-color: rgb({color[2]}, {color[1]}, {color[0]});\"\r\n self.ui.bottomCurrentColorButton.setStyleSheet(style)\r\n\r\n def SelectShape(self , toolID):\r\n # Iterate through the shapeButtons list\r\n for i,j in enumerate(self.shapeButtons):\r\n if i == toolID: #Shape to be selected\r\n j.setChecked(True)\r\n ToolsManager.GetInstance().SetCurrentShape(i)\r\n else :\r\n j.setChecked(False)\r\n \r\n \r\n def SetToolSize(self , toolID , size):\r\n toolsManager = ToolsManager.GetInstance()\r\n toolsManager.SetToolSize(toolID ,size)\r\n # Set the display number of LCD Component of the current menu\r\n self.lcds[toolsManager.currentToolID].display(str(size))\r\n # Set the display number of LCD Component of the bottom Status bar\r\n self.ui.bottomCurrentSizeLCD.display(str(toolsManager.GetSize()))\r\n \r\n def SetToolColor(self,toolID , hue):\r\n # Create color of maximum saturation and value with given hue\r\n color = QColor.fromHsv( hue , 255 , 255 )\r\n ToolsManager.GetInstance().SetToolColor(toolID , [color.blue() , color.green() , color.red()])\r\n # Set the color of bottomCurrentColorButton of bottom status bar\r\n style = f\"background-color: rgb({color.red()}, {color.green()}, {color.blue()});\"\r\n self.ui.bottomCurrentColorButton.setStyleSheet(style)\r\n\r\n def ChangeDisplayMode(self):\r\n showMode = self.mainThread.videoShow.showMode\r\n if showMode < 2 
:\r\n showMode += 1\r\n else:\r\n # as there are only 3 display modes, defined in videoShow Class\r\n showMode = 0\r\n self.mainThread.videoShow.showMode = showMode\r\n self.ui.broadcastButton.setIcon(QIcon( self.displayModeIcons[ self.mainThread.videoShow.showMode ] ))\r\n status = \"\"\r\n if showMode == 0 :\r\n status = \"Full Video Broadcast\"\r\n elif showMode == 1:\r\n status = \" Mini-Video Broadcast\"\r\n else:\r\n status = \"Only Canvas Broadcast\"\r\n self.ui.videoStatusLabel.setText(status)\r\n\r\n def EraseOnScreen(self):\r\n # clear the sub array of canvas that is visible on screen\r\n WhiteBoard.GetInstance().canvas[ : , : , : ] = np.zeros(WhiteBoard.GetInstance().canvas.shape)\r\n \r\n @Slot(np.ndarray)\r\n def ShowFrame(self , frame):\r\n self.display_width , self.display_height = self.ui.videoLabel.size().toTuple()\r\n qt_img = self.convert_cv_qt(frame)\r\n # Set the video frame as pixmap to videoLabel component\r\n self.ui.videoLabel.setPixmap(qt_img)\r\n\r\n @Slot(int)\r\n def ChangeSliderPos(self , size):\r\n toolsManager = ToolsManager.GetInstance()\r\n # Animate the slider\r\n self.sliderAnimation = QPropertyAnimation(self.sliders[toolsManager.currentToolID], b\"sliderPosition\")\r\n self.sliderAnimation.setDuration(50)\r\n self.sliderAnimation.setStartValue(self.sliders[toolsManager.currentToolID].value())\r\n self.sliderAnimation.setEndValue(size)\r\n self.sliderAnimation.start()\r\n # Set LCD display number of current menu and bottom status bar\r\n self.lcds[toolsManager.currentToolID].display(str(size))\r\n self.ui.bottomCurrentSizeLCD.display(str(toolsManager.GetSize()))\r\n \r\n @Slot( bool , list )\r\n def MoveMouse(self , clicked , fingerPos):\r\n # Get position of central frame with respect to the origin of the screen\r\n minMousePos = self.ui.Central.mapToGlobal(QPoint(0,0)).toTuple()\r\n # Size of the central frame of the main window\r\n winSize = self.ui.Central.size().toTuple()\r\n # End coordinates of the main window\r\n maxMousePos = (minMousePos[0] + winSize[0] , minMousePos[1] + winSize[1] )\r\n # Get position of video label with respect to the origin of the screen\r\n minVideoLabelPos = self.ui.videoLabel.mapToGlobal(QPoint(0,0)).toTuple()\r\n # size of video label\r\n videoLabelSize = self.ui.videoLabel.size().toTuple()\r\n \r\n maxVideoLabelPos = (minVideoLabelPos[0] + videoLabelSize[0] , minVideoLabelPos[1] + videoLabelSize[1])\r\n # As the video is flipped (refer to videoshow class), to get the position of finger with respect to the origin of the video label component\r\n fingerPos_local = ( videoLabelSize[0] - fingerPos[0] , fingerPos[1] )\r\n # Position of the finger with respect to the origin of the screen \r\n globalFingerPos = (minVideoLabelPos[0] + fingerPos_local[0] , minVideoLabelPos[1] + fingerPos_local[1])\r\n\r\n # Convert the given finger position to global mouse position such that the mouse always remains inside the window\r\n mousePosx = np.interp( globalFingerPos[0] , [minVideoLabelPos[0] , maxVideoLabelPos[0]] , [minMousePos[0] , maxMousePos[0]] )\r\n mousePosy = np.interp( globalFingerPos[1] , [minVideoLabelPos[1] , maxVideoLabelPos[1]] , [minMousePos[1] , maxMousePos[1]] )\r\n if clicked:\r\n mouse.press()\r\n else:\r\n mouse.release()\r\n mouse.move ( mousePosx , mousePosy)\r\n\r\n @Slot( str )\r\n def ChangeUseStatus(self, text):\r\n self.ui.useStatus.setText(text)\r\n \r\n def convert_cv_qt(self, img):\r\n # Convert from an opencv image to QPixmap\r\n rgb_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n h, w, ch = 
rgb_image.shape\r\n bytes_per_line = ch * w\r\n convert_to_Qt_format = QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\r\n p = convert_to_Qt_format.scaled(self.display_width, self.display_height, Qt.KeepAspectRatio)\r\n return QPixmap.fromImage(p)\r\n\r\n def closeEvent(self, event):\r\n self.mainThread.quit()\r\n event.accept()\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n window = MainWindow()\r\n \r\n window.show()\r\n sys.exit(app.exec_())\r\n"
] | [
[
"numpy.interp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
e2crawfo/dps | [
"968a87ed8580f58b46e75463d13a5966f4e772eb"
] | [
"dps/train.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nimport time\nfrom contextlib import ExitStack\nimport numpy as np\nfrom pprint import pformat\nimport datetime\nimport os\nimport pandas as pd\nimport dill\nfrom collections import defaultdict\nimport traceback\nimport json\nimport subprocess\nfrom tabulate import tabulate\nimport warnings\n\nfrom dps import cfg, init\nfrom dps.utils import (\n gen_seed, time_limit, Alarm, memory_usage, gpu_memory_usage, ExperimentStore,\n ExperimentDirectory, nvidia_smi, memory_limit, Config, redirect_stream, pretty_func,\n NumpySeed, restart_tensorboard, launch_pdb_on_exception, execute_command, flush_print as _print,\n)\n\n\ndef training_loop(exp_name='', start_time=None):\n init()\n\n framework = cfg.get('framework', 'tensorflow')\n\n if framework == 'tensorflow':\n from dps.tf.train import TensorFlowTrainingLoop\n loop = TensorFlowTrainingLoop(exp_name)\n\n elif framework == 'pytorch':\n from dps.pytorch.train import PyTorchTrainingLoop\n loop = PyTorchTrainingLoop(exp_name)\n\n else:\n raise Exception(\"Unknown framework: {}. Options are {'tensorflow', 'pytorch'}.\".format(cfg.framework))\n\n return loop.run(start_time)\n\n\nclass EarlyStopHook:\n def __init__(self, patience, maximize, start):\n self.patience = patience\n self.maximize = maximize\n self.start = start\n self.reset()\n\n def _check_trigger(self, sc):\n if self._best_stopping_criteria is None:\n return True\n\n if self.maximize:\n return sc > self._best_stopping_criteria\n else:\n return sc < self._best_stopping_criteria\n\n def check(self, stopping_criteria, step, record):\n if self.start is not None and step < self.start:\n # Overwrite `stopping_criteria` if not enough steps have elapsed\n if self.maximize:\n stopping_criteria = -np.inf\n else:\n stopping_criteria = np.inf\n\n new_best = self._check_trigger(stopping_criteria)\n\n if new_best:\n self._best_stopping_criteria = stopping_criteria\n self._best_step = step\n self._best_record = record.copy()\n\n if self.patience > 0:\n stop_current = step - self._best_step > self.patience\n\n if self.start is not None:\n stop_current = stop_current and step > self.start\n\n self._early_stopped = self._early_stopped or stop_current\n\n return new_best, self._early_stopped\n\n @property\n def best(self):\n best = self._best_record.copy()\n best.update(stopping_criteria=self._best_stopping_criteria, local_step=self._best_step)\n return best\n\n def reset(self):\n self._best_stopping_criteria = None\n self._best_record = None\n self._best_step = None\n self._early_stopped = 0\n\n\nclass TrainingLoop:\n \"\"\" A training loop.\n\n The behaviour of the training loop depends on the context stack that is active when it is\n run (i.e. 
`run` method is called), not the one that is active when it is created.\n\n Parameters\n ----------\n exp_name: str\n Name of the experiment, used as a prefix when creating a directory for storing data\n generated by the training run.\n\n \"\"\"\n def __init__(self, exp_name=''):\n self.exp_name = exp_name or cfg.exp_name\n self.start_time = None\n\n \"\"\" Abstract methods \"\"\"\n\n def framework_initialize(self):\n raise Exception(\"NotImplemented\")\n\n @property\n def time_remaining(self):\n if cfg.max_time is None or cfg.max_time <= 0:\n return np.inf\n else:\n elapsed_time = time.time() - self.start_time\n return cfg.max_time - elapsed_time\n\n def edit_remaining_stage(self, idx, stage_config):\n if len(self.curriculum_remaining) < idx+1:\n for i in range(idx+1 - len(self.curriculum_remaining)):\n self.curriculum_remaining.append(dict())\n\n self.curriculum_remaining[idx].update(stage_config)\n\n def timestamp(self, message):\n if message:\n message = message + \" \"\n\n _print(\"{}({}, {:.2f}s elapsed, {:.2f}s remaining)\".format(\n message,\n datetime.datetime.now(),\n time.time() - self.start_time,\n self.time_remaining))\n\n def get_load_paths(self):\n \"\"\"\n Let a *path_specification* be one of three things:\n 1. An integer specifying a previous stage to load the best hypothesis from.\n 2. A string of format: \"stage_idx,kind\" where `stage_idx` specifies a previous stage to load from\n and `kind` is either \"final\" or \"best\", specifying whether to load final or best\n hypothesis from that stage.\n 3. A path on the filesystem that gives a prefix for a tensorflow checkpoint file to load from.\n\n Then cfg.load_path can either be a path_specification itself, in which case all variables\n in the network will be loaded from that path_specification, or a dictionary mapping from\n variable scope names to path specifications, in which case all variables in each supplied\n variable scope name will be loaded from the path_specification paired with that scope name.\n\n \"\"\"\n load_path = cfg.load_path\n _print(\"\\nMaybe loading weights, load_path={} ...\".format(load_path))\n\n if load_path:\n if isinstance(load_path, str) or isinstance(load_path, int):\n load_path = {\"\": load_path}\n\n load_path = dict(load_path)\n\n # Sort in increasing order, so that if one variable scope lies within another scope,\n # the outer scope gets loaded before the inner scope, rather than having the outer scope\n # wipe out the inner scope.\n items = sorted(load_path.items())\n\n # --- fill in paths from stages ---\n\n _items = []\n for module_path, path in items:\n load_stage, kind = None, None\n\n try:\n load_stage = int(path)\n kind = \"best\"\n except (TypeError, ValueError):\n try:\n split = path.split(',')\n load_stage = int(split[0])\n kind = split[1] if len(split) > 1 else 'best'\n assert kind in 'best final'.split(), \"path={}\".format(path)\n except Exception:\n load_stage, kind = None, None\n\n if load_stage is not None:\n if self.stage_idx == 0:\n _print(\n \"Not loading submodule \\\"{}\\\" from stage {}, \"\n \"currently in stage 0.\".format(module_path, load_stage))\n continue\n else:\n key = kind + '_path'\n completed_history = self.data.history[:-1]\n path = completed_history[load_stage][key]\n\n path = os.path.realpath(path)\n\n _items.append((module_path, path))\n return _items\n\n else:\n _print(\"`load_path` is null, using a fresh set of weights.\")\n return []\n\n def run(self, start_time):\n \"\"\" Run the training loop.\n\n Parameters\n ----------\n start_time: int\n 
Start time (in seconds since epoch) for measuring elapsed time for\n purposes of interrupting the training loop.\n\n \"\"\"\n if start_time is None:\n start_time = time.time()\n self.start_time = start_time\n\n self.timestamp(\"Entering TrainingLoop.run\")\n\n # Call prepare_func to modify the config in arbitrary ways before training\n prepare_func = cfg.get(\"prepare_func\", None)\n if callable(prepare_func):\n prepare_funcs = [prepare_func]\n else:\n try:\n prepare_funcs = list(prepare_func)\n except (TypeError, ValueError):\n prepare_funcs = []\n\n for f in prepare_funcs:\n if callable(f):\n _print(\"Calling prepare func {}...\".format(pretty_func(f)))\n f()\n\n self.curriculum = cfg.curriculum + []\n\n if cfg.start_from:\n initial_stage, initial_step = cfg.start_from.split(',')\n cfg.initial_stage = initial_stage = int(initial_stage)\n initial_step = int(initial_step)\n if initial_step != 0:\n self.curriculum[initial_stage]['initial_step'] = initial_step\n\n if cfg.seed is None or cfg.seed < 0:\n cfg.seed = gen_seed()\n\n # Create a directory to store the results of the training run.\n self.experiment_store = ExperimentStore(os.path.join(cfg.local_experiments_dir, cfg.env_name))\n\n filename_keys = cfg.get('filename_keys', [])\n if isinstance(filename_keys, str):\n filename_keys = filename_keys.split(',')\n filename_data = {key: str(cfg[key]) for key in filename_keys if key}\n\n exp_dir = self.experiment_store.new_experiment(\n self.exp_name, cfg.seed, data=filename_data,\n add_date=1, force_fresh=1, update_latest=cfg.update_latest)\n\n self.exp_dir = exp_dir\n cfg.path = exp_dir.path\n\n breaker = \"-\" * 40\n header = \"{}\\nREADME.md - {}\\n{}\\n\\n\\n\".format(breaker, os.path.basename(exp_dir.path), breaker)\n readme = header + (cfg.readme if cfg.readme else \"\") + \"\\n\\n\"\n\n with open(exp_dir.path_for('README.md'), 'w') as f:\n f.write(readme)\n\n self.data = self.training_loop_data_class(exp_dir)\n self.data.setup()\n\n frozen_data = None\n\n with ExitStack() as stack:\n if cfg.pdb:\n stack.enter_context(launch_pdb_on_exception())\n\n _print(\"`pdb` is turned on, so forcing setting robust=False\")\n cfg.robust = False\n\n stack.enter_context(redirect_stream('stdout', self.data.path_for('stdout'), tee=cfg.tee))\n stack.enter_context(redirect_stream('stderr', self.data.path_for('stderr'), tee=cfg.tee))\n\n stack.enter_context(warnings.catch_warnings())\n warnings.simplefilter(cfg.warning_mode)\n\n _print(\"\\n\\n\" + \"=\" * 80)\n self.timestamp(\"Starting training run (name={})\".format(self.exp_name))\n\n _print(\"\\nDirectory for this training run is {}.\".format(exp_dir.path))\n\n stack.enter_context(NumpySeed(cfg.seed))\n _print(\"\\nSet numpy random seed to {}.\\n\".format(cfg.seed))\n\n limiter = time_limit(\n self.time_remaining, verbose=True,\n timeout_callback=lambda limiter: _print(\"Training run exceeded its time limit.\"))\n\n try:\n with limiter:\n self._run()\n\n finally:\n self.data.summarize()\n\n self.timestamp(f\"Done training run (name={self.exp_name})\")\n _print(f\"Experiment lives at {self.exp_dir.path}\")\n _print(\"=\" * 80)\n _print(\"\\n\\n\")\n\n frozen_data = self.data.freeze()\n\n finalize_func = cfg.get(\"finalize_func\", None)\n if callable(finalize_func):\n finalize_funcs = [finalize_func]\n else:\n try:\n finalize_funcs = list(finalize_func)\n except (TypeError, ValueError):\n finalize_funcs = []\n for f in finalize_funcs:\n if callable(f):\n _print(\"Calling finalize func {}...\".format(f.__name__))\n f()\n\n self.timestamp(\"Leaving 
TrainingLoop.run\")\n\n return frozen_data\n\n def _run(self):\n _print(cfg)\n\n threshold_reached = True\n self.global_step = 0\n self.n_global_experiences = 0\n max_stages = cfg.get('max_stages', 0) or None\n self.curriculum_remaining = self.curriculum[:max_stages] + []\n self.curriculum_complete = []\n\n if cfg.initial_stage is not None:\n if cfg.initial_stage >= 0:\n self.stage_idx = cfg.initial_stage\n else:\n raise Exception(\"Initial stage cannot be negative: {}\".format(cfg.initial_stage))\n self.curriculum_remaining = self.curriculum_remaining[self.stage_idx:]\n else:\n self.stage_idx = 0\n\n while self.curriculum_remaining:\n _print(\"\\n\" + \"=\" * 50)\n self.timestamp(\"Starting stage {}\".format(self.stage_idx))\n _print(\"\\n\")\n\n if cfg.start_tensorboard:\n if cfg.start_tensorboard == \"local\":\n tb_path = self.exp_dir.path\n else:\n try:\n n_latest = int(cfg.start_tensorboard)\n tb_path = self.experiment_store.isolate_n_latest(n_latest)\n except (ValueError, TypeError):\n tb_path = self.experiment_store.path\n\n restart_tensorboard(tb_path, cfg.tbport, cfg.reload_interval)\n\n stage_config = self.curriculum_remaining.pop(0)\n stage_config = Config(stage_config)\n\n self.data.start_stage(self.stage_idx, stage_config)\n\n with ExitStack() as stack:\n\n # --------------- Stage set-up -------------------\n\n _print(\"\\n\" + \"-\" * 10 + \" Stage set-up \" + \"-\" * 10)\n\n _print(\"\\nNew config values for this stage are: \\n{}\\n\".format(stage_config))\n\n stack.enter_context(stage_config)\n\n stage_prepare_func = cfg.get(\"stage_prepare_func\", None)\n if callable(stage_prepare_func):\n stage_prepare_func() # Modify the stage config in arbitrary ways before starting stage\n\n # Set limit on CPU RAM for the stage\n cpu_ram_limit_mb = cfg.get(\"cpu_ram_limit_mb\", None)\n if cpu_ram_limit_mb is not None:\n stack.enter_context(memory_limit(cfg.cpu_ram_limit_mb))\n\n self.framework_initialize_stage(stack)\n\n _print(\"Building env...\\n\")\n\n # Maybe build env\n if self.stage_idx == 0 or not cfg.preserve_env:\n if getattr(self, 'env', None):\n self.env.close()\n self.env = cfg.build_env()\n\n if hasattr(self.env, \"print_memory_footprint\"):\n self.env.print_memory_footprint()\n\n _print(\"\\nDone building env.\\n\")\n _print(\"Building updater...\\n\")\n\n updater = cfg.get_updater(self.env)\n self.updater = updater\n\n updater.stage_idx = self.stage_idx\n updater.exp_dir = self.exp_dir\n\n updater.build_graph()\n _print(\"\\nDone building updater.\\n\")\n\n # --- build hooks ---\n\n for hook in cfg.hooks:\n assert isinstance(hook, Hook)\n hook.start_stage(self, updater, self.stage_idx)\n\n if cfg.render_hook is not None:\n cfg.render_hook.start_stage(self, updater, self.stage_idx)\n\n self.framework_finalize_stage_initialization()\n\n threshold_reached = False\n reason = None\n ran_ok = False\n\n try:\n # --------------- Run stage -------------------\n\n start = time.time()\n phys_memory_before = memory_usage(physical=True)\n gpu_memory_before = gpu_memory_usage()\n\n threshold_reached, reason = self._run_stage(self.stage_idx, updater)\n\n ran_ok = True\n\n except KeyboardInterrupt:\n reason = \"User interrupt\"\n raise\n\n except NotImplementedError as e:\n # There is a bug in pdb_postmortem that prevents instances of `NotImplementedError`\n # from being handled properly, so replace it with an instance of `Exception`.\n if cfg.robust:\n traceback.print_exc()\n reason = \"Exception occurred ({})\".format(repr(e))\n else:\n raise Exception(\"NotImplemented\") from e\n\n 
except Exception as e:\n reason = \"Exception occurred ({})\".format(repr(e))\n if cfg.robust:\n traceback.print_exc()\n else:\n raise\n\n except Alarm:\n reason = \"Time limit exceeded\"\n raise\n\n finally:\n\n try:\n phys_memory_after = memory_usage(physical=True)\n gpu_memory_after = gpu_memory_usage()\n\n self.data.record_values_for_stage(\n stage_duration=time.time()-start,\n phys_memory_before_mb=phys_memory_before,\n phys_memory_delta_mb=phys_memory_after - phys_memory_before,\n gpu_memory_before_mb=gpu_memory_before,\n gpu_memory_delta_mb=gpu_memory_after - gpu_memory_before\n )\n\n self.data.record_values_for_stage(reason=reason)\n\n _print(\"\\n\" + \"-\" * 10 + \" Optimization complete \" + \"-\" * 10)\n _print(\"\\nReason: {}.\\n\".format(reason))\n\n _print(\"Storing final weights...\")\n weight_start = time.time()\n final_path = self.data.path_for('weights/final_stage_{}'.format(self.stage_idx))\n final_path = cfg.get('save_path', final_path)\n final_path = updater.save(final_path)\n _print(\"Done saving weights, took {} seconds\".format(time.time() - weight_start))\n\n self.data.record_values_for_stage(final_path=final_path)\n\n # --------------- Maybe test and render with best hypothesis -------------------\n\n do_final_testing = (\n \"Exception occurred\" not in reason\n and reason != \"Time limit exceeded\"\n and cfg.get('do_final_testing', True)\n )\n\n if do_final_testing:\n try:\n _print(\"\\n\" + \"-\" * 10 + \" Final testing/rendering \" + \"-\" * 10)\n\n if 'best_path' in self.data.current_stage_record:\n best_path = self.data.current_stage_record['best_path']\n\n _print(\"Best hypothesis for this stage was found on \"\n \"step (l: {best_local_step}, g: {best_global_step}) \"\n \"with stopping criteria ({sc_name}) of {best_stopping_criteria}.\".format(\n sc_name=self.stopping_criteria_name, **self.data.current_stage_record))\n\n _print(\"Loading best hypothesis for this stage \"\n \"from file {}...\".format(best_path))\n updater.restore(best_path)\n else:\n _print(\"No `best_path` found, testing with final weights instead.\")\n\n try:\n test_record = updater.evaluate(cfg.batch_size, self.local_step, mode=\"test\")\n except Exception:\n _print(\"Encountered error running final tests: \")\n traceback.print_exc()\n\n test_record = {}\n\n for hook in cfg.hooks:\n if hook.final:\n hook_record = hook.final_step(self, updater)\n\n if hook_record:\n assert len(hook_record) == 1\n for k, d in dict(hook_record).items():\n test_record.update(d)\n\n self.data.record_values_for_stage(\n **{'test_' + k: v for k, v in test_record.items()})\n\n if cfg.render_final and cfg.render_hook is not None:\n _print(\"Rendering...\")\n cfg.render_hook(updater)\n _print(\"Done rendering.\")\n\n self.data.summarize()\n\n except BaseException:\n _print(\"Exception occurred while performing final testing/rendering: \")\n traceback.print_exc()\n\n else:\n _print(\"\\n\" + \"-\" * 10 + \" Skipping final testing/rendering \" + \"-\" * 10)\n\n # --------------- Finish up the stage -------------------\n\n _print(\"\\n\" + \"-\" * 10 + \" Running end-of-stage hooks \" + \"-\" * 10 + \"\\n\")\n for hook in cfg.hooks:\n hook.end_stage(self, updater, self.stage_idx)\n\n self.data.end_stage(self.local_step)\n\n _print()\n self.timestamp(\"Done stage {}\".format(self.stage_idx))\n _print(\"=\" * 50)\n\n self.stage_idx += 1\n self.curriculum_complete.append(stage_config)\n\n except Exception:\n # If there is already an exception, we want to post-portem as the original exception,\n # not the one caused by 
finalization.\n if ran_ok:\n raise\n else:\n _print(\"Ignoring exception triggered while finalizing:\")\n traceback.print_exc()\n\n if not (threshold_reached or cfg.power_through):\n _print(\"Failed to reach stopping criteria threshold on stage {} \"\n \"of the curriculum, terminating.\".format(self.stage_idx-1))\n break\n\n def _run_stage(self, stage_idx, updater):\n \"\"\" Run main training loop for a stage of the curriculum. \"\"\"\n\n threshold_reached = False\n reason = \"NotStarted\"\n\n stopping_criteria = cfg.stopping_criteria\n\n if isinstance(stopping_criteria, str):\n stopping_criteria = stopping_criteria.split(\",\")\n\n self.stopping_criteria_name = stopping_criteria[0]\n if \"max\" in stopping_criteria[1]:\n self.maximize_sc = True\n stopping_criteria_value = -np.inf\n elif \"min\" in stopping_criteria[1]:\n self.maximize_sc = False\n stopping_criteria_value = np.inf\n else:\n raise Exception(\"Ambiguous stopping criteria specification: {}\".format(stopping_criteria[1]))\n\n early_stop = EarlyStopHook(\n patience=cfg.patience, maximize=self.maximize_sc, start=cfg.get('patience_start', None))\n\n _print(\"\\n\" + \"-\" * 10 + \" Training begins \" + \"-\" * 10)\n self.timestamp(\"\")\n\n total_hooks_time = 0.0\n time_per_hook = 0.0\n\n total_eval_time = 0.0\n time_per_eval = 0.0\n\n total_train_time = 0.0\n time_per_example = 0.0\n time_per_update = 0.0\n\n n_updates = 0\n n_evals = 0\n if cfg.initial_step is not None and cfg.initial_step > 0:\n self.local_step = cfg.initial_step\n else:\n self.local_step = 0\n\n n_fallbacks = 0\n\n while True:\n local_step = self.local_step\n global_step = self.global_step\n\n # --- check whether to keep training ---\n\n if local_step >= cfg.max_steps:\n reason = \"Maximum number of steps-per-stage reached\"\n break\n\n if updater.n_experiences >= cfg.max_experiences:\n reason = \"Maximum number of experiences-per-stage reached\"\n break\n\n # --- check which steps to run ---\n\n render_step = cfg.eval_step if cfg.render_step <= 0 else cfg.render_step\n display_step = cfg.eval_step if cfg.display_step <= 0 else cfg.display_step\n checkpoint_step = cfg.eval_step if cfg.checkpoint_step <= 0 else cfg.checkpoint_step\n weight_step = cfg.eval_step if cfg.weight_step <= 0 else cfg.weight_step\n backup_step = cfg.eval_step if cfg.backup_step <= 0 else cfg.backup_step\n\n evaluate = local_step % cfg.eval_step == 0 and (local_step > 0 or cfg.get('eval_first', True))\n display = local_step % display_step == 0 and local_step > 0\n render = local_step % render_step == 0 and (local_step > 0 or cfg.render_first)\n checkpoint = local_step % checkpoint_step == 0 and local_step > 0\n save_weights = local_step % weight_step == 0 and local_step > 0\n save_weights_steps = cfg.get('save_weights_steps', [])\n save_weights |= local_step in save_weights_steps\n overwrite_weights = cfg.overwrite_weights and local_step not in save_weights_steps\n backup = local_step % backup_step == 0 and local_step > 0 and cfg.backup_dir\n\n if display or render or evaluate or local_step % 100 == 0:\n _print(\"\\n{} Starting step {} {}\".format(\"-\" * 40, local_step, \"-\" * 40), flush=True)\n self.timestamp(\"\")\n _print(\"\")\n\n data_to_store = []\n\n try:\n updater.step = local_step\n\n # --------------- Run hooks -------------------\n\n hooks_start = time.time()\n\n for hook in cfg.hooks:\n if hook.call_per_timestep:\n run_hook = local_step == 0 and hook.initial\n run_hook |= local_step > 0 and local_step % hook.n == 0\n\n if run_hook:\n hook_record = hook.step(self, updater, 
local_step)\n\n if hook_record:\n data_to_store.extend(dict(hook_record).items())\n\n hooks_duration = time.time() - hooks_start\n\n if render and cfg.render_hook is not None:\n _print(\"Rendering...\")\n\n start = time.time()\n if cfg.robust:\n try:\n cfg.render_hook(updater)\n except Exception:\n pass\n else:\n cfg.render_hook(updater)\n\n _print(\"Done rendering, took {} seconds.\".format(time.time() - start))\n\n # --------------- Possibly evaluate -------------------\n\n if evaluate:\n _print(\"Evaluating...\")\n eval_start_time = time.time()\n val_record = updater.evaluate(cfg.batch_size, local_step, mode=\"val\")\n eval_duration = time.time() - eval_start_time\n _print(\"Done evaluating, took {} seconds.\".format(eval_duration))\n\n val_record[\"duration\"] = eval_duration\n\n n_evals += 1\n total_eval_time += eval_duration\n time_per_eval = total_eval_time / n_evals\n\n val_record = Config(val_record)\n\n data_to_store.append((\"val\", val_record))\n\n if self.stopping_criteria_name in val_record:\n stopping_criteria_value = val_record[self.stopping_criteria_name]\n else:\n stopping_criteria_names = [\n k for k in val_record.flatten().keys() if k.startswith(self.stopping_criteria_name)]\n\n if len(stopping_criteria_names) == 0:\n _print(\"Stopping criteria {} not in record returned \"\n \"by updater, using 0.0.\".format(self.stopping_criteria_name))\n stopping_criteria_value = 0.0\n\n elif len(stopping_criteria_names) > 1:\n _print(\"stopping_criteria_name `{}` picks out multiple values: {}, using \"\n \"0.0\".format(self.stopping_criteria_name, stopping_criteria_names))\n stopping_criteria_value = 0.0\n else:\n stopping_criteria_value = val_record[stopping_criteria_names[0]]\n\n new_best, stop = early_stop.check(stopping_criteria_value, local_step, val_record)\n\n if new_best:\n _print(\"Storing new best on step (l={}, g={}), \"\n \"constituting (l={}, g={}) experiences, \"\n \"with stopping criteria ({}) of {}.\".format(\n local_step, global_step,\n updater.n_experiences, self.n_global_experiences,\n self.stopping_criteria_name, stopping_criteria_value))\n\n best_path = self.data.path_for('weights/best_stage_{}'.format(stage_idx))\n best_path = cfg.get('save_path', best_path)\n\n weight_start = time.time()\n best_path = updater.save(best_path)\n\n _print(\"Done saving weights, took {} seconds\".format(time.time() - weight_start))\n\n self.data.record_values_for_stage(\n best_path=best_path, best_global_step=global_step)\n self.data.record_values_for_stage(\n **{'best_' + k: v for k, v in early_stop.best.items()})\n\n if stop:\n _print(\"Early stopping triggered.\")\n reason = \"Early stopping triggered\"\n break\n\n threshold = cfg.get('threshold', None)\n if threshold is not None:\n if self.maximize_sc:\n threshold_reached = stopping_criteria_value >= threshold\n else:\n threshold_reached = stopping_criteria_value <= threshold\n\n if threshold_reached:\n reason = \"Stopping criteria threshold reached\"\n break\n\n # --------------- Perform an update -------------------\n\n if cfg.do_train:\n if local_step % 100 == 0:\n _print(\"Running update step {}...\".format(local_step))\n\n update_start_time = time.time()\n\n _old_n_experiences = updater.n_experiences\n\n update_record = updater.update(cfg.batch_size, local_step)\n\n n_updates += 1\n\n update_duration = time.time() - update_start_time\n update_record[\"duration\"] = update_duration\n\n n_experiences_delta = updater.n_experiences - _old_n_experiences\n self.n_global_experiences += n_experiences_delta\n\n total_train_time += 
update_duration\n time_per_example = total_train_time / updater.n_experiences\n time_per_update = total_train_time / n_updates\n\n total_hooks_time += hooks_duration\n time_per_hook = total_hooks_time / n_updates\n\n if local_step % 100 == 0:\n _print(\"Done update step, took {} seconds.\".format(update_duration))\n _print(\"Average time per update: {} seconds\".format(time_per_update))\n\n start = time.time()\n update_record[\"memory_physical_mb\"] = memory_usage(physical=True)\n update_record[\"memory_virtual_mb\"] = memory_usage(physical=False)\n update_record[\"memory_gpu_mb\"] = gpu_memory_usage()\n _print(\"Memory check duration: {}\".format(time.time() - start))\n\n if evaluate:\n # Only store train data as often as we evaluate, otherwise it's just too much data\n data_to_store.append(('train', update_record))\n\n except Exception as e:\n if not cfg.max_n_fallbacks:\n raise e\n\n traceback.print_exc()\n\n n_fallbacks += 1\n\n if n_fallbacks > cfg.max_n_fallbacks:\n _print(f\"Fell back too many times ({n_fallbacks} times).\")\n raise e\n\n weight_dir = self.data.path_for('weights')\n weight_files = [f for f in os.listdir(weight_dir) if f.startswith(f'checkpoint_stage_{stage_idx}')]\n if not weight_files:\n _print(\"Tried to fall back, but no checkpoint weights were found.\")\n raise e\n weight_file = sorted(weight_files)[-1]\n weight_path = os.path.join(weight_dir, weight_file)\n\n _print(f\"Falling back to checkpoint weights: {weight_path}\")\n\n updater.restore(weight_path)\n\n self.local_step += 1\n self.global_step += 1\n\n continue\n\n # --------------- Store data -------------------\n\n records = defaultdict(dict)\n for mode, r in data_to_store:\n r = Config(r).flatten()\n records[mode].update(r)\n\n self.data.store_step_data_and_summaries(\n stage_idx, local_step, global_step,\n updater.n_experiences, self.n_global_experiences,\n **records)\n\n self.data.record_values_for_stage(\n time_per_example=time_per_example,\n time_per_update=time_per_update,\n time_per_eval=time_per_eval,\n time_per_hook=time_per_hook,\n n_steps=local_step,\n n_experiences=updater.n_experiences,\n )\n\n if display:\n _print(\"Displaying...\")\n self.data.summarize_current_stage(\n local_step, global_step, updater.n_experiences, self.n_global_experiences)\n _print(\"\\nMy PID: {}\\n\".format(os.getpid()))\n _print(\"Physical memory use: {}mb\".format(memory_usage(physical=True)))\n _print(\"Virtual memory use: {}mb\".format(memory_usage(physical=False)))\n\n _print(\"Avg time per update: {}s\".format(time_per_update))\n _print(\"Avg time per eval: {}s\".format(time_per_eval))\n _print(\"Avg time for hooks: {}s\".format(time_per_hook))\n\n if cfg.use_gpu:\n _print(nvidia_smi())\n\n if checkpoint:\n self.data.dump_data(local_step)\n\n if save_weights:\n _print(\"Storing checkpoint weights on step (l={}, g={}), \"\n \"constituting (l={}, g={}) experiences, \"\n \"with stopping criteria ({}) of {}.\".format(\n local_step, global_step,\n updater.n_experiences, self.n_global_experiences,\n self.stopping_criteria_name, stopping_criteria_value))\n\n if overwrite_weights:\n weight_path = self.data.path_for(\n 'weights/checkpoint_stage_{}'.format(stage_idx))\n else:\n weight_path = self.data.path_for(\n 'weights/checkpoint_stage_{}_step_{}'.format(stage_idx, local_step))\n\n weight_start = time.time()\n weight_path = updater.save(weight_path)\n _print(f\"Saved weights to {weight_path}, took {time.time()-weight_start} seconds\")\n\n if backup:\n _print(\"Backing up experiment directory.\")\n _print(\"src: 
{}\".format(self.exp_dir.path))\n _print(\"dest: {}\".format(cfg.backup_dir))\n\n command = \"rsync -avzu --timeout=300 {src} {dest}\".format(\n src=self.exp_dir.path, dest=cfg.backup_dir,\n )\n execute_command(command, output=\"loud\", robust=True)\n\n self.local_step += 1\n self.global_step += 1\n\n # If `do_train` is False, we do no training and evaluate\n # exactly once, so only one iteration is required.\n if not cfg.do_train:\n reason = \"`do_train` set to False\"\n break\n\n return threshold_reached, reason\n\n\nclass FrozenTrainingLoopData(ExperimentDirectory):\n \"\"\" Interface for the on-disk data generated by a training loop.\n\n Parameters\n ----------\n path: str\n Path to the the directory for the experiment whose data we want to access.\n\n \"\"\"\n def __init__(self, path):\n self.path = path.path if isinstance(path, ExperimentDirectory) else path\n self._config = None\n self._history = None\n\n def get_summary_path(self, mode):\n return self.path_for('summaries/' + mode, is_dir=True)\n\n def get_data_path(self, mode, stage_idx, local_step):\n local_path = f'data/{mode}/stage{stage_idx}/localstep={local_step}.csv'\n return self.path_for(local_path)\n\n def step_data(self, mode, stage_slice=None):\n stage_dirs = sorted(os.listdir(self.path_for('data/{}'.format(mode))))\n indices = [int(s[5:]) for s in stage_dirs]\n\n if stage_slice is None:\n pass\n elif isinstance(stage_slice, int):\n indices = [indices[stage_slice]]\n elif isinstance(stage_slice, slice):\n indices = indices[stage_slice]\n else:\n start, end, *step = stage_slice\n step = step[0] if step else 1\n indices = indices[start:end:step]\n\n data = {}\n\n for stage_idx in indices:\n local_path = 'data/{}/stage{}'.format(mode, stage_idx)\n path = self.path_for(local_path)\n files = os.listdir(path) if os.path.isdir(path) else []\n for f in files:\n local_step = float(f.split('=')[1].split('.')[0]) # Filename created by `get_data_path`\n data[(stage_idx, local_step)] = pd.read_csv(os.path.join(path, f))\n\n data_frames = [df for _, df in sorted(data.items())]\n if data_frames:\n return pd.concat(data_frames, axis=0, ignore_index=True)\n else:\n return None\n\n @property\n def config(self):\n if self._config is None:\n try:\n with open(self.path_for('config.pkl'), 'rb') as f:\n self._config = dill.load(f)\n except Exception:\n pass\n else:\n return self._config\n\n try:\n with open(self.path_for('config.json'), 'r') as f:\n self._config = json.load(f)\n except Exception:\n pass\n else:\n return self._config\n\n return self._config\n\n def get_config_value(self, key):\n if self.config is None:\n # A temporary hack to deal with version inconsistencies\n command = \"grep \\\"'{}':\\\" < {}\".format(key, self.path_for(\"config.txt\"))\n p = subprocess.run(command, shell=True, stdout=subprocess.PIPE)\n\n # Get the line with the least amount of indentation.\n lines = p.stdout.decode().split('\\n')\n lines = [l for l in lines if l.strip()]\n indentations = []\n for line in lines:\n n_leading_spaces = 0\n for c in line:\n if c.isspace():\n n_leading_spaces += 1\n else:\n break\n indentations.append((n_leading_spaces, line))\n smallest_indent = min(indentations)\n with_smallest = [(s, l) for s, l in indentations if s == smallest_indent[0]]\n assert len(with_smallest) == 1, with_smallest\n line = with_smallest[0][1]\n\n left, right = line.split(':')\n right = right.strip()[:-1]\n right = eval(right)\n return right\n else:\n return self.config[key]\n\n @property\n def n_stages(self):\n return len(self.history)\n\n @property\n 
def history(self):\n if self._history is None:\n try:\n with open(self.path_for('history.json'), 'r') as f:\n self._history = json.load(f)\n except Exception:\n with open(self.path_for('history.pkl'), 'rb') as f:\n self._history = dill.load(f)\n return self._history\n\n @property\n def modes(self):\n return os.listdir(self.path_for('summaries'))\n\n\nclass TrainingLoopData(FrozenTrainingLoopData):\n \"\"\" Data structure used by a TrainingLoop to manage data throughout the experiment. \"\"\"\n\n def setup(self):\n # Record training session environment for later diagnostic purposes\n frozen_config = cfg.freeze()\n git_mode = cfg.get('git_record_mode', 'all')\n self.record_environment(config=frozen_config, dill_recurse=True, git_mode=git_mode)\n self.curriculum = []\n\n self.make_directory('weights')\n self.make_directory('plots')\n self.make_directory('data')\n self.make_directory('summaries')\n\n self._history = []\n\n self.data = defaultdict(list)\n\n self.stage_idx = -1\n\n self.writers = {}\n\n @property\n def history(self):\n return self._history\n\n def start_stage(self, stage_idx, stage_config):\n self.history.append(dict(stage_idx=stage_idx, stage_config=stage_config))\n self.stage_idx = stage_idx\n self.writers = {}\n\n def end_stage(self, local_step=None):\n self.dump_data(local_step)\n\n for w in self.writers.values():\n w.close()\n\n def dump_data(self, local_step):\n if local_step is None:\n local_step = float(\"inf\") # Final dump for a stage\n\n for mode, data in self.data.items():\n if data:\n path = self.get_data_path(mode, self.stage_idx, local_step)\n\n with open(path, 'w') as f:\n pd.DataFrame.from_records(data).to_csv(f, index=False)\n\n self.data[mode] = []\n\n def record_values_for_stage(self, d=None, **kwargs):\n \"\"\" Record values for the current stage. \"\"\"\n d = d or {}\n self.current_stage_record.update(d)\n self.current_stage_record.update(kwargs)\n\n def store_scalar_summaries(self, mode, path, record, n_global_experiences):\n raise Exception(\"NotImplemented\")\n\n def store_step_data_and_summaries(\n self, stage_idx, local_step, global_step, n_local_experiences, n_global_experiences, **data):\n\n for mode, record in data.items():\n if not record:\n continue\n\n if getattr(cfg, 'store_step_data', True):\n record = record.copy()\n record.update(\n stage_idx=stage_idx,\n local_step=local_step,\n global_step=global_step,\n n_local_experiences=n_local_experiences,\n n_global_experiences=n_global_experiences)\n\n self.data[mode].append(record)\n\n path = self.get_summary_path(mode)\n self.store_scalar_summaries(mode, path, record, n_global_experiences)\n\n @property\n def current_stage_record(self):\n return self.history[-1]\n\n def _finalize(self):\n \"\"\" Write all stored data to disk. 
\"\"\"\n self.dump_data(None)\n\n with open(self.path_for('history.json'), 'w') as f:\n json.dump(self.history, f, default=str, indent=4, sort_keys=True)\n\n def freeze(self):\n self._finalize()\n return FrozenTrainingLoopData(self.path)\n\n def summarize_current_stage(self, local_step, global_step, n_local_experiences, n_global_experiences):\n stage_idx = self.current_stage_record['stage_idx']\n\n print(\"\\n{} Summary: Stage={}, Step(l={}, g={}), Experiences(l={}, g={}) {}\\n\".format(\n \"*\" * 20, stage_idx, local_step, global_step,\n n_local_experiences, n_global_experiences, \"*\" * 20))\n\n data = defaultdict(dict)\n\n for k, v in sorted(self.current_stage_record.items()):\n if isinstance(v, dict):\n v = \"\\n\" + pformat(v, indent=2)\n print(\"* {}: {}\".format(k, v))\n elif k.endswith(\"_path\") or not k.startswith(\"best_\"):\n print(\"* {}: {}\".format(k, v))\n else:\n data[k[5:]]['best'] = v\n\n for mode, mode_data in sorted(self.data.items()):\n if mode_data:\n record = mode_data[-1] or {}\n for k, v in sorted(record.items()):\n if isinstance(v, dict):\n v = \"\\n\" + pformat(v, indent=2)\n print(\"* {}_{}: {}\".format(mode, k, v))\n else:\n data[k][mode] = v\n\n headers = [\"key\", \"best\"] + sorted(self.data)\n table = [\n [key] + [row.get(k, None) for k in headers[1:]]\n for key, row in sorted(data.items())]\n\n print(tabulate(table, headers=headers, tablefmt=\"psql\"))\n\n def summarize(self):\n \"\"\" Summarize the training data.\n\n Parameters\n ----------\n steps: quadtuple of ints\n local_step, global_step, local_experience, global_experiences\n\n \"\"\"\n print(\"\\n\" + \"-\" * 30 + \" Stage-by-Stage Summary \" + \"-\" * 30 + \"\\n\")\n\n table = defaultdict(dict)\n\n for record in self.history:\n stage_idx = record['stage_idx']\n print(\"\\n\" + \"-\" * 20 + \" Stage {} \".format(stage_idx) + \"-\" * 20)\n\n record = Config(record).flatten()\n\n for k, v in sorted(record.items()):\n if isinstance(v, dict):\n v = \"\\n\" + pformat(v, indent=2)\n print(\"* {}: {}\".format(k, v))\n elif isinstance(v, str) and len(v) > 20:\n print(\"* {}: {}\".format(k, v))\n else:\n table[k][stage_idx] = v\n\n headers = [\"key\"] + list(range(len(self.history)))\n table = [\n [key] + [row.get(k, None) for k in headers[1:]]\n for key, row in sorted(table.items())]\n\n print()\n print(tabulate(table, headers=headers, tablefmt=\"psql\"))\n print()\n\n\nclass Hook:\n \"\"\" Hook called throughout training.\n\n Parameters\n ----------\n n: int\n Hook is called every n steps throughout training.\n initial: bool\n If True, this hook is called on the first step of a stage.\n final: bool\n If True, this hook is called at the end of stage, after loading\n the best hypothesis.\n\n \"\"\"\n def __init__(self, n=None, initial=False, final=False):\n self.n = n\n self.initial = initial\n self.final = final\n\n @property\n def call_per_timestep(self):\n return bool(self.n)\n\n def _attrs(self):\n return \"n initial final\".split()\n\n def __str__(self):\n attr_string = \", \".join(\n \"{}={}\".format(k, getattr(self, k)) for k in self._attrs())\n return(\"{}({})\".format(self.__class__.__name__, attr_string))\n\n def __repr__(self):\n return str(self)\n\n def start_stage(self, training_loop, updater, stage_idx):\n \"\"\" Called at the beginning of every stage. \"\"\"\n pass\n\n def end_stage(self, training_loop, updater, stage_idx):\n \"\"\" Called at the end of every stage, after best hypothesis has been reloaded. 
\"\"\"\n pass\n\n def step(self, training_loop, updater, step_idx):\n pass\n\n def final_step(self, training_loop, updater):\n \"\"\" Called during final testing for a stage. \"\"\"\n pass\n\n def _print(self, s):\n print(\"{}: {}\".format(self.__class__.__name__, s))\n"
] | [
[
"pandas.DataFrame.from_records",
"pandas.concat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
wilsonify/sktime | [
"68395d44bd3f46b0801c506e23e889dd54999d29"
] | [
"examples/scripts/dictionary_based_classification.py"
] | [
"# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.11.1\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# # Dictionary based time series classification in sktime\n#\n# Dictionary based approaches adapt the bag of words model commonly used in signal processing, computer vision and audio processing for time series classification.\n# Dictionary based classifiers have the same broad structure.\n# A sliding window of length $w$ is run across a series.\n# For each window, the real valued series of length $w$ is converted through approximation and discretisation processes into a symbolic string of length $l$, which consists of $\\alpha$ possible letters.\n# The occurrence in a series of each 'word' from the dictionary defined by $l$ and $\\alpha$ is counted, and once the sliding window has completed the series is transformed into a histogram.\n# Classification is based on the histograms of the words extracted from the series, rather than the raw data.\n#\n# Currently 4 univeriate dictionary based classifiers are implemented in sktime, all making use of the Symbolic Fourier Approximation (SFA)\\[1\\] transform to discretise into words.\n# These are the Bag of SFA Symbols (BOSS)\\[2\\], the Contractable Bag of SFA Symbols (cBOSS)\\[3\\], Word Extraction for Time Series Classification (WEASEL)\\[4\\] and the Temporal Dictionary Ensemble (TDE)\\[5\\]. WEASEL has a multivariate extension called MUSE\\[7\\] and TDE has multivariate capabilities.\n#\n# In this notebook, we will demonstrate how to use BOSS, cBOSS, WEASEL and TDE on the ItalyPowerDemand and JapaneseVowels datasets.\n#\n# #### References:\n#\n# \\[1\\] Schäfer, P., & Högqvist, M. (2012). SFA: a symbolic fourier approximation and index for similarity search in high dimensional datasets. In Proceedings of the 15th International Conference on Extending Database Technology (pp. 516-527).\n#\n# \\[2\\] Schäfer, P. (2015). The BOSS is concerned with time series classification in the presence of noise. Data Mining and Knowledge Discovery, 29(6), 1505-1530.\n#\n# \\[3\\] Middlehurst, M., Vickers, W., & Bagnall, A. (2019). Scalable dictionary classifiers for time series classification. In International Conference on Intelligent Data Engineering and Automated Learning (pp. 11-19). Springer, Cham.\n#\n# \\[4\\] Schäfer, P., & Leser, U. (2017). Fast and accurate time series classification with WEASEL. In Proceedings of the 2017 ACM on Conference on Information and Knowledge Management (pp. 637-646).\n#\n# \\[5\\] Middlehurst, M., Large, J., Cawley, G., & Bagnall, A. (2020). The Temporal Dictionary Ensemble (TDE) Classifier for Time Series Classification. In The European Conference on Machine Learning and Principles and Practice of Knowledge Discovery in Databases.\n#\n# \\[6\\] Large, J., Bagnall, A., Malinowski, S., & Tavenard, R. (2019). On time series classification with dictionary-based classifiers. Intelligent Data Analysis, 23(5), 1073-1089.\n#\n# \\[7\\] Schäfer, P., & Leser, U. (2018). Multivariate time series classification with WEASEL+MUSE. 3rd ECML/PKDD Workshop on AALTD.\n#\n# ## 1. 
Imports\n\n# + pycharm={\"name\": \"#%%\\n\"} tags=[]\nfrom sklearn import metrics\n\nfrom sktime.classification.dictionary_based import (\n MUSE,\n WEASEL,\n BOSSEnsemble,\n ContractableBOSS,\n TemporalDictionaryEnsemble,\n)\nfrom sktime.datasets import load_italy_power_demand\nfrom sktime.datasets.base import load_japanese_vowels\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# ## 2. Load data\n\n# + pycharm={\"name\": \"#%%\\n\"} tags=[]\nX_train, y_train = load_italy_power_demand(split=\"train\", return_X_y=True)\nX_test, y_test = load_italy_power_demand(split=\"test\", return_X_y=True)\nX_test = X_test[:50]\ny_test = y_test[:50]\n\nprint(X_train.shape, y_train.shape, X_test.shape, y_test.shape)\n\nX_train_mv, y_train_mv = load_japanese_vowels(split=\"train\", return_X_y=True)\nX_test_mv, y_test_mv = load_japanese_vowels(split=\"test\", return_X_y=True)\n\nX_train_mv = X_train_mv[:50]\ny_train_mv = y_train_mv[:50]\nX_test_mv = X_test_mv[:50]\ny_test_mv = y_test_mv[:50]\n\nprint(X_train_mv.shape, y_train_mv.shape, X_test_mv.shape, y_test_mv.shape)\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# ## 3. Bag of SFA Symbols (BOSS)\n#\n# BOSS is an ensemble of individual BOSS classifiers making use of the SFA transform.\n# The classifier performs grid-search through a large number of individual classifiers for parameters $l$, $\\alpha$, $w$ and $p$ (normalise each window).\n# Of the classifiers searched only those within 92\\% accuracy of the best classifier are retained.\n# Individual BOSS classifiers use a non-symmetric distance function, BOSS distance, in conjunction with a nearest neighbour classifier.\n#\n# As tuning is handled inside the classifier, BOSS has very few parameters to be altered and generally should be run using default settings.\n\n# + pycharm={\"name\": \"#%%\\n\"} tags=[]\nboss = BOSSEnsemble(random_state=47)\nboss.fit(X_train, y_train)\n\nboss_preds = boss.predict(X_test)\nprint(\"BOSS Accuracy: \" + str(metrics.accuracy_score(y_test, boss_preds)))\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# ## 4. Contractable BOSS (cBOSS)\n#\n# cBOSS significantly speeds up BOSS with no significant difference in accuracy by improving how the ensemble is formed.\n# cBOSS utilises a filtered random selection of parameters to find its ensemble members.\n# Each ensemble member is built on a 70% subsample of the train data, using random sampling without replacement.\n# An exponential weighting scheme for the predictions of the base classifiers is introduced.\n#\n# A new parameter for the number of parameter samples $k$ is introduced, of which the top $s$ (max ensemble size) with the highest accuracy are kept for the final ensemble.\n# The $k$ parameter is replaceable with a time limit $t$ through contracting.\n\n# + pycharm={\"name\": \"#%%\\n\"} tags=[]\n# Recommended non-contract cBOSS parameters\ncboss = ContractableBOSS(n_parameter_samples=250, max_ensemble_size=50, random_state=47)\n\n# cBOSS with a 5 minute build time contract\n# cboss = ContractableBOSS(time_limit=5,\n# max_ensemble_size=50,\n# random_state=47)\n\ncboss.fit(X_train, y_train)\n\ncboss_preds = cboss.predict(X_test)\nprint(\"cBOSS Accuracy: \" + str(metrics.accuracy_score(y_test, cboss_preds)))\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# ## 5. Word Extraction for Time Series Classification (WEASEL)\n#\n# ### Univariate\n#\n# WEASEL transforms time series into feature vectors, using a sliding-window approach, which are then analyzed through a machine learning classifier. 
The novelty of WEASEL lies in its specific method for deriving features, resulting in a much smaller yet much more discriminative feature set than BOSS. It extends SFA by bigrams, feature selection using Anova-f-test and Information Gain Binning (IGB).\n\n# + pycharm={\"name\": \"#%%\\n\"} tags=[]\nweasel = WEASEL(binning_strategy=\"equi-depth\", anova=False, random_state=47)\nweasel.fit(X_train, y_train)\n\nweasel_preds = weasel.predict(X_test)\nprint(\"WEASEL Accuracy: \" + str(metrics.accuracy_score(y_test, weasel_preds)))\n# -\n\n# ### Multivariate\n#\n# WEASEL+MUSE (Multivariate Symbolic Extension) is the multivariate extension of WEASEL.\n\n# + pycharm={\"name\": \"#%%\\n\"}\nmuse = MUSE()\nmuse.fit(X_train_mv, y_train_mv)\n\nmuse_preds = muse.predict(X_test_mv)\nprint(\"MUSE Accuracy: \" + str(metrics.accuracy_score(y_test_mv, muse_preds)))\n# -\n\n# ## 6. Temporal Dictionary Ensemble (TDE)\n#\n# ### Univariate\n#\n# TDE aggregates the best components of 3 classifiers extending from the original BOSS algorithm. The ensemble structure and improvements of cBOSS\\[3\\] are used; Spatial pyramids are introduced from Spatial BOSS (S-BOSS)\\[6\\]; From Word Extraction for Time Series Classification (WEASEL)\\[4\\] bigrams and Information Gain Binning (IGB), a replacement for the multiple coefficient binning (MCB) used by SFA, are included.\n# Two new parameters are included in the ensemble parameter search, the number of spatial pyramid levels $h$ and whether to use IGB or MCB $b$.\n# A Gaussian processes regressor is used to select new parameter sets to evaluate for the ensemble, predicting the accuracy of a set of parameter values using past classifier performances.\n#\n# Inheriting the cBOSS ensemble structure, the number of parameter samples $k$, time limit $t$ and max ensemble size $s$ remain as parameters to be set accounting for memory and time requirements.\n\n# + pycharm={\"name\": \"#%%\\n\"} tags=[]\n# Recommended non-contract TDE parameters\ntde_u = TemporalDictionaryEnsemble(\n n_parameter_samples=250,\n max_ensemble_size=50,\n randomly_selected_params=50,\n random_state=47,\n)\n\n# TDE with a 5 minute build time contract\n# tde = TemporalDictionaryEnsemble(time_limit=5,\n# max_ensemble_size=50,\n# randomly_selected_params=50,\n# random_state=47)\n\ntde_u.fit(X_train, y_train)\n\ntde_u_preds = tde_u.predict(X_test)\nprint(\"TDE Accuracy: \" + str(metrics.accuracy_score(y_test, tde_u_preds)))\n\n# + [markdown] pycharm={\"name\": \"#%% md\\n\"}\n# ### Multivariate\n\n# + pycharm={\"name\": \"#%%\\n\"}\n# Recommended non-contract TDE parameters\ntde_m = TemporalDictionaryEnsemble(\n n_parameter_samples=250,\n max_ensemble_size=50,\n randomly_selected_params=50,\n random_state=47,\n)\n\n# TDE with a 5 minute build time contract\n# tde_m = TemporalDictionaryEnsemble(time_limit=5,\n# max_ensemble_size=50,\n# randomly_selected_params=50,\n# random_state=47)\n\ntde_m.fit(X_train_mv, y_train_mv)\n\ntde_m_preds = tde_m.predict(X_test_mv)\nprint(\"TDE Accuracy: \" + str(metrics.accuracy_score(y_test_mv, tde_m_preds)))\n# -\n\n\n"
] | [
[
"sklearn.metrics.accuracy_score"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
violet-zct/fairseq | [
"5fd9b555428f004f72d4fe89e2a9d2c863c07581",
"5fd9b555428f004f72d4fe89e2a9d2c863c07581"
] | [
"fairseq/modules/typed_transformer_layer.py",
"fairseq/data/monolingual_dataset.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom fairseq import utils\nfrom fairseq.modules import LayerNorm, MultiheadAttention\n\n\nclass TransformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer block.\n\n In the original paper each operation (multi-head attention or FFN) is\n postprocessed with: `dropout -> add residual -> layernorm`. In the\n tensor2tensor code they suggest that learning is more robust when\n preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.encoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n \"\"\"\n\n def __init__(self, encoder_embed_dim, encoder_attention_heads,\n encoder_ffn_embed_dim, args):\n super().__init__()\n self.embed_dim = encoder_embed_dim\n self.self_attn = MultiheadAttention(\n self.embed_dim, encoder_attention_heads,\n dropout= args.attention_dropout, self_attention=True\n )\n self.self_attn_layer_norm = LayerNorm(self.embed_dim)\n self.dropout = args.dropout\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, 'activation_fn', 'relu')\n )\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\n if self.activation_dropout == 0:\n # for backwards compatibility with models that use args.relu_dropout\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\n self.normalize_before = args.encoder_normalize_before\n self.fc1 = Linear(self.embed_dim, encoder_ffn_embed_dim)\n self.fc2 = Linear(encoder_ffn_embed_dim, self.embed_dim)\n self.final_layer_norm = LayerNorm(self.embed_dim)\n\n def upgrade_state_dict_named(self, state_dict, name):\n \"\"\"\n Rename layer norm states from `...layer_norms.0.weight` to\n `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to\n `...final_layer_norm.weight`\n \"\"\"\n layer_norm_map = {\n '0': 'self_attn_layer_norm',\n '1': 'final_layer_norm'\n }\n for old, new in layer_norm_map.items():\n for m in ('weight', 'bias'):\n k = '{}.layer_norms.{}.{}'.format(name, old, m)\n if k in state_dict:\n state_dict[\n '{}.{}.{}'.format(name, new, m)\n ] = state_dict[k]\n del state_dict[k]\n\n def forward(self, x, encoder_padding_mask, attn_mask=None):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor): binary ByteTensor of shape\n `(batch, src_len)` where padding elements are indicated by ``1``.\n attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where\n T_tgt is the length of query, while T_src is the length of key,\n though here both query and key is x here,\n attn_mask[t_tgt, t_src] = 1 means when calculating embedding\n for t_tgt, t_src is excluded (or masked out), =0 means it is\n included in attention\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n residual = x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\n if attn_mask is not None:\n attn_mask = attn_mask.masked_fill(attn_mask.bool(), -1e8)\n # anything in original attn_mask = 1, becomes -1e8\n # anything in original attn_mask = 0, becomes 0\n # Note that we cannot use -inf here, because at some edge cases,\n # the attention weight (before softmax) for some padded element in 
query\n # will become -inf, which results in NaN in model parameters\n # TODO: to formally solve this problem, we need to change fairseq's\n # MultiheadAttention. We will do this later on.\n x, _ = self.self_attn(query=x, key=x, value=x, key_padding_mask=encoder_padding_mask, attn_mask=attn_mask)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\n\n residual = x\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\n return x\n\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return layer_norm(x)\n else:\n return x\n\n\nclass TransformerDecoderLayer(nn.Module):\n \"\"\"Decoder layer block.\n\n In the original paper each operation (multi-head attention, encoder\n attention or FFN) is postprocessed with: `dropout -> add residual ->\n layernorm`. In the tensor2tensor code they suggest that learning is more\n robust when preprocessing each layer with layernorm and postprocessing with:\n `dropout -> add residual`. We default to the approach in the paper, but the\n tensor2tensor approach can be enabled by setting\n *args.decoder_normalize_before* to ``True``.\n\n Args:\n args (argparse.Namespace): parsed command-line arguments\n no_encoder_attn (bool, optional): whether to attend to encoder outputs\n (default: False).\n \"\"\"\n\n def __init__(self, args, decoder_embed_dim, decoder_attention_heads, decoder_ffn_embed_dim,\n encoder_embed_dim, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False,):\n\n super().__init__()\n self.embed_dim = decoder_embed_dim\n self.cross_self_attention = getattr(args, 'cross_self_attention', False)\n self.self_attn = MultiheadAttention(\n embed_dim=self.embed_dim,\n num_heads= decoder_attention_heads,\n dropout=args.attention_dropout,\n add_bias_kv=add_bias_kv,\n add_zero_attn=add_zero_attn,\n self_attention=not self.cross_self_attention,\n )\n self.dropout = args.dropout\n self.activation_fn = utils.get_activation_fn(\n activation=getattr(args, 'activation_fn', 'relu')\n )\n self.activation_dropout = getattr(args, 'activation_dropout', 0)\n if self.activation_dropout == 0:\n # for backwards compatibility with models that use args.relu_dropout\n self.activation_dropout = getattr(args, 'relu_dropout', 0)\n self.normalize_before = args.decoder_normalize_before\n\n # use layerNorm rather than FusedLayerNorm for exporting.\n # char_inputs can be used to determint this.\n # TODO remove this once we update apex with the fix\n export = getattr(args, 'char_inputs', False)\n self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n\n if no_encoder_attn:\n self.encoder_attn = None\n self.encoder_attn_layer_norm = None\n else:\n self.encoder_attn = MultiheadAttention(\n self.embed_dim,\n decoder_attention_heads,\n kdim=encoder_embed_dim,\n vdim=encoder_embed_dim,\n dropout=args.attention_dropout,\n encoder_decoder_attention=True,\n )\n self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)\n\n self.fc1 = Linear(self.embed_dim, decoder_ffn_embed_dim)\n self.fc2 = Linear(decoder_ffn_embed_dim, self.embed_dim)\n\n self.final_layer_norm = LayerNorm(self.embed_dim, export=export)\n 
self.need_attn = True\n\n self.onnx_trace = False\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def forward(\n self,\n x,\n encoder_out=None,\n encoder_padding_mask=None,\n incremental_state=None,\n prev_self_attn_state=None,\n prev_attn_state=None,\n self_attn_mask=None,\n self_attn_padding_mask=None,\n need_attn=False,\n need_head_weights=False,\n cur_tgt_pos=None,\n global_vector=None,\n ):\n \"\"\"\n Args:\n x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`\n encoder_padding_mask (ByteTensor, optional): binary\n ByteTensor of shape `(batch, src_len)` where padding\n elements are indicated by ``1``.\n need_attn (bool, optional): return attention weights\n need_head_weights (bool, optional): return attention weights\n for each head (default: return average over heads).\n\n Returns:\n encoded output of shape `(seq_len, batch, embed_dim)`\n \"\"\"\n if need_head_weights:\n need_attn = True\n residual = x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, before=True)\n if prev_self_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_self_attn_state[:2]\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n if len(prev_self_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_self_attn_state[2]\n self.self_attn._set_input_buffer(incremental_state, saved_state)\n\n if self.cross_self_attention and not (incremental_state is not None and \"prev_key\" in self.self_attn._get_input_buffer(incremental_state)):\n if self_attn_mask is not None:\n self_attn_mask = torch.cat((x.new(x.size(0), encoder_out.size(0)).zero_(), self_attn_mask), dim=1)\n if self_attn_padding_mask is not None:\n if encoder_padding_mask is None:\n encoder_padding_mask = self_attn_padding_mask.new(encoder_out.size(1), encoder_out.size(0)).zero_()\n self_attn_padding_mask = torch.cat((encoder_padding_mask, self_attn_padding_mask), dim=1)\n y = torch.cat((encoder_out, x), dim=0)\n else:\n y = x\n\n x, attn = self.self_attn(\n query=x if global_vector is None else x + global_vector,\n key=y,\n value=y,\n key_padding_mask=self_attn_padding_mask,\n incremental_state=incremental_state,\n need_weights=False,\n attn_mask=self_attn_mask,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.self_attn_layer_norm, x, after=True)\n\n if self.encoder_attn is not None:\n residual = x\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, before=True)\n if prev_attn_state is not None:\n if incremental_state is None:\n incremental_state = {}\n prev_key, prev_value = prev_attn_state[:2]\n saved_state = {\"prev_key\": prev_key, \"prev_value\": prev_value}\n if len(prev_attn_state) >= 3:\n saved_state[\"prev_key_padding_mask\"] = prev_attn_state[2]\n self.encoder_attn._set_input_buffer(incremental_state, saved_state)\n\n x, attn = self.encoder_attn(\n query=x if global_vector is None else x + global_vector,\n key=encoder_out,\n value=encoder_out,\n key_padding_mask=encoder_padding_mask,\n incremental_state=incremental_state,\n static_kv=True,\n need_weights=need_attn or (not self.training and self.need_attn),\n need_head_weights=need_head_weights,\n cur_tgt_pos=cur_tgt_pos,\n )\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.encoder_attn_layer_norm, x, after=True)\n\n residual = x\n x = self.maybe_layer_norm(self.final_layer_norm, x, before=True)\n x = 
self.activation_fn(self.fc1(x))\n x = F.dropout(x, p=self.activation_dropout, training=self.training)\n x = self.fc2(x)\n x = F.dropout(x, p=self.dropout, training=self.training)\n x = residual + x\n x = self.maybe_layer_norm(self.final_layer_norm, x, after=True)\n if self.onnx_trace and incremental_state is not None:\n saved_state = self.self_attn._get_input_buffer(incremental_state)\n if self_attn_padding_mask is not None:\n self_attn_state = saved_state[\"prev_key\"], saved_state[\"prev_value\"], saved_state[\"prev_key_padding_mask\"]\n else:\n self_attn_state = saved_state[\"prev_key\"], saved_state[\"prev_value\"]\n return x, attn, self_attn_state\n return x, attn\n\n def maybe_layer_norm(self, layer_norm, x, before=False, after=False):\n assert before ^ after\n if after ^ self.normalize_before:\n return layer_norm(x)\n else:\n return x\n\n def make_generation_fast_(self, need_attn=False, **kwargs):\n self.need_attn = need_attn\n\n\ndef Linear(in_features, out_features, bias=True):\n m = nn.Linear(in_features, out_features, bias)\n nn.init.xavier_uniform_(m.weight)\n if bias:\n nn.init.constant_(m.bias, 0.)\n return m\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport torch\n\nfrom . import data_utils, FairseqDataset\nimport math\n\ndef collate(samples, pad_idx, eos_idx):\n if len(samples) == 0:\n return {}\n\n def merge(key, is_list=False):\n if is_list:\n res = []\n for i in range(len(samples[0][key])):\n res.append(data_utils.collate_tokens(\n [s[key][i] for s in samples], pad_idx, eos_idx, left_pad=False,\n ))\n return res\n else:\n return data_utils.collate_tokens(\n [s[key] for s in samples], pad_idx, eos_idx, left_pad=False,\n )\n\n src_tokens = merge('source')\n if samples[0]['target'] is not None:\n is_target_list = isinstance(samples[0]['target'], list)\n target = merge('target', is_target_list)\n else:\n target = src_tokens\n\n return {\n 'id': torch.LongTensor([s['id'] for s in samples]),\n 'nsentences': len(samples),\n 'ntokens': sum(len(s['source']) for s in samples),\n 'net_input': {\n 'src_tokens': src_tokens,\n 'src_lengths': torch.LongTensor([\n s['source'].numel() for s in samples\n ]),\n },\n 'target': target,\n }\n\n\nclass MonolingualDataset(FairseqDataset):\n \"\"\"\n A wrapper around torch.utils.data.Dataset for monolingual data.\n\n Args:\n dataset (torch.utils.data.Dataset): dataset to wrap\n sizes (List[int]): sentence lengths\n vocab (~fairseq.data.Dictionary): vocabulary\n shuffle (bool, optional): shuffle the elements before batching\n (default: True).\n \"\"\"\n\n def __init__(self, dataset, sizes, src_vocab, tgt_vocab, add_eos_for_other_targets, shuffle,\n targets=None, add_bos_token=False):\n self.dataset = dataset\n self.sizes = np.array(sizes)\n self.vocab = src_vocab\n self.tgt_vocab = tgt_vocab\n self.add_eos_for_other_targets = add_eos_for_other_targets\n self.shuffle = shuffle\n self.add_bos_token = add_bos_token\n\n assert targets is None or all(t in {'self', 'future', 'past'} for t in targets), \\\n \"targets must be none or one of 'self', 'future', 'past'\"\n if targets is not None and len(targets) == 0:\n targets = None\n self.targets = targets\n\n def __getitem__(self, index):\n if self.targets is not None:\n # *future_target* is the original sentence\n # *source* is shifted right by 1 (maybe left-padded with eos)\n # *past_target* is shifted right by 2 (left-padded as needed)\n #\n # Left-to-right language models should condition on *source* and\n # predict *future_target*.\n # Right-to-left language models should condition on *source* and\n # predict *past_target*.\n source, future_target, past_target = self.dataset[index]\n source, target = self._make_source_target(source, future_target, past_target)\n else:\n source = self.dataset[index]\n target = None\n source, target = self._maybe_add_bos(source, target)\n return {'id': index, 'source': source, 'target': target}\n\n def __len__(self):\n return len(self.dataset)\n\n def _make_source_target(self, source, future_target, past_target):\n if self.targets is not None:\n target = []\n\n if self.add_eos_for_other_targets and (('self' in self.targets) or ('past' in self.targets)) \\\n and source[-1] != self.vocab.eos():\n # append eos at the end of source\n source = torch.cat([source, source.new([self.vocab.eos()])])\n\n if 'future' in self.targets:\n future_target = torch.cat([future_target, future_target.new([self.vocab.pad()])])\n if 'past' in self.targets:\n # first token is before the start of sentence which is only used in \"none\" break mode when\n 
# add_eos_for_other_targets is False\n past_target = torch.cat([past_target.new([self.vocab.pad()]), past_target[1:], source[-2, None]])\n\n for t in self.targets:\n if t == 'self':\n target.append(source)\n elif t == 'future':\n target.append(future_target)\n elif t == 'past':\n target.append(past_target)\n else:\n raise Exception('invalid target ' + t)\n\n if len(target) == 1:\n target = target[0]\n else:\n target = future_target\n\n return source, self._filter_vocab(target)\n\n def _maybe_add_bos(self, source, target):\n if self.add_bos_token:\n source = torch.cat([source.new([self.vocab.bos()]), source])\n if target is not None:\n target = torch.cat([target.new([self.tgt_vocab.bos()]), target])\n return source, target\n\n def _filter_vocab(self, target):\n if len(self.tgt_vocab) != len(self.vocab):\n def _filter(target):\n mask = target.ge(len(self.tgt_vocab))\n if mask.any():\n target[mask] = self.tgt_vocab.unk()\n return target\n\n if isinstance(target, list):\n return [_filter(t) for t in target]\n return _filter(target)\n return target\n\n def collater(self, samples):\n \"\"\"Merge a list of samples to form a mini-batch.\n\n Args:\n samples (List[dict]): samples to collate\n\n Returns:\n dict: a mini-batch with the following keys:\n\n - `id` (LongTensor): example IDs in the original input order\n - `ntokens` (int): total number of tokens in the batch\n - `net_input` (dict): the input to the Model, containing keys:\n\n - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in\n the source sentence of shape `(bsz, src_len)`. Padding will\n appear on the right.\n\n - `target` (LongTensor): a padded 2D Tensor of tokens in the\n target sentence of shape `(bsz, tgt_len)`. Padding will appear\n on the right.\n \"\"\"\n return collate(samples, self.vocab.pad(), self.vocab.eos())\n\n def num_tokens(self, index):\n \"\"\"Return the number of tokens in a sample. This value is used to\n enforce ``--max-tokens`` during batching.\"\"\"\n return self.sizes[index]\n\n def size(self, index):\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n return self.sizes[index]\n\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. Batches will be constructed based\n on this order.\"\"\"\n if self.shuffle:\n order = [np.random.permutation(len(self))]\n if hasattr(self.dataset, 'context_mode') and self.dataset.context_mode == 'window':\n order = [order[0][:math.ceil(len(self)*0.3)]]\n order.append(self.sizes[order[0]])\n else:\n order.append(self.sizes)\n else:\n order = [np.arange(len(self))]\n order.append(self.sizes)\n return np.lexsort(order)\n\n @property\n def supports_prefetch(self):\n return getattr(self.dataset, 'supports_prefetch', False)\n\n def prefetch(self, indices):\n self.dataset.prefetch(indices)\n"
] | [
[
"torch.nn.functional.dropout",
"torch.nn.init.constant_",
"torch.cat",
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_"
],
[
"torch.LongTensor",
"numpy.array",
"numpy.lexsort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
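The encoder layer in the record above fills masked attention positions with -1e8 instead of -inf, and its comment warns that -inf can turn a fully padded query row into NaN after softmax. The snippet below is a standalone illustration of that edge case, not code from fairseq; the toy scores and mask are fabricated.

```python
# Standalone demo of the additive-mask trick from the encoder layer above:
# positions with mask == 1 get -1e8 rather than -inf, so a row that is
# entirely masked still softmaxes to finite (uniform) weights instead of NaN.
import torch
import torch.nn.functional as F

scores = torch.randn(2, 4)                 # toy pre-softmax attention logits
attn_mask = torch.tensor([[0, 1, 1, 0],    # row 0: two positions masked
                          [1, 1, 1, 1]])   # row 1: fully masked edge case

safe = scores.masked_fill(attn_mask.bool(), -1e8)
print(F.softmax(safe, dim=-1))             # row 1 is finite and ~uniform

naive = scores.masked_fill(attn_mask.bool(), float('-inf'))
print(F.softmax(naive, dim=-1))            # row 1 is all NaN (exp sums to 0)
```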
XiangqianMa/AI-Competition-HuaWei | [
"d479b772f446033d32a124b80a1f9cd835988020"
] | [
"losses/CE_label_smooth.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nclass CrossEntropyLabelSmooth(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n Equation: q_i = (1 - epsilon) * a_i + epsilon / N.\n\n Args:\n num_classes (int): number of classes.\n epsilon (float): weight.\n \"\"\"\n def __init__(self, num_classes, epsilon=0.1, use_gpu=True):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.use_gpu = use_gpu\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n '''\n scatter_第一个参数为1表示分别对每行填充;targets.unsqueeze(1)得到的维度为[num_classes, 1];\n 填充方法为:取出targets的第i行中的第一个元素(每行只有一个元素),记该值为j;则前面tensor中的(i,j)元素填充1;\n 最终targets的维度为[batch_size, num_classes],每一行代表一个样本,若该样本类别为j,则只有第j元素为1,其余元素为0\n '''\n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)\n if self.use_gpu:\n targets = targets.cuda()\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n # mean(0)表示缩减第0维,也就是按列求均值,得到维度为[num_classes],得到该batch内每一个类别的损失,再求和\n loss = (- targets * log_probs).mean(0).sum()\n return loss\n\n\nclass CrossEntropyLabelSmoothHardMining(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n Equation: q_i = (1 - epsilon) * a_i + epsilon / N.\n\n Args:\n num_classes (int): number of classes.\n epsilon (float): weight.\n \"\"\"\n def __init__(self, num_classes, epsilon=0.1, ratio=0.6, use_gpu=True):\n super(CrossEntropyLabelSmoothHardMining, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.use_gpu = use_gpu\n self.logsoftmax = nn.LogSoftmax(dim=1)\n self.ratio = ratio\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs)\n '''\n scatter_第一个参数为1表示分别对每行填充;targets.unsqueeze(1)得到的维度为[num_classes, 1];\n 填充方法为:取出targets的第i行中的第一个元素(每行只有一个元素),记该值为j;则前面tensor中的(i,j)元素填充1;\n 最终targets的维度为[batch_size, num_classes],每一行代表一个样本,若该样本类别为j,则只有第j元素为1,其余元素为0\n '''\n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)\n if self.use_gpu:\n targets = targets.cuda()\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n # 先求各个样本的损失\n loss = (- targets * log_probs).sum(1)\n selected_number = int(inputs.size(0) * self.ratio)\n # 按照样本取均值\n loss = torch.sort(loss, descending=True)[0][:selected_number].sum() / selected_number\n return loss\n\n\nif __name__ == '__main__':\n criterion = CrossEntropyLabelSmoothHardMining(54)\n input = torch.Tensor(5, 54).cuda()\n target = torch.ones(5).long()\n loss = criterion(input, target)\n"
] | [
[
"torch.nn.LogSoftmax",
"torch.ones",
"torch.sort",
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
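Both losses in the record above build smoothed targets from the Szegedy et al. equation q_i = (1 - epsilon) * a_i + epsilon / N. Below is a minimal CPU-only sketch of that construction, with fabricated shapes and the class's default epsilon, for tracing the scatter_ step in isolation; it is not the repository's code.

```python
# Minimal sketch of the smoothed-target construction used by both classes
# above (CPU only, no hard mining). All tensors here are fabricated.
import torch
import torch.nn as nn

batch_size, num_classes, epsilon = 4, 10, 0.1
logits = torch.randn(batch_size, num_classes)
labels = torch.randint(0, num_classes, (batch_size,))

log_probs = nn.LogSoftmax(dim=1)(logits)
# one_hot[i, labels[i]] = 1; scatter_'s first argument (dim=1) fills per row
one_hot = torch.zeros_like(log_probs).scatter_(1, labels.unsqueeze(1), 1)
# q_i = (1 - epsilon) * a_i + epsilon / N; each row still sums to 1
smoothed = (1 - epsilon) * one_hot + epsilon / num_classes

loss = (-smoothed * log_probs).mean(0).sum()
print(float(loss))
```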
FeliMe/autoseg | [
"627a6b2bda3f6da8ea7c65742b9e9d3b7d6cc845"
] | [
"uas_mood/utils/data_utils.py"
] | [
"from PIL import Image\nimport matplotlib.pyplot as plt\nimport nibabel as nib\nimport numpy as np\nfrom skimage.exposure import equalize_hist\nfrom skimage.transform import resize\nimport torch\nfrom torchvision import transforms\n\n\ndef plot(image, f=None):\n plt.axis(\"off\")\n plt.imshow(image, cmap=\"gray\", vmin=0., vmax=1.)\n if f is None:\n plt.show()\n else:\n plt.savefig(f, bbox_inches='tight', pad_inches=0)\n\n\ndef volume_viewer(volume, initial_position=None, slices_first=True):\n \"\"\"Plot a volume of shape [x, y, slices]\n Useful for MR and CT image volumes\n\n Args:\n volume (torch.Tensor or np.ndarray): With shape [slices, h, w]\n initial_position (list or tuple of len 3): (Optional)\n slices_first (bool): If slices are first or last dimension in volume\n \"\"\"\n\n def remove_keymap_conflicts(new_keys_set):\n for prop in plt.rcParams:\n if prop.startswith('keymap.'):\n keys = plt.rcParams[prop]\n remove_list = set(keys) & new_keys_set\n for key in remove_list:\n keys.remove(key)\n\n def previous_slice(ax):\n volume = ax.volume\n d = volume.shape[0]\n ax.index = (ax.index + 1) % d\n ax.images[0].set_array(volume[ax.index])\n ax.texts.pop()\n ax.text(5, 15, f\"Slice: {d - ax.index}\", color=\"white\")\n\n def next_slice(ax):\n volume = ax.volume\n d = volume.shape[0]\n ax.index = (ax.index - 1) % d\n ax.images[0].set_array(volume[ax.index])\n ax.texts.pop()\n ax.text(5, 15, f\"Slice: {d - ax.index}\", color=\"white\")\n\n def process_key(event):\n fig = event.canvas.figure\n # Move axial (slices)\n if event.key == 'k':\n next_slice(fig.axes[0])\n elif event.key == 'j':\n previous_slice(fig.axes[0])\n # Move coronal (h)\n elif event.key == 'u':\n previous_slice(fig.axes[1])\n elif event.key == 'i':\n next_slice(fig.axes[1])\n # Move saggital (w)\n elif event.key == 'h':\n previous_slice(fig.axes[2])\n elif event.key == 'l':\n next_slice(fig.axes[2])\n fig.canvas.draw()\n\n def prepare_volume(volume, slices_first):\n # Convert to numpy\n if isinstance(volume, torch.Tensor):\n volume = volume.numpy()\n\n # Omit batch dimension\n if volume.ndim == 4:\n volume = volume[0]\n\n # If image is not loaded with slices_first, put slices dimension first\n if not slices_first:\n volume = np.moveaxis(volume, 2, 0)\n\n # Pad slices\n if volume.shape[0] < volume.shape[1]:\n pad_size = (volume.shape[1] - volume.shape[0]) // 2\n pad = [(0, 0)] * volume.ndim\n pad[0] = (pad_size, pad_size)\n volume = np.pad(volume, pad)\n\n # Flip directions for display\n volume = np.flip(volume, (0, 1, 2))\n\n return volume\n\n def plot_ax(ax, volume, index, title):\n ax.volume = volume\n shape = ax.volume.shape\n d = shape[0]\n ax.index = d - index\n aspect = shape[2] / shape[1]\n ax.imshow(ax.volume[ax.index], aspect=aspect, vmin=0., vmax=1.)\n ax.set_title(title)\n ax.text(5, 15, f\"Slice: {d - ax.index}\", color=\"white\")\n\n plt.rcParams['image.cmap'] = 'gray'\n plt.rcParams['image.interpolation'] = 'nearest'\n\n remove_keymap_conflicts({'h', 'j', 'k', 'l'})\n\n volume = prepare_volume(volume, slices_first)\n\n if initial_position is None:\n initial_position = torch.tensor(volume.shape) // 2\n\n # Volume shape [slices, h, w]\n fig, ax = plt.subplots(1, 3, figsize=(12, 4))\n plot_ax(ax[0], np.transpose(volume, (0, 2, 1)), initial_position[2],\n \"axial\") # axial [slices, h, w]\n plot_ax(ax[1], np.transpose(volume, (2, 0, 1)), initial_position[1],\n \"coronal\") # saggital [h, slices, w]\n plot_ax(ax[2], np.transpose(volume, (1, 0, 2)), initial_position[0],\n \"sagittal\") # coronal [w, slices, h]\n 
fig.canvas.mpl_connect('key_press_event', process_key)\n print(\"Plotting volume, navigate:\"\n \"\\naxial with 'j', 'k'\"\n \"\\ncoronal with 'u', 'i'\"\n \"\\nsaggital with 'h', 'l'\")\n plt.show()\n\n\ndef write_txt(path: str, msg: str) -> None:\n with open(path, \"w\") as f:\n f.write(msg)\n\n\ndef load_nii(path: str, size: int = None, primary_axis: int = 0,\n dtype: str = \"float32\"):\n \"\"\"Load a neuroimaging file with nibabel, [w, h, slices]\n https://nipy.org/nibabel/reference/nibabel.html\n\n Args:\n path (str): Path to nii file\n size (int): Optional. Output size for h and w. Only supports rectangles\n primary_axis (int): Primary axis (the one to slice along, usually 2)\n dtype (str): Numpy datatype\n\n Returns:\n volume (np.ndarray): Of shape [w, h, slices]\n affine (np.ndarray): Affine coordinates (rotation and translation),\n shape [4, 4]\n \"\"\"\n # Load file\n data = nib.load(path, keep_file_open=False)\n volume = data.get_fdata(caching='unchanged') # [w, h, slices]\n affine = data.affine\n\n # Squeeze optional 4th dimension\n if volume.ndim == 4:\n volume = volume.squeeze(-1)\n\n # Resize if size is given and if necessary\n if size is not None and (volume.shape[0] != size or volume.shape[1] != size):\n volume = resize(volume, [size, size, size])\n\n # Convert\n volume = volume.astype(np.dtype(dtype))\n\n # Move primary axis to first dimension\n volume = np.moveaxis(volume, primary_axis, 0)\n\n return volume, affine\n\n\ndef save_nii(path: str, volume: np.ndarray, affine: np.ndarray = None,\n dtype: str = \"float32\", primary_axis: int = 0) -> None:\n \"\"\"Save a neuroimaging file (.nii) with nibabel\n https://nipy.org/nibabel/reference/nibabel.html\n\n Args:\n path (str): Path to save file at\n volume (np.ndarray): Image as numpy array\n affine (np.ndarray): Affine transformation that determines the\n world-coordinates of the image elements\n dtype (str): Numpy dtype of saved image\n primary_axis (int): The primary axis. 
Needs to be put back in place\n \"\"\"\n if affine is None:\n affine = np.eye(4)\n volume = np.moveaxis(volume, 0, primary_axis)\n nib.save(nib.Nifti1Image(volume.astype(dtype), affine), path)\n\n\ndef histogram_equalization(volume):\n # Create equalization mask\n mask = np.zeros_like(volume)\n mask[volume > 0] = 1\n\n # Equalize\n dtype = volume.dtype\n volume = equalize_hist(volume, nbins=256, mask=mask).astype(dtype)\n\n # Assure that background still is 0\n volume *= mask\n\n return volume\n\n\ndef process_scan(path: str, size: int = None, equalize_hist: bool = False,\n return_affine: bool = False) -> np.ndarray:\n \"\"\"Load and pre-process a medical 3D scan\n\n Args:\n path (str): Path to file\n size (int): Optional, spatial dimension (height / width)\n equalize_hist (bool): Perform histogram equalization\n return_affine (bool): Whether to return the affine transformation matrix\n\n Returns:\n volume (torch.Tensor): Loaded and pre-processed scan\n affine (np.ndarray): Affine transformation matrix\n \"\"\"\n\n # Load\n volume, affine = load_nii(path=path, size=size, primary_axis=2, dtype=\"float32\")\n\n # Pre-processing\n if equalize_hist:\n volume = histogram_equalization(volume)\n\n if return_affine:\n return volume, affine\n else:\n return volume\n\n\ndef load_segmentation(path: str, size: int = None, bin_threshold: float = 0.4):\n \"\"\"Load a segmentation file\n\n Args:\n path (str): Path to file\n size (int): Optional, spatial dimension (height / width)\n bin_threshold (float): Optional, threshold at which a pixel belongs to\n the segmentation\n \"\"\"\n\n # Load\n segmentation, _ = load_nii(path, size=size, primary_axis=2, dtype='float32')\n\n # Binarize\n segmentation = np.where(\n segmentation > bin_threshold, 1, 0).astype(np.short)\n\n return segmentation\n\n\ndef load_image(path: str, img_size: int = None):\n img = Image.open(path).convert(\"L\")\n if img_size is None:\n return transforms.ToTensor()(img)\n else:\n return transforms.Compose([\n transforms.Resize(img_size),\n transforms.ToTensor()\n ])(img)\n\n\nif __name__ == '__main__':\n size = 256\n # path = \"/home/felix/datasets/MOOD/brain/test_label/pixel/00480_uniform_shift.nii.gz\"\n # path = \"/home/felix/datasets/MOOD/abdom/test_label/pixel/00330_slice_shuffle.nii.gz\"\n # segmentation = load_segmentation(path, size=size)\n # path = \"/home/felix/datasets/MOOD/brain/test/00480_uniform_shift.nii.gz\"\n # path = \"/home/felix/datasets/MOOD/abdom/test/00330_slice_shuffle.nii.gz\"\n path = \"/home/felix/datasets/MOOD/brain/train/00000.nii.gz\"\n volume = process_scan(path, size=size, equalize_hist=False)\n print(volume.shape)\n volume_viewer(volume)\n import IPython ; IPython.embed() ; exit(1)\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.pad",
"numpy.eye",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.transpose",
"numpy.dtype",
"torch.tensor",
"numpy.zeros_like",
"matplotlib.pyplot.axis",
"numpy.moveaxis",
"matplotlib.pyplot.show",
"numpy.flip",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
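load_nii/save_nii in the record above are thin wrappers around nibabel's Nifti1Image round trip plus an axis move. The sketch below reproduces that round trip under the same conventions ([w, h, slices] on disk, slice axis moved to the front in memory); the file name is made up and only numpy and nibabel are assumed.

```python
# Round-trip sketch of the nibabel pattern in load_nii/save_nii above.
import numpy as np
import nibabel as nib

volume = np.random.rand(64, 64, 32).astype("float32")   # [w, h, slices]
affine = np.eye(4)                                       # identity world coords
nib.save(nib.Nifti1Image(volume, affine), "toy.nii.gz")  # hypothetical path

img = nib.load("toy.nii.gz")
loaded = img.get_fdata(caching="unchanged").astype("float32")
loaded = np.moveaxis(loaded, 2, 0)                       # -> [slices, w, h]
print(loaded.shape, np.allclose(loaded, np.moveaxis(volume, 2, 0)))
```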
wj1tr0y/openpose | [
"b0971af64080e36992b588becdd920823fac179d"
] | [
"1_extract_pose.py"
] | [
"'''\n@Author: Jilong Wang\n@Date: 2019-01-10 14:12:01\n@LastEditors: Jilong Wang\n@Email: [email protected]\n@LastEditTime: 2019-01-15 11:16:42\n@Description: file content\n'''\n# From Python\n# It requires OpenCV installed for Python\nimport sys\nimport cv2\nimport os\nfrom sys import platform\nimport numpy as np\n# Remember to add your installation path here\n# Option a\ndir_path = os.path.dirname(os.path.realpath(__file__))\nif platform == \"win32\": sys.path.append(dir_path + '/../../python/openpose/');\nelse: sys.path.append('./build/python');\n# Option b\n# If you run `make install` (default path is `/usr/local/python` for Ubuntu), you can also access the OpenPose/python module from there. This will install OpenPose and the python library at your desired installation path. Ensure that this is in your python path in order to use it.\n# sys.path.append('/usr/local/python')\n\n# Parameters for OpenPose. Take a look at C++ OpenPose example for meaning of components. Ensure all below are filled\ntry:\n from openpose import *\nexcept:\n raise Exception('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')\n \ndef init():\n params = dict()\n params[\"logging_level\"] = 3\n params[\"output_resolution\"] = \"-1x-1\"\n params[\"net_resolution\"] = \"-1x256\"\n params[\"model_pose\"] = \"BODY_25\"\n params[\"alpha_pose\"] = 0.6\n params[\"scale_gap\"] = 0.25\n params[\"scale_number\"] = 1\n params[\"render_threshold\"] = 0.15\n # If GPU version is built, and multiple GPUs are available, set the ID here\n params[\"num_gpu_start\"] = 0\n params[\"disable_blending\"] = False\n # Ensure you point to the correct path where models are located\n params[\"default_model_folder\"] = \"./models/\"\n # Construct OpenPose object allocates GPU memory\n openpose = OpenPose(params)\n return openpose\n\ndef check_integrity(points):\n '''\n @description: get an image then return how many keypoints were detected\n @param {nparray image} \n @return: number of keypoints\n '''\n threshold = 0.15\n count = 0\n if sum(points[1]) > 0 and points[1][2] > threshold: # Neck\n count += 1\n if sum(points[10]) >0 and sum(points[13]) >0 and points[10][2] > threshold and points[13][2] > threshold: # Knee\n count += 1\n if sum(points[11]) > 0 and sum(points[14]) >0 and points[11][2] > threshold and points[14][2] > threshold: # Ankle\n count += 1\n if sum(points[19]) >0 and sum(points[22]) >0 and points[19][2] > threshold and points[22][2] > threshold: # BigToe\n count += 1\n else:\n count -= 1\n if sum(points[21]) >0 and sum(points[24]) >0 and points[21][2] > threshold and points[24][2] > threshold: # Heel\n count += 1\n else:\n count -= 1\n if sum(points[9]) >0 and sum(points[8]) >0 and sum(points[12]) >0 and points[9][2] > threshold and points[8][2] > threshold and points[12][2] > threshold: # Hip\n count += 1\n if sum(points[2]) >0 and sum(points[5]) >0 and points[2][2] > threshold and points[5][2] > threshold: # double Shoulder\n count += 1\n else:\n count -= 1\n\n if len([x for x in points if (x[0]!=0 or x[1]!=0) and x[2] > threshold]) > 22:\n print(len([x for x in points if (x[0]!=0 or x[1]!=0) and x[2] > threshold]))\n count = 7\n return count\nif __name__ == '__main__':\n\n openpose = init()\n # Read new image\n # img = cv2.imread(\"../../../examples/media/COCO_val2014_000000000192.jpg\")\n img = cv2.imread(\"videoframes/videoframe-001_scene3_nm_L_090_1_s/frame108.jpg\")\n # Output keypoints and the image with the human skeleton blended on 
it\n keypoints, output_image = openpose.forward(img, True)\n # Print the human pose keypoints, i.e., a [#people x #keypoints x 3]-dimensional numpy object with the keypoints of all the people on that image\n for j in range(keypoints.shape[0]):\n print(check_integrity(keypoints[j, ...]), np.sum(keypoints[j,:,2]))\n print(keypoints)\n # Display the image\n cv2.imshow(\"output\", output_image)\n cv2.imwrite('38.jpg', output_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n"
] | [
[
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
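check_integrity in the record above repeats one test per body part: a keypoint counts only when it was detected (nonzero x or y) and its confidence exceeds the 0.15 render threshold. The numpy-only sketch below isolates that predicate on a fabricated [#people x 25 x 3] array; no OpenPose build is required and none of its API is used.

```python
# Numpy-only sketch of the confidence predicate behind check_integrity above.
# keypoints: [#people, 25, 3] rows of (x, y, score) in the BODY_25 layout.
import numpy as np

THRESHOLD = 0.15
keypoints = np.random.rand(2, 25, 3)       # fabricated detections
keypoints[0, :5, 2] = 0.0                  # person 0: 5 low-confidence points

def confident_count(points):
    """Count keypoints that are present (x or y nonzero) and above threshold."""
    present = (points[:, 0] != 0) | (points[:, 1] != 0)
    confident = points[:, 2] > THRESHOLD
    return int(np.sum(present & confident))

for person in keypoints:
    print(confident_count(person), np.sum(person[:, 2]))
```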
yangpuhai/Granularity-in-DST | [
"1d9a42966ebda675d71b4b54412133cef63ec931"
] | [
"MGL_BERTDST/BERTDST_utils/MultiWOZ_data_utils.py"
] | [
"import numpy as np\nimport json\nfrom torch.utils.data import Dataset\nimport torch\nimport random\nimport re\nfrom copy import deepcopy\nfrom collections import OrderedDict\nfrom .fix_label import fix_general_label_error\nfrom .fix_value import fix_value_dict\nfrom .fix_value import fix_time\n\nEXPERIMENT_DOMAINS = [\"hotel\", \"train\", \"restaurant\", \"attraction\", \"taxi\"]\n# EXPERIMENT_DOMAINS = [\"restaurant\"]\nOP = {'span': 0, 'none': 1, 'dontcare': 2, 'yes': 3, 'no': 4, 'del': 5}\n\ndef slot_value_span(value, diag):\n sequences = diag\n patt = ' ' + value + ' '\n pattern = re.compile(patt)\n m = pattern.finditer(sequences)\n m = [mi for mi in m]\n if m != []:\n line_st = sequences[:m[-1].span()[0]]\n start = len(line_st.split())\n slot_v = [start, start + len(value.split())-1]\n else:\n slot_v = [0, 0]\n return slot_v\n\ndef fix_value_span(slot, tokenizer, state_value, diag):\n tokenize_state_value = ' '.join(tokenizer.tokenize(state_value))\n state_value_span = slot_value_span(tokenize_state_value, diag)\n if 'arrive' in slot or 'leave' in slot or 'time' in slot:\n if state_value not in fix_value_dict:\n fix_value_dict[state_value]=fix_time(state_value)\n if state_value_span == [0, 0] and state_value in fix_value_dict:\n for fix_value in fix_value_dict[state_value]:\n tokenize_fix_value = ' '.join(tokenizer.tokenize(fix_value))\n state_value_span = slot_value_span(tokenize_fix_value, diag)\n if state_value_span != [0, 0]:\n break\n return state_value_span\n\ndef make_span(slot, turn_utter, tokenizer, value):\n diag = tokenizer.tokenize(turn_utter)\n diag = [\"[CLS]\"] + diag\n diag_text = ' '.join(diag)\n result = fix_value_span(slot, tokenizer, value, diag_text)\n return result\n\ndef make_word_idx(turn_utter, tokenizer):\n diag = turn_utter.split()\n diag = [\"[CLS]\"] + diag\n diag_uttr = diag\n word_idx = []\n diag_len = 0\n for word in diag:\n word_list = tokenizer.tokenize(word)\n list_len = len(word_list)\n word_idx.extend([diag_len] * list_len)\n diag_len += 1\n return word_idx, diag_uttr\n\ndef make_turn_label(window_utter, slot_meta, window_dialog_state, turn_dialog_state, tokenizer, dynamic=False):\n if dynamic:\n gold_state = turn_dialog_state\n turn_dialog_state = {}\n for x in gold_state:\n s = x.split('-')\n k = '-'.join(s[:2])\n turn_dialog_state[k] = s[2]\n\n word_idx, diag_uttr = make_word_idx(window_utter, tokenizer)\n\n op_labels = []\n generate_y = []\n for k in slot_meta:\n v = turn_dialog_state.get(k)\n vv = window_dialog_state.get(k)\n if vv != v:\n if v == 'dontcare' :\n op_labels.append('dontcare')\n generate_y.append([0, 0])\n elif v == 'yes':\n op_labels.append('yes')\n generate_y.append([0, 0])\n elif v == 'no':\n op_labels.append('no')\n generate_y.append([0, 0])\n elif v == None:\n op_labels.append('del')\n generate_y.append([0, 0])\n else:\n op_labels.append('span')\n generate_y.append(make_span(k, window_utter, tokenizer, v))\n else:\n op_labels.append('none')\n generate_y.append([0, 0])\n\n gold_state = [str(k) + '-' + str(v) for k, v in turn_dialog_state.items()]\n\n if dynamic:\n op2id = OP\n op_labels = [op2id[i] for i in op_labels]\n\n return op_labels, generate_y, gold_state, word_idx, diag_uttr\n\n\ndef postprocessing(slot_meta, ops, last_dialog_state, generated, input_, gold_gen, word_idx, diag_uttr):\n gid = 0\n for st, op in zip(slot_meta, ops):\n if op == 'dontcare':\n last_dialog_state[st] = 'dontcare'\n elif op == 'yes':\n last_dialog_state[st] = 'yes'\n elif op == 'no':\n last_dialog_state[st] = 'no'\n elif op == 'del':\n if st 
in last_dialog_state:\n last_dialog_state.pop(st)\n elif op == 'span':\n #g = input_[generated[gid][0]:generated[gid][1]+1]\n if generated[gid][0] >= len(input_) or generated[gid][1] >= len(input_):\n continue\n start = word_idx[generated[gid][0]]\n end = word_idx[generated[gid][1]]\n g = diag_uttr[start: end+1]\n\n gen = []\n for gg in g:\n gen.append(gg)\n gen = ' '.join(gen).replace(' ##', '')\n gen = gen.replace(' : ', ':').replace('##', '')\n last_dialog_state[st] = gen\n gid += 1\n return generated, last_dialog_state\n\ndef state_equal(pred_dialog_state, gold_dialog_state, slot_meta):\n equal = True\n for slot in slot_meta:\n pred_value = pred_dialog_state.get(slot)\n gold_value = gold_dialog_state.get(slot)\n if pred_value != gold_value:\n equal = False\n for s in fix_value_dict:\n if pred_value in [s]+fix_value_dict[s]:\n for s1 in [s]+fix_value_dict[s]:\n if s1 == gold_value:\n equal = True\n pred_dialog_state[slot] = s\n break\n return pred_dialog_state, equal\n\n\ndef make_slot_meta(ontology):\n meta = []\n change = {}\n idx = 0\n max_len = 0\n for i, k in enumerate(ontology.keys()):\n d, s = k.split('-')\n if d not in EXPERIMENT_DOMAINS:\n continue\n if 'price' in s or 'leave' in s or 'arrive' in s:\n s = s.replace(' ', '')\n ss = s.split()\n if len(ss) + 1 > max_len:\n max_len = len(ss) + 1\n meta.append('-'.join([d, s]))\n change[meta[-1]] = ontology[k]\n return sorted(meta), change\n\n\ndef create_instance(dialog_history, state_history, size_window, tokenizer, ti, len_turns, dialogue_id,\n turn_domain, turn_id, turn_dialog_state, slot_meta, max_seq_length):\n if len(state_history) < size_window:\n window_dialog_state = state_history[0]\n else:\n window_dialog_state = state_history[len(state_history) - size_window]\n\n if (ti + 1) == len_turns:\n is_last_turn = True\n else:\n is_last_turn = False\n\n turn_utter = \" ; \".join(dialog_history[-size_window:])\n\n op_labels, generate_y, gold_state, word_idx, diag_uttr = make_turn_label(turn_utter, slot_meta, window_dialog_state, turn_dialog_state, tokenizer)\n\n instance = TrainingInstance(dialogue_id,turn_domain, turn_id, turn_utter, window_dialog_state, turn_dialog_state, op_labels, generate_y, gold_state, max_seq_length, slot_meta, is_last_turn)\n instance.make_instance(tokenizer)\n return instance\n\n\ndef prepare_dataset(data_scale, data_path, tokenizer, slot_meta, size_window, max_seq_length, multi_granularity = False, data_type = ''):\n dials = json.load(open(data_path))\n data = []\n domain_counter = {}\n\n if data_type == 'train':\n random.seed(42)\n dials = random.sample(dials, int(data_scale*len(dials)))\n random.seed()\n\n for dial_dict in dials:\n for domain in dial_dict[\"domains\"]:\n if domain not in EXPERIMENT_DOMAINS:\n continue\n if domain not in domain_counter.keys():\n domain_counter[domain] = 0\n domain_counter[domain] += 1\n state_history = []\n dialog_history = []\n last_dialog_state = {}\n for ti, turn in enumerate(dial_dict[\"dialogue\"]):\n turn_id = turn[\"turn_idx\"]\n turn_domain = turn[\"domain\"]\n if turn_domain not in EXPERIMENT_DOMAINS:\n continue\n system_uttr = turn['system_transcript'].strip()\n user_uttr = turn['transcript'].strip()\n if system_uttr == '':\n turn_uttr = '[SEP] ' + user_uttr\n else:\n turn_uttr = system_uttr + ' [SEP] ' + user_uttr\n dialog_history.append(turn_uttr)\n turn_dialog_state = fix_general_label_error(turn[\"belief_state\"], False, slot_meta)\n turn_dialog_state = {k: v for k, v in turn_dialog_state.items() if k in slot_meta}\n keys = 
list(turn_dialog_state.keys())\n for k in keys:\n if turn_dialog_state.get(k) == 'none':\n turn_dialog_state.pop(k)\n state_history.append(last_dialog_state)\n\n len_turns = len(dial_dict['dialogue'])\n dialogue_id = dial_dict[\"dialogue_idx\"]\n if multi_granularity:\n max_size_window = min(size_window, len(dialog_history))\n for sw in range(1, max_size_window + 1):\n instance = create_instance(dialog_history, state_history, sw, tokenizer, ti,\n len_turns, dialogue_id, turn_domain, turn_id, turn_dialog_state,\n slot_meta, max_seq_length)\n data.append(instance)\n else:\n size_window1 = min(size_window, len(dialog_history))\n instance = create_instance(dialog_history, state_history, size_window1, tokenizer, ti,\n len_turns, dialogue_id, turn_domain, turn_id, turn_dialog_state,\n slot_meta, max_seq_length)\n data.append(instance)\n last_dialog_state = turn_dialog_state\n return data\n\nclass TrainingInstance:\n def __init__(self, ID,\n turn_domain,\n turn_id,\n turn_utter,\n last_dialog_state,\n turn_dialog_state,\n op_labels,\n generate_y,\n gold_state,\n max_seq_length,\n slot_meta,\n is_last_turn):\n self.id = ID\n self.turn_domain = turn_domain\n self.turn_id = turn_id\n self.turn_utter = turn_utter\n self.last_dialog_state = last_dialog_state\n self.gold_p_state = last_dialog_state\n self.turn_dialog_state = turn_dialog_state\n self.generate_y = generate_y\n self.op_labels = op_labels\n self.gold_state = gold_state\n self.max_seq_length = max_seq_length\n self.slot_meta = slot_meta\n self.is_last_turn = is_last_turn\n self.op2id = OP\n\n def make_instance(self, tokenizer, max_seq_length=None, word_dropout=0.):\n if max_seq_length is None:\n max_seq_length = self.max_seq_length\n \n avail_length_1 = max_seq_length - 1\n diag = tokenizer.tokenize(self.turn_utter)\n\n if len(diag) > avail_length_1:\n avail_length = len(diag) - avail_length_1\n diag = diag[avail_length:]\n\n drop_mask = [0] + [1] * len(diag)\n diag = [\"[CLS]\"] + diag\n segment = [1] * len(diag)\n\n # word dropout\n if word_dropout > 0.:\n drop_mask = np.array(drop_mask)\n word_drop = np.random.binomial(drop_mask.astype('int64'), word_dropout)\n diag = [w if word_drop[i] == 0 else '[UNK]' for i, w in enumerate(diag)]\n input_ = diag\n segment = segment\n self.input_ = input_\n\n self.segment_id = segment\n\n input_mask = [1] * len(self.input_)\n self.input_id = tokenizer.convert_tokens_to_ids(self.input_)\n if len(input_mask) < max_seq_length:\n self.input_id = self.input_id + [0] * (max_seq_length-len(input_mask))\n self.segment_id = self.segment_id + [0] * (max_seq_length-len(input_mask))\n input_mask = input_mask + [0] * (max_seq_length-len(input_mask))\n\n self.input_mask = input_mask\n self.op_ids = [self.op2id[a] for a in self.op_labels]\n self.generate_ids = self.generate_y\n\n\nclass MultiWozDataset(Dataset):\n def __init__(self, data, tokenizer, slot_meta, max_seq_length, rng, word_dropout=0.1):\n self.data = data\n self.len = len(data)\n self.tokenizer = tokenizer\n self.slot_meta = slot_meta\n self.max_seq_length = max_seq_length\n self.word_dropout = word_dropout\n self.rng = rng\n\n def __len__(self):\n return self.len\n\n def __getitem__(self, idx):\n if self.word_dropout > 0:\n self.data[idx].make_instance(self.tokenizer, word_dropout=self.word_dropout)\n return self.data[idx]\n\n def collate_fn(self, batch):\n input_ids = torch.tensor([f.input_id for f in batch], dtype=torch.long)\n input_mask = torch.tensor([f.input_mask for f in batch], dtype=torch.long)\n segment_ids = torch.tensor([f.segment_id for f 
in batch], dtype=torch.long)\n op_ids = torch.tensor([f.op_ids for f in batch], dtype=torch.long)\n gen_ids = [b.generate_ids for b in batch]\n gen_ids = torch.tensor(gen_ids, dtype=torch.long)\n\n return input_ids, input_mask, segment_ids, op_ids, gen_ids\n"
] | [
[
"numpy.array",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
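slot_value_span in the record above locates a slot value inside the dialogue by wrapping it in spaces, regex-searching the space-joined token string, and converting the character offset of the last match into word-level [start, end] indices ([0, 0] when absent). A self-contained sketch of that trick, using plain .split() words instead of a BERT tokenizer; the re.escape call is a small hardening not present in the original, which compiles the raw value.

```python
# Sketch of the last-occurrence span lookup used by slot_value_span above.
import re

def value_span(value, dialogue):
    """Return word-level [start, end] of the last occurrence, or [0, 0]."""
    matches = list(re.finditer(' ' + re.escape(value) + ' ', dialogue))
    if not matches:
        return [0, 0]
    # Words before the match's character offset give the start word index.
    start = len(dialogue[:matches[-1].span()[0]].split())
    return [start, start + len(value.split()) - 1]

diag = "[CLS] i want a cheap hotel in the north please"
print(value_span("cheap", diag))      # [4, 4]
print(value_span("the north", diag))  # [7, 8]
print(value_span("expensive", diag))  # [0, 0] when the value is absent
```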
typicasoft/transformers | [
"a1a8ffa5126ced93c12dfb677cbe3a069f48dcf3"
] | [
"tests/test_modeling_tf_common.py"
] | [
"# coding=utf-8\n# Copyright 2019 HuggingFace Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport copy\nimport inspect\nimport os\nimport random\nimport tempfile\nimport unittest\nfrom importlib import import_module\nfrom typing import List, Tuple\n\nfrom transformers import is_tf_available, is_torch_available\nfrom transformers.testing_utils import _tf_gpu_memory_limit, require_tf, slow\n\n\nif is_tf_available():\n import numpy as np\n import tensorflow as tf\n\n from transformers import (\n TF_MODEL_FOR_CAUSAL_LM_MAPPING,\n TF_MODEL_FOR_MASKED_LM_MAPPING,\n TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,\n TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,\n TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,\n TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,\n TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,\n TFAdaptiveEmbedding,\n TFSharedEmbeddings,\n tf_top_k_top_p_filtering,\n )\n\n if _tf_gpu_memory_limit is not None:\n gpus = tf.config.list_physical_devices(\"GPU\")\n for gpu in gpus:\n # Restrict TensorFlow to only allocate x GB of memory on the GPUs\n try:\n tf.config.experimental.set_virtual_device_configuration(\n gpu, [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]\n )\n logical_gpus = tf.config.experimental.list_logical_devices(\"GPU\")\n print(\"Logical GPUs\", logical_gpus)\n except RuntimeError as e:\n # Virtual devices must be set before GPUs have been initialized\n print(e)\n\n\ndef _config_zero_init(config):\n configs_no_init = copy.deepcopy(config)\n for key in configs_no_init.__dict__.keys():\n if \"_range\" in key or \"_std\" in key:\n setattr(configs_no_init, key, 0.0)\n return configs_no_init\n\n\n@require_tf\nclass TFModelTesterMixin:\n\n model_tester = None\n all_model_classes = ()\n all_generative_model_classes = ()\n test_resize_embeddings = True\n is_encoder_decoder = False\n\n def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):\n inputs_dict = copy.deepcopy(inputs_dict)\n\n if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():\n inputs_dict = {\n k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))\n if isinstance(v, tf.Tensor) and v.ndim > 0\n else v\n for k, v in inputs_dict.items()\n }\n\n if return_labels:\n if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():\n inputs_dict[\"labels\"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)\n elif model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():\n inputs_dict[\"start_positions\"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)\n inputs_dict[\"end_positions\"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)\n elif model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():\n inputs_dict[\"labels\"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)\n elif model_class in [\n *TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),\n *TF_MODEL_FOR_CAUSAL_LM_MAPPING.values(),\n *TF_MODEL_FOR_MASKED_LM_MAPPING.values(),\n 
*TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),\n ]:\n inputs_dict[\"labels\"] = tf.zeros(\n (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32\n )\n return inputs_dict\n\n def test_initialization(self):\n pass\n\n def test_save_load(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n outputs = model(self._prepare_for_class(inputs_dict, model_class))\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n model.save_pretrained(tmpdirname)\n model = model_class.from_pretrained(tmpdirname)\n after_outputs = model(self._prepare_for_class(inputs_dict, model_class))\n\n self.assert_outputs_same(after_outputs, outputs)\n\n def test_graph_mode(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n for model_class in self.all_model_classes:\n inputs = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n\n @tf.function\n def run_in_graph_mode():\n return model(inputs)\n\n outputs = run_in_graph_mode()\n self.assertIsNotNone(outputs)\n\n @slow\n def test_saved_model_with_hidden_states_output(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.output_hidden_states = True\n\n for model_class in self.all_model_classes:\n inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n num_out = len(model(inputs_dict))\n model._saved_model_inputs_spec = None\n model._set_save_spec(inputs_dict)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n tf.saved_model.save(model, tmpdirname)\n model = tf.keras.models.load_model(tmpdirname)\n outputs = model(inputs_dict)\n output = outputs[list(outputs.keys())[-1]] if isinstance(outputs, dict) else outputs[-1]\n hidden_states = [t.numpy() for t in output]\n self.assertEqual(len(outputs), num_out)\n self.assertEqual(len(hidden_states), self.model_tester.num_hidden_layers + 1)\n self.assertListEqual(\n list(hidden_states[0].shape[-2:]),\n [self.model_tester.seq_length, self.model_tester.hidden_size],\n )\n\n @slow\n def test_saved_model_with_attentions_output(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n config.output_attentions = True\n encoder_seq_length = (\n self.model_tester.encoder_seq_length\n if hasattr(self.model_tester, \"encoder_seq_length\")\n else self.model_tester.seq_length\n )\n encoder_key_length = (\n self.model_tester.key_length if hasattr(self.model_tester, \"key_length\") else encoder_seq_length\n )\n\n for model_class in self.all_model_classes:\n inputs_dict = self._prepare_for_class(inputs_dict, model_class)\n model = model_class(config)\n num_out = len(model(inputs_dict))\n model._saved_model_inputs_spec = None\n model._set_save_spec(inputs_dict)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n tf.saved_model.save(model, tmpdirname)\n model = tf.keras.models.load_model(tmpdirname)\n outputs = model(inputs_dict)\n output = outputs[list(outputs.keys())[-1]] if isinstance(outputs, dict) else outputs[-1]\n attentions = [t.numpy() for t in output]\n self.assertEqual(len(outputs), num_out)\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],\n )\n\n def test_keras_save_load(self):\n config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common()\n\n tf_main_layer_classes = set(\n module_member\n for model_class in self.all_model_classes\n for module in (import_module(model_class.__module__),)\n for module_member_name in dir(module)\n if module_member_name.endswith(\"MainLayer\")\n for module_member in (getattr(module, module_member_name),)\n if isinstance(module_member, type)\n and tf.keras.layers.Layer in module_member.__bases__\n and getattr(module_member, \"_keras_serializable\", False)\n )\n for main_layer_class in tf_main_layer_classes:\n # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter\n if \"T5\" in main_layer_class.__name__:\n # Take the same values than in TFT5ModelTester for this shared layer\n shared = TFSharedEmbeddings(99, 32, name=\"shared\")\n config.use_cache = False\n main_layer = main_layer_class(config, embed_tokens=shared)\n else:\n main_layer = main_layer_class(config)\n symbolic_inputs = {\n name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()\n }\n\n model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))\n outputs = model(inputs_dict)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n filepath = os.path.join(tmpdirname, \"keras_model.h5\")\n model.save(filepath)\n if \"T5\" in main_layer_class.__name__:\n model = tf.keras.models.load_model(\n filepath,\n custom_objects={\n main_layer_class.__name__: main_layer_class,\n \"TFSharedEmbeddings\": TFSharedEmbeddings,\n },\n )\n else:\n model = tf.keras.models.load_model(\n filepath, custom_objects={main_layer_class.__name__: main_layer_class}\n )\n assert isinstance(model, tf.keras.Model)\n after_outputs = model(inputs_dict)\n self.assert_outputs_same(after_outputs, outputs)\n\n def assert_outputs_same(self, after_outputs, outputs):\n # Make sure we don't have nans\n if isinstance(after_outputs, tf.Tensor):\n out_1 = after_outputs.numpy()\n elif isinstance(after_outputs, dict):\n out_1 = after_outputs[list(after_outputs.keys())[0]]\n else:\n out_1 = after_outputs[0].numpy()\n out_2 = outputs[0].numpy()\n self.assertEqual(out_1.shape, out_2.shape)\n out_1 = out_1[~np.isnan(out_1)]\n out_2 = out_2[~np.isnan(out_2)]\n max_diff = np.amax(np.abs(out_1 - out_2))\n self.assertLessEqual(max_diff, 1e-5)\n\n def test_pt_tf_model_equivalence(self):\n if not is_torch_available():\n return\n\n import torch\n\n import transformers\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n pt_model_class_name = model_class.__name__[2:] # Skip the \"TF\" at the beggining\n pt_model_class = getattr(transformers, pt_model_class_name)\n\n config.output_hidden_states = True\n\n tf_model = model_class(config)\n pt_model = pt_model_class(config)\n\n # Check we can load pt model in tf and vice-versa with model => model functions\n\n tf_model = transformers.load_pytorch_model_in_tf2_model(\n tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)\n )\n pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)\n\n # Check predictions on first output (logits/hidden-states) are close enought given low-level computational differences\n pt_model.eval()\n pt_inputs_dict = dict(\n (name, torch.from_numpy(key.numpy()).to(torch.long))\n for name, key in self._prepare_for_class(inputs_dict, model_class).items()\n )\n # need to rename encoder-decoder \"inputs\" for PyTorch\n if \"inputs\" in pt_inputs_dict and 
self.is_encoder_decoder:\n pt_inputs_dict[\"input_ids\"] = pt_inputs_dict.pop(\"inputs\")\n\n with torch.no_grad():\n pto = pt_model(**pt_inputs_dict)\n tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)\n tf_hidden_states = tfo[0].numpy()\n pt_hidden_states = pto[0].numpy()\n\n tf_nans = np.copy(np.isnan(tf_hidden_states))\n pt_nans = np.copy(np.isnan(pt_hidden_states))\n\n pt_hidden_states[tf_nans] = 0\n tf_hidden_states[tf_nans] = 0\n pt_hidden_states[pt_nans] = 0\n tf_hidden_states[pt_nans] = 0\n\n max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))\n # Debug info (remove when fixed)\n if max_diff >= 4e-2:\n print(\"===\")\n print(model_class)\n print(config)\n print(inputs_dict)\n print(pt_inputs_dict)\n self.assertLessEqual(max_diff, 4e-2)\n\n # Check we can load pt model in tf and vice-versa with checkpoint => model functions\n with tempfile.TemporaryDirectory() as tmpdirname:\n pt_checkpoint_path = os.path.join(tmpdirname, \"pt_model.bin\")\n torch.save(pt_model.state_dict(), pt_checkpoint_path)\n tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)\n\n tf_checkpoint_path = os.path.join(tmpdirname, \"tf_model.h5\")\n tf_model.save_weights(tf_checkpoint_path)\n pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)\n\n # Check predictions on first output (logits/hidden-states) are close enought given low-level computational differences\n pt_model.eval()\n pt_inputs_dict = dict(\n (name, torch.from_numpy(key.numpy()).to(torch.long))\n for name, key in self._prepare_for_class(inputs_dict, model_class).items()\n )\n # need to rename encoder-decoder \"inputs\" for PyTorch\n if \"inputs\" in pt_inputs_dict and self.is_encoder_decoder:\n pt_inputs_dict[\"input_ids\"] = pt_inputs_dict.pop(\"inputs\")\n\n with torch.no_grad():\n pto = pt_model(**pt_inputs_dict)\n tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))\n tfo = tfo[0].numpy()\n pto = pto[0].numpy()\n tf_nans = np.copy(np.isnan(tfo))\n pt_nans = np.copy(np.isnan(pto))\n\n pto[tf_nans] = 0\n tfo[tf_nans] = 0\n pto[pt_nans] = 0\n tfo[pt_nans] = 0\n\n max_diff = np.amax(np.abs(tfo - pto))\n self.assertLessEqual(max_diff, 4e-2)\n\n def test_train_pipeline_custom_model(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n tf_main_layer_classes = set(\n module_member\n for model_class in self.all_model_classes\n for module in (import_module(model_class.__module__),)\n for module_member_name in dir(module)\n if module_member_name.endswith(\"MainLayer\")\n for module_member in (getattr(module, module_member_name),)\n if isinstance(module_member, type)\n and tf.keras.layers.Layer in module_member.__bases__\n and getattr(module_member, \"_keras_serializable\", False)\n )\n\n for main_layer_class in tf_main_layer_classes:\n # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter\n if \"T5\" in main_layer_class.__name__:\n # Take the same values than in TFT5ModelTester for this shared layer\n shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name=\"shared\")\n config.use_cache = False\n main_layer = main_layer_class(config, embed_tokens=shared)\n del inputs_dict[\"use_cache\"]\n else:\n main_layer = main_layer_class(config)\n\n symbolic_inputs = {\n name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()\n }\n\n if hasattr(self.model_tester, \"num_labels\"):\n 
num_labels = self.model_tester.num_labels\n else:\n num_labels = 2\n\n X = tf.data.Dataset.from_tensor_slices(\n (inputs_dict, np.random.randint(0, num_labels, (self.model_tester.batch_size, 1)))\n ).batch(1)\n\n hidden_states = main_layer(symbolic_inputs)[0]\n outputs = tf.keras.layers.Dense(num_labels, activation=\"softmax\", name=\"outputs\")(hidden_states)\n model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])\n\n model.compile(loss=\"binary_crossentropy\", optimizer=\"adam\", metrics=[\"acc\"])\n model.fit(X, epochs=1)\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n filepath = os.path.join(tmpdirname, \"keras_model.h5\")\n model.save(filepath)\n if \"T5\" in main_layer_class.__name__:\n model = tf.keras.models.load_model(\n filepath,\n custom_objects={\n main_layer_class.__name__: main_layer_class,\n \"TFSharedEmbeddings\": TFSharedEmbeddings,\n },\n )\n else:\n model = tf.keras.models.load_model(\n filepath, custom_objects={main_layer_class.__name__: main_layer_class}\n )\n assert isinstance(model, tf.keras.Model)\n model(inputs_dict)\n\n def test_compile_tf_model(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n metric = tf.keras.metrics.SparseCategoricalAccuracy(\"accuracy\")\n\n for model_class in self.all_model_classes:\n if self.is_encoder_decoder:\n input_ids = {\n \"decoder_input_ids\": tf.keras.Input(\n batch_shape=(2, 2000), name=\"decoder_input_ids\", dtype=\"int32\"\n ),\n \"input_ids\": tf.keras.Input(batch_shape=(2, 2000), name=\"input_ids\", dtype=\"int32\"),\n }\n elif model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():\n input_ids = tf.keras.Input(batch_shape=(4, 2, 2000), name=\"input_ids\", dtype=\"int32\")\n else:\n input_ids = tf.keras.Input(batch_shape=(2, 2000), name=\"input_ids\", dtype=\"int32\")\n\n # Prepare our model\n model = model_class(config)\n\n # Let's load it from the disk to be sure we can use pretrained weights\n with tempfile.TemporaryDirectory() as tmpdirname:\n outputs = model(self._prepare_for_class(inputs_dict, model_class)) # build the model\n model.save_pretrained(tmpdirname)\n model = model_class.from_pretrained(tmpdirname)\n\n outputs_dict = model(input_ids)\n hidden_states = outputs_dict[0]\n\n # Add a dense layer on top to test integration with other keras modules\n outputs = tf.keras.layers.Dense(2, activation=\"softmax\", name=\"outputs\")(hidden_states)\n\n # Compile extended model\n extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])\n extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])\n\n def test_keyword_and_dict_args(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n outputs_dict = model(self._prepare_for_class(inputs_dict, model_class))\n\n inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))\n input_ids = inputs_keywords.pop(\"input_ids\", None)\n outputs_keywords = model(input_ids, **inputs_keywords)\n output_dict = outputs_dict[0].numpy()\n output_keywords = outputs_keywords[0].numpy()\n\n self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)\n\n def test_attention_outputs(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n decoder_seq_length = (\n 
self.model_tester.decoder_seq_length\n if hasattr(self.model_tester, \"decoder_seq_length\")\n else self.model_tester.seq_length\n )\n encoder_seq_length = (\n self.model_tester.encoder_seq_length\n if hasattr(self.model_tester, \"encoder_seq_length\")\n else self.model_tester.seq_length\n )\n decoder_key_length = (\n self.model_tester.key_length if hasattr(self.model_tester, \"key_length\") else decoder_seq_length\n )\n encoder_key_length = (\n self.model_tester.key_length if hasattr(self.model_tester, \"key_length\") else encoder_seq_length\n )\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_attentions\"] = True\n config.output_hidden_states = False\n model = model_class(config)\n outputs = model(self._prepare_for_class(inputs_dict, model_class))\n attentions = [t.numpy() for t in outputs[-1]]\n self.assertEqual(model.config.output_hidden_states, False)\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],\n )\n out_len = len(outputs)\n\n if self.is_encoder_decoder:\n self.assertEqual(out_len % 2, 0)\n decoder_attentions = outputs[(out_len // 2) - 1]\n self.assertEqual(model.config.output_hidden_states, False)\n self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(decoder_attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],\n )\n\n # Check that output attentions can also be changed via the config\n del inputs_dict[\"output_attentions\"]\n config.output_attentions = True\n model = model_class(config)\n outputs = model(self._prepare_for_class(inputs_dict, model_class))\n attentions = [t.numpy() for t in outputs[-1]]\n self.assertEqual(model.config.output_hidden_states, False)\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],\n )\n\n # Check attention is always last and order is fine\n inputs_dict[\"output_attentions\"] = True\n config.output_hidden_states = True\n model = model_class(config)\n outputs = model(self._prepare_for_class(inputs_dict, model_class))\n self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))\n self.assertEqual(model.config.output_hidden_states, True)\n\n attentions = [t.numpy() for t in outputs[-1]]\n self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)\n self.assertListEqual(\n list(attentions[0].shape[-3:]),\n [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],\n )\n\n def test_hidden_states_output(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n def check_hidden_states_output(config, inputs_dict, model_class):\n model = model_class(config)\n outputs = model(self._prepare_for_class(inputs_dict, model_class))\n hidden_states = [t.numpy() for t in outputs[-1]]\n expected_num_layers = getattr(\n self.model_tester, \"expected_num_hidden_layers\", self.model_tester.num_hidden_layers + 1\n )\n self.assertEqual(len(hidden_states), expected_num_layers)\n self.assertListEqual(\n list(hidden_states[0].shape[-2:]),\n [self.model_tester.seq_length, self.model_tester.hidden_size],\n )\n\n for model_class in self.all_model_classes:\n inputs_dict[\"output_hidden_states\"] = True\n check_hidden_states_output(config, 
inputs_dict, model_class)\n\n del inputs_dict[\"output_hidden_states\"]\n config.output_hidden_states = True\n check_hidden_states_output(config, inputs_dict, model_class)\n\n def test_model_common_attributes(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n assert isinstance(model.get_input_embeddings(), (tf.keras.layers.Layer, TFAdaptiveEmbedding))\n x = model.get_output_embeddings()\n assert x is None or isinstance(x, tf.keras.layers.Layer)\n\n def test_determinism(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n first, second = (\n model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],\n model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],\n )\n out_1 = first.numpy()\n out_2 = second.numpy()\n out_1 = out_1[~np.isnan(out_1)]\n out_2 = out_2[~np.isnan(out_2)]\n max_diff = np.amax(np.abs(out_1 - out_2))\n self.assertLessEqual(max_diff, 1e-5)\n\n def test_model_outputs_equivalence(self):\n\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):\n tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)\n dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()\n\n def recursive_check(tuple_object, dict_object):\n if isinstance(tuple_object, (List, Tuple)):\n for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):\n recursive_check(tuple_iterable_value, dict_iterable_value)\n elif tuple_object is None:\n return\n else:\n self.assertTrue(\n all(tf.equal(tuple_object, dict_object)),\n msg=f\"Tuple and dict output are not equal. 
Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}\",\n )\n\n recursive_check(tuple_output, dict_output)\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class)\n check_equivalence(model, tuple_inputs, dict_inputs)\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n check_equivalence(model, tuple_inputs, dict_inputs)\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class)\n check_equivalence(model, tuple_inputs, dict_inputs, {\"output_hidden_states\": True})\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class)\n check_equivalence(model, tuple_inputs, dict_inputs, {\"output_attentions\": True})\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n check_equivalence(model, tuple_inputs, dict_inputs, {\"output_hidden_states\": True})\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n check_equivalence(model, tuple_inputs, dict_inputs, {\"output_attentions\": True})\n\n tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)\n check_equivalence(\n model, tuple_inputs, dict_inputs, {\"output_hidden_states\": True, \"output_attentions\": True}\n )\n\n def _get_embeds(self, wte, input_ids):\n # ^^ In our TF models, the input_embeddings can take slightly different forms,\n # so we try a few of them.\n # We used to fall back to just synthetically creating a dummy tensor of ones:\n try:\n x = wte(input_ids, mode=\"embedding\")\n except Exception:\n try:\n x = wte([input_ids], mode=\"embedding\")\n except Exception:\n try:\n x = wte([input_ids, None, None, None], mode=\"embedding\")\n except Exception:\n if hasattr(self.model_tester, \"embedding_size\"):\n x = tf.ones(\n input_ids.shape + [self.model_tester.embedding_size],\n dtype=tf.dtypes.float32,\n )\n else:\n x = tf.ones(\n input_ids.shape + [self.model_tester.hidden_size],\n dtype=tf.dtypes.float32,\n )\n return x\n\n def test_inputs_embeds(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n\n for model_class in self.all_model_classes:\n model = model_class(config)\n\n inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))\n if not self.is_encoder_decoder:\n input_ids = inputs[\"input_ids\"]\n del inputs[\"input_ids\"]\n else:\n encoder_input_ids = inputs[\"input_ids\"]\n decoder_input_ids = inputs.get(\"decoder_input_ids\", encoder_input_ids)\n del inputs[\"input_ids\"]\n inputs.pop(\"decoder_input_ids\", None)\n\n wte = model.get_input_embeddings()\n if not self.is_encoder_decoder:\n inputs[\"inputs_embeds\"] = self._get_embeds(wte, input_ids)\n else:\n inputs[\"inputs_embeds\"] = self._get_embeds(wte, encoder_input_ids)\n inputs[\"decoder_inputs_embeds\"] = self._get_embeds(wte, decoder_input_ids)\n\n model(inputs)\n\n def test_resize_token_embeddings(self):\n if not 
self.test_resize_embeddings:\n return\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n INPUT_SHAPE = [1, 10, config.hidden_size]\n for model_class in self.all_model_classes:\n for size in [config.vocab_size - 10, config.vocab_size + 10, None]:\n # build the embeddings\n model = model_class(config=config)\n emb_old = model.get_input_embeddings()\n emb_old.build(INPUT_SHAPE)\n # reshape the embeddings\n new_embeddings = model._get_resized_embeddings(emb_old, size)\n # # check that the resized embeddings size matches the desired size.\n assert_size = size if size is not None else config.vocab_size\n self.assertEqual(new_embeddings.shape[0], assert_size)\n # check that weights remain the same after resizing\n emd_old_weights = model._get_word_embeddings(emb_old)\n models_equal = True\n for p1, p2 in zip(emd_old_weights.numpy(), new_embeddings.numpy()):\n if np.sum(abs(p1 - p2)) > 0:\n models_equal = False\n self.assertTrue(models_equal)\n\n def test_lm_head_model_random_no_beam_search_generate(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n input_ids = inputs_dict[\"input_ids\"] if \"input_ids\" in inputs_dict else inputs_dict[\"inputs\"]\n\n # iterate over all generative models\n for model_class in self.all_generative_model_classes:\n model = model_class(config)\n\n if config.bos_token_id is None:\n # if bos token id is not defined mobel needs input_ids\n with self.assertRaises(AssertionError):\n model.generate(do_sample=True, max_length=5)\n # num_return_sequences = 1\n self._check_generated_ids(model.generate(input_ids, do_sample=True))\n else:\n # num_return_sequences = 1\n self._check_generated_ids(model.generate(do_sample=True, max_length=5))\n\n with self.assertRaises(AssertionError):\n # generating multiple sequences when no beam search generation\n # is not allowed as it would always generate the same sequences\n model.generate(input_ids, do_sample=False, num_return_sequences=2)\n\n # num_return_sequences > 1, sample\n self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))\n\n # check bad words tokens language generation\n # create list of 1-seq bad token and list of 2-seq of bad tokens\n bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]\n output_tokens = model.generate(\n input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2\n )\n # only count generated tokens\n generated_ids = output_tokens[:, input_ids.shape[-1] :]\n self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))\n\n def test_lm_head_model_random_beam_search_generate(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n input_ids = inputs_dict[\"input_ids\"] if \"input_ids\" in inputs_dict else inputs_dict[\"inputs\"]\n\n for model_class in self.all_generative_model_classes:\n model = model_class(config)\n\n if config.bos_token_id is None:\n # if bos token id is not defined mobel needs input_ids, num_return_sequences = 1\n self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))\n else:\n # num_return_sequences = 1\n self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))\n\n with self.assertRaises(AssertionError):\n # generating more sequences than having beams leads is not possible\n model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)\n\n # num_return_sequences > 1, sample\n 
self._check_generated_ids(\n model.generate(\n input_ids,\n do_sample=True,\n num_beams=2,\n num_return_sequences=2,\n )\n )\n # num_return_sequences > 1, greedy\n self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))\n\n # check bad words tokens language generation\n # create list of 1-seq bad token and list of 2-seq of bad tokens\n bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]\n output_tokens = model.generate(\n input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2\n )\n # only count generated tokens\n generated_ids = output_tokens[:, input_ids.shape[-1] :]\n self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))\n\n def test_loss_computation(self):\n config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()\n for model_class in self.all_model_classes:\n model = model_class(config)\n if getattr(model, \"compute_loss\", None):\n # The number of elements in the loss should be the same as the number of elements in the label\n prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)\n added_label = prepared_for_class[list(prepared_for_class.keys() - inputs_dict.keys())[0]]\n loss_size = tf.size(added_label)\n\n if model.__class__ in TF_MODEL_FOR_CAUSAL_LM_MAPPING.values():\n # if loss is causal lm loss, labels are shift, so that one label per batch\n # is cut\n loss_size = loss_size - self.model_tester.batch_size\n\n # Test that model correctly compute the loss with kwargs\n prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)\n input_ids = prepared_for_class.pop(\"input_ids\")\n\n loss = model(input_ids, **prepared_for_class)[0]\n self.assertEqual(loss.shape, [loss_size])\n\n # Test that model correctly compute the loss with a dict\n prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)\n loss = model(prepared_for_class)[0]\n self.assertEqual(loss.shape, [loss_size])\n\n # Test that model correctly compute the loss with a tuple\n prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)\n\n # Get keys that were added with the _prepare_for_class function\n label_keys = prepared_for_class.keys() - inputs_dict.keys()\n signature = inspect.getfullargspec(model.call)[0]\n\n # Create a dictionary holding the location of the tensors in the tuple\n tuple_index_mapping = {1: \"input_ids\"}\n for label_key in label_keys:\n label_key_index = signature.index(label_key)\n tuple_index_mapping[label_key_index] = label_key\n sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())\n\n # Initialize a list with None, update the values and convert to a tuple\n list_input = [None] * sorted_tuple_index_mapping[-1][0]\n for index, value in sorted_tuple_index_mapping:\n list_input[index - 1] = prepared_for_class[value]\n tuple_input = tuple(list_input)\n\n # Send to model\n loss = model(tuple_input)[0]\n self.assertEqual(loss.shape, [loss_size])\n\n def _generate_random_bad_tokens(self, num_bad_tokens, model):\n # special tokens cannot be bad tokens\n special_tokens = []\n if model.config.bos_token_id is not None:\n special_tokens.append(model.config.bos_token_id)\n if model.config.pad_token_id is not None:\n special_tokens.append(model.config.pad_token_id)\n if model.config.eos_token_id is not None:\n 
special_tokens.append(model.config.eos_token_id)\n\n # create random bad tokens that are not special tokens\n bad_tokens = []\n while len(bad_tokens) < num_bad_tokens:\n token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]\n if token not in special_tokens:\n bad_tokens.append(token)\n return bad_tokens\n\n def _check_generated_ids(self, output_ids):\n for token_id in output_ids[0].numpy().tolist():\n self.assertGreaterEqual(token_id, 0)\n self.assertLess(token_id, self.model_tester.vocab_size)\n\n def _check_match_tokens(self, generated_ids, bad_words_ids):\n # for all bad word tokens\n for bad_word_ids in bad_words_ids:\n # for all slices in batch\n for generated_ids_slice in generated_ids:\n # for all word idx\n for i in range(len(bad_word_ids), len(generated_ids_slice)):\n # if tokens match\n if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:\n return True\n return False\n\n\ndef ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):\n \"\"\"Creates a random int32 tensor of the shape within the vocab size.\"\"\"\n if rng is None:\n rng = random.Random()\n\n total_dims = 1\n for dim in shape:\n total_dims *= dim\n\n values = []\n for _ in range(total_dims):\n values.append(rng.randint(0, vocab_size - 1))\n\n output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)\n\n return output\n\n\n@require_tf\nclass UtilsFunctionsTest(unittest.TestCase):\n\n # tests whether the top_k_top_p_filtering function behaves as expected\n def test_top_k_top_p_filtering(self):\n logits = tf.convert_to_tensor(\n [\n [\n 8.2220991, # 3rd highest value; idx. 0\n -0.5620044,\n 5.23229752,\n 4.0386393,\n -6.8798378,\n -0.54785802,\n -3.2012153,\n 2.92777176,\n 1.88171953,\n 7.35341276, # 5th highest value; idx. 9\n 8.43207833, # 2nd highest value; idx. 10\n -9.85711836,\n -5.96209236,\n -1.13039161,\n -7.1115294,\n -0.8369633,\n -5.3186408,\n 7.06427407,\n 0.81369344,\n -0.82023817,\n -5.9179796,\n 0.58813443,\n -6.99778438,\n 4.71551189,\n -0.18771637,\n 7.44020759, # 4th highest value; idx. 25\n 9.38450987, # 1st highest value; idx. 26\n 2.12662941,\n -9.32562038,\n 2.35652522,\n ], # cummulative prob of 5 highest values <= 0.6\n [\n 0.58425518,\n 4.53139238,\n -5.57510464,\n -6.28030699,\n -7.19529503,\n -4.02122551,\n 1.39337037,\n -6.06707057,\n 1.59480517,\n -9.643119,\n 0.03907799,\n 0.67231762,\n -8.88206726,\n 6.27115922, # 4th highest value; idx. 13\n 2.28520723,\n 4.82767506,\n 4.30421368,\n 8.8275313, # 2nd highest value; idx. 17\n 5.44029958, # 5th highest value; idx. 18\n -4.4735794,\n 7.38579536, # 3rd highest value; idx. 20\n -2.91051663,\n 2.61946077,\n -2.5674762,\n -9.48959302,\n -4.02922645,\n -1.35416918,\n 9.67702323, # 1st highest value; idx. 
27\n -5.89478553,\n 1.85370467,\n ], # cummulative prob of 5 highest values <= 0.6\n ],\n dtype=tf.float32,\n )\n\n non_inf_expected_idx = tf.convert_to_tensor(\n [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],\n dtype=tf.int32,\n ) # expected non filtered idx as noted above\n\n non_inf_expected_output = tf.convert_to_tensor(\n [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],\n dtype=tf.float32,\n ) # expected non filtered values as noted above\n\n output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)\n\n non_inf_output = output[output != -float(\"inf\")]\n non_inf_idx = tf.cast(\n tf.where(tf.not_equal(output, tf.constant(-float(\"inf\"), dtype=tf.float32))),\n dtype=tf.int32,\n )\n\n tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)\n tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.keras.models.load_model",
"tensorflow.zeros",
"tensorflow.equal",
"tensorflow.debugging.assert_near",
"tensorflow.config.list_physical_devices",
"torch.no_grad",
"numpy.random.randint",
"tensorflow.keras.Input",
"tensorflow.saved_model.save",
"tensorflow.debugging.assert_equal",
"tensorflow.config.experimental.list_logical_devices",
"tensorflow.keras.models.Model",
"numpy.isnan",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Model",
"tensorflow.size",
"tensorflow.constant",
"numpy.abs",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.ones",
"tensorflow.config.experimental.VirtualDeviceConfiguration",
"tensorflow.expand_dims",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
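Note on the record above: its PyTorch/TensorFlow cross-tests compare first outputs after zeroing every position that is NaN in either framework's output, then bound the max absolute difference by 4e-2. A minimal standalone sketch of that comparison pattern (NumPy only; the helper name and sample arrays are ours, the 4e-2 tolerance is the test's):

import numpy as np

def max_diff_ignoring_nans(a, b):
    a, b = np.array(a, dtype=float), np.array(b, dtype=float)
    nan_mask = np.isnan(a) | np.isnan(b)
    a[nan_mask] = 0.0  # zero out positions that are NaN in either output
    b[nan_mask] = 0.0
    return np.amax(np.abs(a - b))

tf_out = np.array([1.0, np.nan, 3.0])
pt_out = np.array([1.01, 2.0, np.nan])
assert max_diff_ignoring_nans(tf_out, pt_out) <= 4e-2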
sgarofoli/tf-quant-finance | [
"0dafa7379100b343e22ef2d4185e442f8520f8a6"
] | [
"tf_quant_finance/math/random_ops/stateless.py"
] | [
"# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python2, python3\n\"\"\"Stateless random ops.\n\nImplement some of the stateless ops, which produce random numbers as a\ndeterministic function of seed.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\ndef stateless_random_shuffle(input_tensor, seed, name=None):\n \"\"\"Produces stateless random shuffle of the 1st dimension of an input Tensor.\n\n This is a stateless version of `tf.random_shuffle`. If run twice with the same\n seed, produces the same result.\n\n Example\n ```python\n identity_shuffle = tf.range(100)\n random_shuffle = stateless_random_shuffle(identity_shuffle, seed=(42, 2))\n ```\n\n Args:\n input_tensor: float32, float64, int32 or int64 1-D Tensor.\n seed: int32 or int64 Tensor of shape [2].\n name: Python `str` name prefixed to ops created by this function.\n\n Returns:\n A Tensor of the same shape and dtype as `input_tensor`.\n \"\"\"\n with tf.compat.v1.name_scope(name,\n default_name='stateless_random_shuffle',\n values=[input_tensor, seed]):\n input_tensor = tf.convert_to_tensor(input_tensor, name='input_tensor')\n seed = tf.convert_to_tensor(seed, name='random_seed')\n uniforms = tf.random.stateless_uniform(\n shape=[tf.shape(input_tensor)[0]], seed=seed, dtype=tf.float64)\n return tf.gather(input_tensor, tf.argsort(uniforms, stable=True, axis=0))\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.argsort",
"tensorflow.shape",
"tensorflow.compat.v1.name_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
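The shuffle in the record above draws stateless uniforms and takes their argsort, so the permutation is a pure function of the seed. A usage sketch, assuming TF 2.x and stateless_random_shuffle from the record in scope (the concrete permutation depends on TF's stateless RNG, so only determinism is checked):

import tensorflow as tf

x = tf.range(10)
a = stateless_random_shuffle(x, seed=(42, 2))
b = stateless_random_shuffle(x, seed=(42, 2))
assert bool(tf.reduce_all(a == b))  # same seed -> identical shuffle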
PranithChowdary/DataQ | [
"1070038b14714156c4a9a7c06f606e155d6272b1"
] | [
"downloader.py"
] | [
"import pandas as pd\nfrom io import BytesIO\nimport base64\n\n\ndef to_excel(df):\n output = BytesIO()\n writer = pd.ExcelWriter(output, engine='xlsxwriter')\n df.to_excel(writer, index = False)\n writer.save()\n processed_data = output.getvalue()\n return processed_data\n\n\ndef get_table_download_link(df):\n \"\"\"Generates a link allowing the data in a given panda dataframe to be downloaded\n in: dataframe\n out: href string\n \"\"\"\n val = to_excel(df)\n return val\n"
] | [
[
"pandas.ExcelWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
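The record above imports base64 but never uses it, and get_table_download_link actually returns raw xlsx bytes. If an href string is wanted, one common pattern is a base64 data URI; a hedged sketch (the function name and MIME handling are ours, not the repo's):

import base64

def to_href(xlsx_bytes, filename="data.xlsx"):
    b64 = base64.b64encode(xlsx_bytes).decode()
    mime = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    return f'<a href="data:{mime};base64,{b64}" download="{filename}">Download</a>'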
NKrvavica/fqs | [
"d95d684d867dcb89d0a3853569d12f1f955f1d5d"
] | [
"test_quadratic_roots.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 3 11:14:52 2019\n\n@author: NKrvavica\n\"\"\"\n\nimport timeit\nimport numpy as np\nimport fqs\n\n\ndef eig_roots(p):\n '''Finds cubic roots via numerical eigenvalue solver\n `npumpy.linalg.eigvals` from a 3x3 companion matrix'''\n a, b = (p[:, 1]/p[:, 0], p[:, 2]/p[:, 0])\n N = len(a)\n A = np.zeros((N, 2, 2))\n A[:, 1, 0] = 1\n A[:, :, 1] = - np.array([b, a]).T\n roots = np.linalg.eigvals(A)\n return roots\n\n\n\n# --------------------------------------------------------------------------- #\n# Test speed of fqs cubic solver compared to np.roots and np.linalg.eigvals\n# --------------------------------------------------------------------------- #\n\n# Number of samples (sets of randomly generated cubic coefficients)\nN = 1000\n\n# Generate polynomial coefficients\nrange_coeff = 100\np = np.random.rand(N, 3)*(range_coeff) - range_coeff/2\n\n# number of runs\nruns = 5\n\nbest_time = 100\nfor i in range(runs):\n start = timeit.default_timer()\n roots1 = [np.roots(pi) for pi in p]\n stop = timeit.default_timer()\n time = stop - start\n best_time = min(best_time, time)\nprint('np.roots: {:.3f} ms (best of {} runs)'\n .format(best_time*1_000, runs))\n\nbest_time = 100\nfor i in range(runs):\n start = timeit.default_timer()\n roots2 = eig_roots(p)\n stop = timeit.default_timer()\n time = stop - start\n best_time = min(best_time, time)\nprint('np.linalg.eigvals: {:.3f} ms (best of {} runs)'\n .format(best_time*1_000, runs))\nprint('max err: {:.2e}'.format(abs(np.sort(roots2, axis=1)\n - (np.sort(roots1, axis=1))).max()))\n\nbest_time = 100\nfor i in range(runs):\n start = timeit.default_timer()\n roots3 = [fqs.solve_single_quadratic(*pi) for pi in p]\n stop = timeit.default_timer()\n time = stop - start\n best_time = min(best_time, time)\nprint('fqs.solve_single_quadratic: {:.3f} ms (best of {} runs)'\n .format(best_time*1_000, runs))\nprint('max err: {:.2e}'.format(abs(np.sort(roots3, axis=1)\n - (np.sort(roots1, axis=1))).max()))\n\nbest_time = 100\nfor i in range(runs):\n start = timeit.default_timer()\n roots = fqs.solve_multi_quadratic(*p.T)\n roots4 = np.array(roots).T\n stop = timeit.default_timer()\n time = stop - start\n best_time = min(best_time, time)\nprint('fqs.solve_multi_quadratic: {:.3f} ms (best of {} runs)'\n .format(best_time*1_000, runs))\nprint('max err: {:.2e}'.format(abs(np.sort(roots4, axis=1)\n - (np.sort(roots1, axis=1))).max()))\n# --------------------------------------------------------------------------- #\n"
] | [
[
"numpy.linalg.eigvals",
"numpy.sort",
"numpy.roots",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
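The eig_roots helper in the record above finds quadratic roots as the eigenvalues of the 2x2 companion matrix [[0, -b], [1, -a]] of the monic polynomial x^2 + a*x + b. A standalone sanity check with a known factorization (illustrative coefficients):

import numpy as np

p = np.array([[1.0, -3.0, 2.0]])   # x^2 - 3x + 2 = (x - 1)(x - 2)
a, b = p[:, 1] / p[:, 0], p[:, 2] / p[:, 0]
A = np.zeros((1, 2, 2))
A[:, 1, 0] = 1                     # subdiagonal of the companion matrix
A[:, :, 1] = -np.array([b, a]).T   # last column holds -b, -a
roots = np.sort(np.linalg.eigvals(A), axis=1)
assert np.allclose(roots, [[1.0, 2.0]])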
tamasorosz/artap | [
"e8df160bfc9c378c3fc96b0b86e92d75d89cf26b"
] | [
"examples/mechanical_design/gear_design.py"
] | [
"from artap.problem import Problem\nfrom artap.algorithm_genetic import NSGAII\nfrom artap.results import Results\n\nimport matplotlib.pyplot as plt\n\n\nclass GearDesignProblem(Problem):\n \"\"\"\n Example from K.DEb Multi-objective evolutionary optimization problems, Wiley, 2001.\n pp 434.\n\n The objective of the task is to find the optimal turn numbers of a gearbox gear's.\n The gearbox contains for gears, the required gear ratio is 1/6.931, the number of\n the teeth must be integer numbers, all of these four variables must to be integers.\n\n The variable vector: x = (x1, x2, x3, x4) = (Td, Tb, Ta, Tf)\n\n The problem can be formulated as a two variable optimization function:\n\n The first goal function's role is to minimize the error between the obtained and\n the realized gear ratio:\n\n Minimize f1 = (1./6.931 - x1*x2/(x3*x4))**2.\n\n where x is the solution vector, contains the number of the teeths, these numbers\n are strictly integers\n\n Minimize f2 = max(x1, x2, x3, x4)\n\n subject to\n\n x1 e [12, 60] strictly integer\n x2 e [12, 60] strictly integer\n x3 e [12, 60] strictly integer\n x4 e [12, 60] strictly integer\n\n The two extremal solution from [DEB]:\n\n - solution E : x = (12, 12, 27, 37); Error f1 = 1.83e-8; Max teeth number = 37\n - solution D : x = (12, 12, 13, 13); Error f1 = 5.01e-1; Max teeth number = 13\n\n References:\n\n .: [DEB] Deb, K. (2001). Multi-objective optimization using evolutionary algorithms (Vol. 16). John Wiley & Sons.\n \"\"\"\n def set(self):\n\n # Not mandatory to give a name for the test problem\n self.name = 'Gear Design'\n\n # Defines x_1 and x_2, which are the optimized parameters\n # and the bounds 'defines' the constraints of the optimization problem\n # nsga -- ii algorithm doesn't need an initial value for the definition\n self.parameters = [{'name':'x1', 'bounds': [12, 60], 'parameter_type':'integer'},\n {'name':'x2', 'bounds': [12, 60], 'parameter_type':'integer'},\n {'name':'x3', 'bounds': [12, 60], 'parameter_type':'integer'},\n {'name':'x4', 'bounds': [12, 60], 'parameter_type':'integer'}]\n\n # The two, separate optimization functions and the direction of the optimization\n # is set to minimization. It is also possible to use the maximize keyword.\n self.costs = [{'name': 'f_1', 'criteria': 'minimize'},\n {'name': 'f_2', 'criteria': 'minimize'}]\n\n def evaluate(self, x):\n f1 = (1./6.931 - (x.vector[0]*x.vector[1])/(x.vector[2]*x.vector[3]))**2.\n f2 = max(x.vector)\n return [f1, f2]\n\n\n# Initialization of the problem\nproblem = GearDesignProblem()\n\n# Perform the optimization iterating over 100 times on 100 individuals.\nalgorithm = NSGAII(problem)\nalgorithm.options['max_population_number'] = 100\nalgorithm.options['max_population_size'] = 100\nalgorithm.run()\n\n# Post - processing the results\n# reads in the result values into the b, results class\nb = Results(problem)\n# finding the pareto values\nsolution = b.pareto_values()\n\nprint(solution)\n\n# Plotting out the resulting hyperbola with matplotlib\nplt.scatter([s[0] for s in solution],\n [s[1] for s in solution])\n\nplt.xlabel(\"$f_1(x)$ - Error\")\nplt.ylabel(\"$f_2(x)$ - Maximum size\")\n\nplt.show()\n"
] | [
[
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
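A quick arithmetic check of "solution E" quoted in the record's docstring: with teeth (12, 12, 27, 37) the ratio error is (1/6.931 - 144/999)^2 ≈ 1.83e-8, matching the stated value.

f1 = (1.0 / 6.931 - (12 * 12) / (27 * 37)) ** 2
print(f1)  # ~1.827e-08, i.e. the 1.83e-8 quoted for solution E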
jordanopensource/arabic-ocr-studygroup | [
"3a39593ec28976c4209c813c87d0f37db72dcc03"
] | [
"starting-code.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef to_categorical (y, num_classes=None):\n y = np.array(y, dtype='int').ravel()\n if not num_classes: num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes))\n categorical[np.arange(n), y] = 1\n return categorical\n\n\ndef load_datasets (files_path: str):\n\tfrom pandas import read_csv\n\ttrain_x = read_csv(\"{}csvTrainImages 13440x1024.csv\".format(files_path), header=None).values.astype('float32').reshape([-1, 32, 32, 1]) / 255.0\n\ttrain_y = read_csv(\"{}csvTrainLabel 13440x1.csv\".format(files_path), header=None).values.astype('int32') - 1\n\ttest_x = read_csv(\"{}csvTestImages 3360x1024.csv\".format(files_path), header=None).values.astype('float32').reshape([-1, 32, 32, 1]) / 255.0\n\ttest_y = read_csv(\"{}csvTestLabel 3360x1.csv\".format(files_path), header=None).values.astype('int32') - 1\n\treturn train_x, train_y, test_x, test_y\n\n\ndef accuracy (y_true: np.ndarray, y_pred: np.ndarray):\n\t# ======= Just some sanity checks ========\n\tassert isinstance(y_true, np.ndarray)\n\tassert isinstance(y_pred, np.ndarray)\n\tassert len(y_true.shape) == 2\n\tassert len(y_pred.shape) == 2\n\tassert y_true.shape[0] == 28\n\tassert y_pred.shape[0] == 28\n\tassert y_true.shape[1] == y_pred.shape[1]\n\t# ======= All systems are go! ============\n\n\tresults = np.zeros((28,))\n\tfor i in range(y_true.shape[1]):\n\t\tresults += np.logical_and(y_true[:, i] > 0.5, y_pred[:, i] > 0.5)\n\n\tsummation = np.sum(y_true, axis=1)\n\tassert summation.shape[0] == 28\n\n\taccuracy_per_class = (results / summation) * 100.0\n\toverall_accuracy = np.mean(accuracy_per_class)\n\n\treturn overall_accuracy, accuracy_per_class\n\n\ndef plot_randomly (training_set: np.ndarray, training_labels: np.ndarray, testing_set: np.ndarray, testing_labels: np.ndarray):\n\tfrom random import randint\n\n\tarabic_labels = [\n\t\t'alef', 'beh', 'teh', 'theh', 'jeem', 'hah', 'khah', 'dal', 'thal',\n\t\t'reh', 'zah', 'seen', 'sheen', 'sad', 'dad', 'tah', 'zah', 'ain',\n\t\t'ghain', 'feh', 'qaf', 'kaf', 'lam', 'meem', 'noon', 'heh', 'waw', 'yeh',\n\t]\n\n\tf, axarr = plt.subplots(4, 4)\n\tsubplots = []\n\tfor i in range(axarr.shape[0]):\n\t\tfor j in range(axarr.shape[1]):\n\t\t\tsubplots.append(axarr[i, j])\n\n\tfor sp_index, subplot in enumerate(subplots):\n\t\tif sp_index < int(len(subplots) / 2):\n\t\t\tsubplot_dataset = \"training\"\n\t\t\trandom_index = randint(0, training_set.shape[0] - 1)\n\t\t\tsubplot_image = training_set[random_index]\n\t\t\tsubplot_class = training_labels[random_index][0]\n\t\telse:\n\t\t\tsubplot_dataset = \"testing\"\n\t\t\trandom_index = randint(0, testing_set.shape[0] - 1)\n\t\t\tsubplot_image = testing_set[random_index]\n\t\t\tsubplot_class = testing_labels[random_index][0]\n\n\t\tsubplot_title = \"Image #{}, {} set, class {}\".format(random_index, subplot_dataset, arabic_labels[subplot_class])\n\t\tsubplot.imshow(subplot_image.squeeze().T, cmap='gray')\n\t\tsubplot.axis('off')\n\t\tsubplot.set_title(subplot_title, size=8)\n\n\tplt.show()\n\n\ndef train_model (training_set: np.ndarray, training_labels: np.ndarray, testing_set: np.ndarray, testing_labels: np.ndarray):\n\tpass\n\n\nif __name__ == \"__main__\":\n\t'''\n\tWelcome to the JOSA Deep Learning study groups first homework! In this homework, we'll be\n\tclassifying handwritten Arabic letters (all 28 of them) using whatever neural network you want to build.\n\n\tStart by loading in your training + testing datasets using the load_datasets function. 
The\n\tload_datasets function takes a path as its argument. In the folder located by the path, Python should find\n\tfour CSV files with their original names.\n\t'''\n\ttrain, train_labels, test, test_labels = load_datasets(\"path/to/your/datasets\")\n\n\t'''\n\tAlways make sure the shape of your dataset is correct! Your output should look like this:\n\n\t\t(13440, 32, 32, 1)\n\t\t(13440, 1)\n\t\t(3360, 32, 32, 1)\n\t\t(3360, 1)\n\t'''\n\tprint(*[x.shape for x in [train, train_labels, test, test_labels]], sep=\"\\n\")\n\n\t'''\n\tWhat does our dataset look like? The plot_randomly function will do some matplotlib magic to randomly\n\tdisplay 4 images from the training set, and 4 images from the testing set. Above each image, you will\n\tsee the index of that image, the set it was taken from (training or testing), and most importantly the\n\tclass of the set. Our alphabet will be sorted by the \"Hijā’ī\" sorting, so \"alef\" will come first and\n\t\"yeh\" will come last.\n\t'''\n\tplot_randomly(train, train_labels, test, test_labels)\n\n\t'''\n\tFor reference, here's the naming we will use for each letter. The class numbers are therefore the\n\tindices of this list. For example, \"alef\" belongs to class 0, and \"yeh\" belongs to class 27.\n\t'''\n\tarabic_labels = [\n\t\t'alef', 'beh', 'teh', 'theh', 'jeem', 'hah', 'khah', 'dal', 'thal',\n\t\t'reh', 'zah', 'seen', 'sheen', 'sad', 'dad', 'tah', 'zah', 'ain',\n\t\t'ghain', 'feh', 'qaf', 'kaf', 'lam', 'meem', 'noon', 'heh', 'waw', 'yeh',\n\t]\n\n\t'''\n\tMore basic exploratory analysis of our datasets. Let's count the number of unique train/test labels. We\n\tshould get exactly 28. This is useful later when we build the output layer of our neural network.\n\t'''\n\tunique_classes = list(set(train_labels.squeeze().tolist() + test_labels.squeeze().tolist()))\n\tnumber_of_outputs = len(unique_classes)\n\tassert number_of_outputs == 28\n\n\t'''\n\tOne last important step: converting our output into a big binary vector. Remember that for classification\n\tproblems, we will output the probability that an input belongs to one specific class. When we have multiple\n\tclasses, we need an output for each class probability. The to_categorical function simply takes each\n\tlabel and converts it into a binary vector, something like [0, 0, ... 1, 0, 0]. The 1 in that vector is in\n\tthe same index as the corresponding label is in the arabic_labels list. You can check that out for yourself\n\tby printing train_labels[0].\n\t'''\n\ttrain_labels, test_labels = to_categorical(train_labels, number_of_outputs), to_categorical(test_labels, number_of_outputs)\n\n\t'''\n\tShow me what you got! I've left the train_model function empty for you, so you can experiment with\n\tthe train set, build your neural network, and try it out on the test set. Our only condition is that you\n\tuse the accuracy(y_true, y_pred) function to measure your accuracy on the train/test sets. Check that function\n\tout to see what it takes as input and what it returns as output. Happy coding!\n\t'''\n\ttrain_model(train, train_labels, test, test_labels)\n"
] | [
[
"numpy.logical_and",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.max",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
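The to_categorical helper in the record above one-hot encodes integer labels by indexing a zero matrix with (row, label) pairs. A tiny standalone restatement of that step (illustrative labels):

import numpy as np

y = np.array([0, 2, 1])
one_hot = np.zeros((y.shape[0], 3))
one_hot[np.arange(y.shape[0]), y] = 1
# one_hot -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]]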
jesperiksson/SoccermaticsForPython | [
"aeb6cdfd4dfd0acfc15e0d47024693c01ec241d8"
] | [
"classes.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 24 15:00:30 2020\n\n@author: jesper\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport seaborn as sn\n\nclass Table():\n # Makes a table at the end of a season\n def __init__(self):\n self.table = pd.DataFrame( # Initiate an empty table\n columns = ['Team','Points','Win','Draw','Lose','Goals for','Goals against','Goal difference']\n )\n def add_numbers(self,team_list): # Is called from the simulate_season method of the Stats class\n for i in range(len(team_list)):\n t = team_list[i]\n self.table = self.table.append(\n pd.DataFrame(\n [[t.name,(t.wins*3+t.draws*1),t.wins,t.draws,t.losses,\n t.goals_for,t.goals_against,(t.goals_for-t.goals_against)]],\n columns= ['Team','Points','Win','Draw','Lose','Goals for','Goals against','Goal difference']\n )\n )\n self.table = self.table.sort_values(by='Points',ascending=False)\n self.table.index = range(1,len(self.table)+1)\n \n def show_table(self):\n return self.table\n \nclass Team():\n # Team objects which populate the Table\n def __init__(self,name):\n self.name = name\n self.wins = 0\n self.draws = 0\n self.losses = 0\n self.goals_for = 0\n self.goals_against = 0\n \n def add_result(self,scored,conceded):\n if scored > conceded: # win\n self.wins += 1\n elif scored == conceded: # draw\n self.draws += 1\n else: # loss\n self.losses +=1 \n self.goals_for += scored\n self.goals_against += conceded\n\nclass Stats(): # raw data, teams, colors, parameters, etc. \n def __init__(self,df):\n self.df = df\n self.team_colors = {'Arsenal':'#ef0107', 'Aston Villa':'#95bfe5', 'Bournemouth':'#da291c', 'Brighton':'#0057b8',\n 'Burnley':'#6c1d45', 'Chelsea':'#034694', 'Crystal Palace':'#1b458f', 'Everton':'#003399',\n 'Leicester':'#003090', 'Liverpool':'#c8102e', 'Man City':'#6cabdd', 'Man United':'#da291c',\n 'Newcastle':'#241f20', 'Norwich':'#fff200', 'Sheffield United':'#ee2737', \n 'Southampton':'#d71920', 'Tottenham':'#132257', 'Watford':'#fbee23', 'West Ham':'#7a263a',\n 'Wolves':'#fdb913'}\n#https://towardsdatascience.com/visualizing-the-2019-20-english-premier-league-season-with-matplotlib-and-pandas-fd491a07cfda \n \n self.teams =list(set(df['HomeTeam'])) \n self.home_teams = list(df['HomeTeam'])\n self.away_teams = list(df['AwayTeam'])\n expected_values = pd.DataFrame(columns = ['Team','ExpectedScored','ExpectedConceded']) # initiate empty DataFrame\n # Naive approach, each team has a offense and a defense expected value\n # Generates a DataFrame with teams and their excpected values\n for i in range(len(self.teams)):\n avg_score = (np.sum(df.loc[df['HomeTeam'] == self.teams[i]]['FTHG']) + np.sum(df.loc[df['AwayTeam'] == self.teams[i]]['FTAG']))/(len(df)/len(self.teams)*2)\n avg_letin = (np.sum(df.loc[df['HomeTeam'] == self.teams[i]]['FTAG']) + np.sum(df.loc[df['AwayTeam'] == self.teams[i]]['FTHG']))/(len(df)/len(self.teams)*2)\n expected_values = expected_values.append( # Populate the DataFrame\n pd.DataFrame(\n [[self.teams[i],avg_score,avg_letin]], columns= ['Team','ExpectedScored','ExpectedConceded']\n )\n )\n expected_values.index = range(1,len(self.teams)+1)\n self.expected_values = expected_values # The input values for the naive approach\n \n # Including home advantage, each team has two home and away parameters\n # Generates a DataFrame with teams and their excpected values\n expected_values_home = pd.DataFrame(columns = ['Team','ExpectedScored','ExpectedConceded'])\n expected_values_away = 
pd.DataFrame(columns = ['Team','ExpectedScored','ExpectedConceded'])\n for i in range(len(self.teams)):\n avg_score_home = (np.sum(df.loc[df['HomeTeam'] == self.teams[i]]['FTHG']))/(len(df)/len(self.teams))\n avg_letin_home = (np.sum(df.loc[df['HomeTeam'] == self.teams[i]]['FTAG']))/(len(df)/len(self.teams))\n avg_score_away = (np.sum(df.loc[df['AwayTeam'] == self.teams[i]]['FTAG']))/(len(df)/len(self.teams))\n avg_letin_away = (np.sum(df.loc[df['AwayTeam'] == self.teams[i]]['FTHG']))/(len(df)/len(self.teams))\n expected_values_home = expected_values_home.append(\n pd.DataFrame(\n [[self.teams[i],avg_score_home,avg_letin_home]], columns = ['Team','ExpectedScored','ExpectedConceded'])\n )\n expected_values_away = expected_values_away.append(\n pd.DataFrame(\n [[self.teams[i],avg_score_away,avg_letin_away]], columns = ['Team','ExpectedScored','ExpectedConceded'])\n )\n expected_values_home.index = range(1,len(self.teams)+1)\n expected_values_away.index = range(1,len(self.teams)+1)\n self.expected_values_home = expected_values_home # The input values when \n self.expected_values_away = expected_values_away # considering home advantage\n\n def simulate_game_poisson(self,home_expected_scored, home_expected_conceded, away_expected_scored, away_expected_conceded):\n # Simple model to predict the result using poisson distribution\n home_expected = (home_expected_scored + away_expected_conceded)/2\n away_expected = (away_expected_scored + home_expected_conceded)/2\n home_goals = np.random.poisson(home_expected)\n away_goals = np.random.poisson(away_expected) \n return home_goals, away_goals\n \n\n \n def simulate_season(self):\n # A single season\n team_dict = {} # Using a dictionary to keep track of the Team instances\n for i in range(len(self.teams)):\n team_dict.update({\n self.teams[i] : Team(self.teams[i])})\n for i in range(len(self.df)):\n home_team = self.home_teams[i]\n away_team = self.away_teams[i]\n home_goals, away_goals = self.simulate_game_poisson(\n float(self.expected_values.loc[self.expected_values['Team'] == home_team]['ExpectedScored']),\n float(self.expected_values.loc[self.expected_values['Team'] == home_team]['ExpectedConceded']),\n float(self.expected_values.loc[self.expected_values['Team'] == away_team]['ExpectedScored']),\n float(self.expected_values.loc[self.expected_values['Team'] == away_team]['ExpectedConceded'])\n )\n team_dict[home_team].add_result(home_goals,away_goals)\n team_dict[away_team].add_result(away_goals,home_goals) \n table = Table()\n table.add_numbers(list(team_dict.values()))\n return table\n \n def simulate_season_homeaway(self): # Same as above but considering home advantage and away disadvantage\n team_dict = {}\n for i in range(len(self.teams)):\n team_dict.update({\n self.teams[i] : Team(self.teams[i])})\n for i in range(len(self.df)):\n home_team = self.home_teams[i]\n away_team = self.away_teams[i]\n home_goals, away_goals = self.simulate_game_poisson(\n float(self.expected_values_home.loc[self.expected_values_home['Team'] == home_team]['ExpectedScored']),\n float(self.expected_values_home.loc[self.expected_values_home['Team'] == home_team]['ExpectedConceded']),\n float(self.expected_values_away.loc[self.expected_values_away['Team'] == away_team]['ExpectedScored']),\n float(self.expected_values_away.loc[self.expected_values_away['Team'] == away_team]['ExpectedConceded'])\n )\n team_dict[home_team].add_result(home_goals,away_goals)\n team_dict[away_team].add_result(away_goals,home_goals) \n table = Table()\n 
table.add_numbers(list(team_dict.values())) \n return table\n \n def poisson_regression(self):\n # TBI, using a regression model from some module and compare it to my results\n pass\n\nclass Simulation(Stats): # Same as Stats but for simulating several seasons\n def __init__(self,df,n,team_of_interest = 'Liverpool'):\n super().__init__(df)\n self.n_seasons = n # Number of seasons to simulate\n self.team_of_interest = team_of_interest\n \n def simulate_seasons(self):\n season_list = np.array([])\n for i in range(self.n_seasons): # building a list of all seasons\n season_list = np.append(season_list,self.simulate_season())\n self.season_list = season_list # Last season is stored \n \n def simulate_seasons_homeaway(self):\n season_list = np.array([])\n for i in range(self.n_seasons): # building a list of all seasons\n season_list = np.append(season_list,self.simulate_season_homeaway())\n self.season_list_homeaway = season_list\n \n def plot_hist(self):\n #Plot histogram with selected teams positions\n fig = plt.figure(dpi=400)\n ax = fig.add_subplot(111)\n cut_off = 6 # The lowest position for which to extend the histogram\n # teams = list(self.team_colors.keys()) # to use all teams\n teams = ['Man City','Liverpool','Arsenal','Chelsea','Man United'] # to use selected teams\n places = np.array([])\n for team in teams:\n try: # The main case\n places = np.concatenate((places, # store the positions for each team for each season\n [self.season_list[season].table.loc[self.season_list[season].table['Team'] == team].index for season in range(len(self.season_list))]),\n axis = -1)\n except ValueError: # accounting for the edge case where there is no season list\n places = np.array(\n [self.season_list[season].table.loc[self.season_list[season].table['Team'] == team].index for season in range(len(self.season_list))])\n \n plt.hist(places, \n bins = np.arange(1, cut_off + 1.5) - 0.5, # Putting the bins over the xtick \n histtype = 'bar',\n color = [self.team_colors[t] for t in teams], # colors of selected teams\n ec = 'k', # Edgecolor \n alpha = 0.9, \n zorder = 2)\n plt.xticks(range(int(np.min(places)),int(np.max(places)+1)))\n plt.xlabel('Position')\n plt.ylabel('Frequency')\n plt.title('End of season placement distribution over ' + str(len(self.season_list))+' seasons')\n plt.legend(teams)\n ax.set_facecolor('lightgray')\n ax.grid(color = 'white',linewidth = 0.2,zorder = 1)\n plt.show()\n \n def calc_freq(self):\n # Calculate frequency of each final position for each team\n try: # If there is a simulation with results that can be counted\n teams = self.teams\n places = np.array([])\n for team in teams:\n try: # This is redundant \n places = np.concatenate((places,\n [self.season_list[season].table.loc[self.season_list[season].table['Team'] == team].index for season in range(len(self.season_list))]),\n axis = -1)\n except ValueError:\n places = np.array(\n [self.season_list[season].table.loc[self.season_list[season].table['Team'] == team].index for season in range(len(self.season_list))])\n \n freq = np.array([sum(places[:,t]==place+1) for t in range(len(teams)) for place in range(len(teams))])\n freq = freq.reshape((len(teams),len(teams))) # rows : teams, cols : places\n self.freq = freq\n except NameError:\n print('No simulation has been ran')\n\n \n def plot_pie(self,rows = 4,cols = 5): # Draw a pie chart for the distribution for each place\n def my_autopct(pct): # Utility function for plot_pie\n return ('%.2f' % pct) if pct > 5 else ''\n n_places = cols*rows\n for i in range(n_places):\n 
plt.figure(dpi=400)\n            #ax = fig.add_subplot(rows,cols,i+1)\n            plt.pie(\n                self.freq[:,i],\n                explode = [0.1 if t == self.team_of_interest else 0 for t in self.teams],\n                labels = list(self.team_colors.keys()),\n                colors = list(self.team_colors.values()),\n                autopct=my_autopct, # Draw percentage\n                labeldistance = 1,\n                rotatelabels=True,\n                radius=1.5)\n            plt.tight_layout()\n            plt.title(\n                'Proportion of the times each team ended up in position '+str(i+1),\n                loc = 'center',\n                pad = 100)\n            plt.show()\n \n    def plot_position_crosstab(self): # Plots the heatmap \n        fig = plt.figure(dpi=400)\n        fig.set_size_inches(8,8)\n        crosstab = pd.DataFrame(\n            self.freq,\n            columns=range(1,21),\n            index = self.teams)\n        crosstab = crosstab.sort_values(\n            by=[crosstab.columns[i] for i in range(len(crosstab.columns))],\n            ascending=[False]*len(crosstab.columns))\n        self.crosstab = crosstab # If you want to do some statistics\n        crosstab_percent = crosstab.div(np.ones((len(crosstab.columns),len(crosstab.index)))*self.n_seasons)\n        crosstab_percent = crosstab_percent.mul(np.ones((len(crosstab.columns),len(crosstab.index)))*100)\n        ax = sn.heatmap( # TBD, change the annotations to percentage of all seasons\n            data=crosstab_percent.T,\n            norm = colors.PowerNorm(gamma=0.4), # More emphasis towards the green end\n            cmap = 'RdYlGn', # Is there a better one?\n            cbar = False,\n            annot = True,\n            fmt=\".0f\",\n            xticklabels=True, \n            yticklabels=True)\n        ax.set_yticks(range(1,21))\n        #ax.tight_layout()\n        ax.plot()"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.colors.PowerNorm",
"numpy.min",
"numpy.arange",
"pandas.DataFrame",
"numpy.random.poisson",
"matplotlib.pyplot.ylabel",
"numpy.max",
"matplotlib.pyplot.xlabel",
"numpy.array",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
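The match model in the record above averages one side's expected goals scored with the other side's expected goals conceded, then samples both scores from Poisson distributions. A minimal standalone sketch (the expected-goal inputs are made-up numbers, and we use NumPy's Generator API rather than the record's np.random.poisson):

import numpy as np

rng = np.random.default_rng(0)
home_attack, home_concede = 1.8, 1.0   # illustrative per-match averages
away_attack, away_concede = 1.2, 1.4
home_expected = (home_attack + away_concede) / 2
away_expected = (away_attack + home_concede) / 2
home_goals = rng.poisson(home_expected)
away_goals = rng.poisson(away_expected)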
easai/stat | [
"b0485454889531af15073d6b654c5a8ac70e6a98"
] | [
"src/stat/variation.py"
] | [
"\"\"\"\n変動係数\nCV = Var[X]/E[X]\nscipy.stats.variation(array)\n\"\"\"\n\nfrom scipy import stats\n\nBiden=[28.2,28.3,25.6]\nSanders=[17.5,17.6,15.7]\nWarren=[21.3,20.6,22.7]\n\nprint(stats.variation(Biden))\nprint(stats.variation(Sanders))\nprint(stats.variation(Warren))\n\n\"\"\"\n0.04567194460232075\n0.051558271077593726\n0.04054427508888167\n\"\"\""
] | [
[
"scipy.stats.variation"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
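scipy.stats.variation computes std[X]/E[X] (with ddof=0 by default), not variance over mean. A quick standalone check against the record's first series:

import numpy as np
from scipy import stats

biden = np.array([28.2, 28.3, 25.6])
assert np.isclose(stats.variation(biden), biden.std() / biden.mean())
print(stats.variation(biden))  # 0.04567194460232075, as in the record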
rbehal/CMED-Image-Analysis | [
"6fed72a381357fd964e5f8aab05c9810c419e681"
] | [
"ImageViewer.py"
] | [
"from PyQt5.QtGui import QImage, QPixmap, QPainter\nfrom PyQt5.QtWidgets import QApplication, QWidget\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.figure import Figure\nfrom numpy import arange\n\nfrom Image import Image\nfrom ImageCollection import ImageCollection\nfrom Export import ExportThread\n\nimport os, re\n\nclass ImageViewer:\n \"\"\"Image viewer class to display an image with zoom and pan functionaities.\"\"\"\n def __init__(self, imageLabels, window):\n self.bfImages = ImageCollection(\"BF\", imageLabels[0]) # Initialize image collection for each tab respectively\n self.trImages = ImageCollection(\"TR\", imageLabels[1]) \n\n self.currImageCol = self.trImages # Current image collection\n\n self.window = window\n self.qimage_scaled = QImage() # Scaled image to fit to the size of currImageCol.qlabel\n self.qpixmap = QPixmap() # QPixmap to fill the currImageCol.qlabel\n\n self.zoomX = 1 # Zoom factor w.r.t size of currImageCol.qlabel\n self.position = [0, 0] # Position of top left corner of currImageCol.qlabel w.r.t. qimage_scaled\n self.mousex, self.mousey = 0, 0\n self.panFlag = False # To enable or disable pan\n self.pressed = False # Mouse pressed\n\n self.basePath = \"\"\n self.dayFolders = [] # If populated, folder structure is in Days\n self.isZstack = False # If True, folder structure in in Z-Stack\n self.sharpnessGraphs = [] # Sharpness graph windows for Z-Stacks\n\n self.currImage = None # Current Image object being displayed in the viewer\n self.currImageIdx = -1 # Index of current image object being displayed (in qlist and col.list)\n self.numImages = -1 # Number of images in the current list of images being displayed\n self.qImageNameItems = [] # List of qitems for image names that populate the list in the GUI\n\n self.initializeQLabels()\n\n def initializeQLabels(self):\n \"\"\"\n Each image on each tab is represented by a qlabel object. This function\n initializes all mouse events and policies for each of the qlabels.\n \"\"\"\n self.trImages.qlabel.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)\n self.trImages.qlabel.setCursor(QtCore.Qt.OpenHandCursor)\n self.bfImages.qlabel.setSizePolicy(QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Ignored)\n self.bfImages.qlabel.setCursor(QtCore.Qt.OpenHandCursor)\n\n self.trImages.qlabel.mousePressEvent = self.mousePressAction\n self.trImages.qlabel.mouseMoveEvent = self.mouseMoveAction\n self.trImages.qlabel.mouseReleaseEvent = self.mouseReleaseAction\n self.trImages.qlabel.setMouseTracking(True)\n self.bfImages.qlabel.mousePressEvent = self.mousePressAction\n self.bfImages.qlabel.mouseMoveEvent = self.mouseMoveAction\n self.bfImages.qlabel.mouseReleaseEvent = self.mouseReleaseAction\n self.bfImages.qlabel.setMouseTracking(True)\n\n def onResize(self):\n \"\"\"Things to do when image is resized\"\"\"\n # Pixmap is the basis of the viwer. 
It is the medium for displaying, panning, and zooming around the QImage\n self.qpixmap = QPixmap(self.currImageCol.qlabel.size())\n self.qpixmap.fill(QtCore.Qt.gray)\n self.qimage_scaled = self.qimage.scaled(self.currImageCol.qlabel.width() * self.zoomX, self.currImageCol.qlabel.height() * self.zoomX, QtCore.Qt.KeepAspectRatioByExpanding)\n self.scaleUpdate()\n\n def getImages(self, pBar):\n \"\"\"\n Initialize and populate bfImages and trImages ImageCollection.\n Args:\n pBar: Thread object used to emit signals to the progress bar\n \"\"\"\n VALID_FORMAT = ('.TIFF', '.TIF') # Image formats supported\n id_pattern = \"(p\\d{1,4})\" # Image id example: 'scan_Plate_R_{p03}_0_A02f00d4.TIF',\n zStack_pattern = r\"z(\\d{1,4}).*d(\\d)\" # Zstack image example: 'EGFP_1mm_Plate_R_p00_{z79}_0_A02f00{d4}.TIF'\n\n self.bfImages.reset()\n self.trImages.reset()\n\n # If data is timelapse divided into BF and Texas Red Folders\n if len(self.dayFolders) == 0 and not self.isZstack:\n # Initializing progress bar\n totalNumFiles = len(os.listdir(self.bfImages.path)) + len(os.listdir(self.trImages.path))\n pBar.startPbar.emit(totalNumFiles)\n\n # Populate Bright Field image list\n for file in os.listdir(self.bfImages.path):\n pBar.incrementPbar.emit()\n if file.upper().endswith(VALID_FORMAT):\n im_path = os.path.join(self.bfImages.path, file)\n\n match = re.search(id_pattern, file)\n if match:\n id_ = match.group()\n else:\n continue\n\n image_obj = Image(id_, file, \"BF\", im_path, self)\n self.bfImages.list.append(image_obj)\n\n # Populate Texas Red image list\n for file in os.listdir(self.trImages.path):\n pBar.incrementPbar.emit()\n if file.upper().endswith(VALID_FORMAT):\n im_path = os.path.join(self.trImages.path, file)\n\n match = re.search(id_pattern, file)\n id_ = match.group()\n\n image_obj = Image(id_, file, \"TR\", im_path, self)\n self.trImages.list.append(image_obj)\n elif self.isZstack: # If Z-stack folder structure\n # Initialize progress bar\n pBar.startPbar.emit(len(os.listdir(self.basePath)))\n\n for file in os.listdir(self.basePath):\n pBar.incrementPbar.emit()\n if file.upper().endswith(VALID_FORMAT):\n im_path = os.path.join(self.basePath, file)\n\n match = re.search(zStack_pattern, file)\n if not match or len(match.groups()) != 2:\n continue\n # Needs (match.groups(),) to unpack tuple properly\n for id_, type_ in (match.groups(),):\n if type_ == \"4\": # Number beside \"d\" in the image name\n image_obj = Image(id_, file, \"BF\", im_path, self)\n self.bfImages.list.append(image_obj)\n elif type_ == \"2\": # Number beside \"d\" in the image name\n image_obj = Image(id_, file, \"TR\", im_path, self)\n self.trImages.list.append(image_obj)\n else:\n continue\n else: # Or else it must be daily folders\n # Initializing progress bar\n totalNumFiles = len(self.dayFolders*3) # 3 files in every day folder\n pBar.startPbar.emit(totalNumFiles)\n\n day_file_pattern = \"_.{6}d(\\d)\" # Looks for _ followed by 6 characters + d, and then a digit after that\n\n for day_num, day_path in self.dayFolders:\n for file in os.listdir(day_path):\n pBar.incrementPbar.emit()\n\n im_path = os.path.join(day_path, file)\n # All files with day structure have p00, so in id and name it's replaced with p[day_num]\n id_ = \"p{0:0=2d}\".format(int(day_num))\n name = file.replace(\"p00\",id_)\n\n match = re.search(day_file_pattern, file)\n if match:\n groups = match.groups()[0]\n if groups == \"4\": # Number beside \"d\" in the image name\n image_obj = Image(id_, name, \"BF\", im_path, self)\n 
self.bfImages.list.append(image_obj)\n elif groups == \"3\": # Number beside \"d\" in the image name\n image_obj = Image(id_, name, \"TR\", im_path, self)\n self.trImages.list.append(image_obj)\n else:\n continue\n\n self.bfImages.initMap()\n self.trImages.initMap()\n return\n\n def selectDir(self):\n \"\"\"\n Select a directory, then make and initialize ImageCollections based on folder structure.\n --> 3 possible folder structures: timelapse, day folders, and z-stack\n \"\"\"\n # open 'select folder' dialog box; bail out before appending the\n # trailing slash, otherwise the empty-selection check can never fire\n selectedDir = str(QtWidgets.QFileDialog.getExistingDirectory(self.window, \"Select Directory\"))\n if not selectedDir:\n QtWidgets.QMessageBox.warning(self.window, 'No Folder Selected', 'Please select a valid Folder')\n return\n self.basePath = selectedDir + \"/\"\n\n # Get array of subdirectories with formatting removed (dir_clean)\n subdirs = next(os.walk(self.basePath))[1]\n dirs = [os.path.join(self.basePath, dir_) for dir_ in subdirs]\n dir_clean = list(map(str.strip, list(map(str.upper, subdirs))))\n\n day_pattern = r\"DAY(\d{1,2})\" # Regex for finding day folders\n for i in range(len(dir_clean)):\n dir_ = dir_clean[i]\n\n # If folder is structured in terms of BF/Texas Red\n if \"BF\" == dir_:\n self.bfImages.path = dirs[i]\n if \"TEXAS RED\" == dir_:\n self.trImages.path = dirs[i]\n\n # If folder is structured in terms of Day folders\n match = re.search(day_pattern, dir_)\n if match:\n day_num = match.groups()[0]\n self.dayFolders.append((day_num, dirs[i]))\n\n if len(dir_clean) == 0:\n # If folder is structured in terms of Z-Stack\n zStack_pattern = r\"z(\d{1,4}).*d(\d)\"\n for file in os.listdir(self.basePath):\n match = re.search(zStack_pattern, file)\n if match:\n self.isZstack = True\n break\n\n if len(self.dayFolders) + len(self.trImages.path + self.bfImages.path) == 0 and not self.isZstack:\n QtWidgets.QMessageBox.warning(self.window, 'Improper Folder Structure', 'Folder structure selected is not supported. Please refer to available documentation.')\n return\n elif self.bfImages.path == \"\" and len(self.dayFolders) == 0 and not self.isZstack:\n QtWidgets.QMessageBox.warning(self.window, 'Missing Folder', 'Brightfield (BF) folder cannot be found. Please select directory with BF folder.')\n return\n elif self.trImages.path == \"\" and len(self.dayFolders) == 0 and not self.isZstack:\n QtWidgets.QMessageBox.warning(self.window, 'Missing Folder', 'Texas Red folder cannot be found.
Please select directory with Texas Red folder.')\n return\n\n self.window.tabWidget.setCurrentIndex(1)\n\n # Pass off loading images to a separate thread as it can be computationally intensive\n self.thread = InitializeImagesThread(self)\n\n self.thread.startPbar.connect(self.window.startPbar)\n self.thread.incrementPbar.connect(self.window.incrementPbar)\n self.thread.finishPbar.connect(self.window.finishPbar)\n self.thread.finished.connect(self.finishedInitializing)\n\n self.thread.start()\n\n def finishedInitializing(self):\n \"\"\"Set the current image and list after loading, then display\"\"\"\n # Display first image of TR and enable Pan\n self.currImageIdx = 0\n self.currImage = self.currImageCol.list[self.currImageIdx]\n self.numImages = len(self.currImageCol.list)\n\n self.changeImageList(self.currImageCol.list) # Initialize list of image names\n self.enablePan(True)\n self.resetZoom()\n # Enable the next image button on the gui if multiple images are loaded\n if self.numImages > 1:\n self.window.next_im.setEnabled(True)\n\n if self.isZstack:\n self.drawSharpnessGraphs()\n\n def resizeEvent(self, evt):\n \"\"\"\n Called when the image needs to resize.\n \"\"\"\n if self.currImageIdx >= 0:\n self.onResize()\n\n def nextImg(self):\n \"\"\"Loads the next image in the list.\"\"\"\n if self.currImage is None:\n return\n if self.currImageIdx < self.numImages - 1:\n self.currImageIdx += 1\n self.changeImage()\n self.qImageNameItems[self.currImageIdx].setSelected(True)\n else:\n QtWidgets.QMessageBox.warning(self.window, 'Sorry', 'No more Images!')\n\n def prevImg(self):\n \"\"\"Loads the previous image in the list.\"\"\"\n if self.currImage is None:\n return\n if self.currImageIdx > 0:\n self.currImageIdx -= 1\n self.changeImage()\n self.qImageNameItems[self.currImageIdx].setSelected(True)\n else:\n QtWidgets.QMessageBox.warning(self.window, 'Sorry', 'No previous Image!')\n\n def item_click(self, item):\n \"\"\"Called when the user clicks an image name in the list on the side. Navigates to and displays that image.\"\"\"\n if self.currImageCol is not None:\n self.currImageIdx = self.qImageNameItems.index(item)\n self.changeImage()\n\n def action_move(self):\n \"\"\"Called when the user attempts to click and drag the mouse across the image (pan)\"\"\"\n if self.window.toggle_move.isChecked():\n self.enablePan(True)\n\n def loadImage(self):\n \"\"\"Loads and displays the current image.\"\"\"\n if self.currImage is None:\n return\n\n # Redrawing if there is a base image allows for image shapes to be mapped to base shapes\n if self.currImageCol.baseImage is not None and not self.isZstack:\n self.currImage.redraw()\n\n self.qimage = self.currImage.imgQt\n self.qpixmap = QPixmap(self.currImageCol.qlabel.size())\n if not self.qimage.isNull():\n self.qimage_scaled = self.qimage.scaled(self.currImageCol.qlabel.width(), self.currImageCol.qlabel.height(), QtCore.Qt.KeepAspectRatioByExpanding)\n self.scaleUpdate()\n else:\n self.window.statusbar.showMessage('Cannot open this image!
Try another one.', 5000)\n\n def scaleUpdate(self):\n \"\"\"\n This function actually draws the scaled image to currImageCol.qlabel.\n It will be repeatedly called when zooming or panning.\n \"\"\"\n if not self.qimage_scaled.isNull():\n # Check if position is within limits to prevent unbounded panning.\n px, py = self.position\n px = px if (px <= self.qimage_scaled.width() - self.currImageCol.qlabel.width()) else (self.qimage_scaled.width() - self.currImageCol.qlabel.width())\n py = py if (py <= self.qimage_scaled.height() - self.currImageCol.qlabel.height()) else (self.qimage_scaled.height() - self.currImageCol.qlabel.height())\n px = px if (px >= 0) else 0\n py = py if (py >= 0) else 0\n self.position = (px, py)\n\n if self.zoomX == 1:\n self.qpixmap.fill(QtCore.Qt.white)\n # The act of painting the qpixmap\n painter = QPainter()\n painter.begin(self.qpixmap)\n painter.drawImage(QtCore.QPoint(0, 0), self.qimage_scaled,\n QtCore.QRect(self.position[0], self.position[1], self.currImageCol.qlabel.width(), self.currImageCol.qlabel.height()))\n painter.end()\n\n self.currImageCol.qlabel.setPixmap(self.qpixmap)\n\n def mousePressAction(self, QMouseEvent):\n \"\"\"Called when the mouse is pressed\"\"\"\n if self.panFlag:\n self.pressed = QMouseEvent.pos() # Starting point of drag vector\n self.anchor = self.position # Save the pan position when panning starts\n\n def mouseMoveAction(self, QMouseEvent):\n \"\"\"Called when the mouse is moved\"\"\"\n self.mousex, self.mousey = QMouseEvent.pos().x(), QMouseEvent.pos().y()\n if self.pressed:\n dx, dy = self.mousex - self.pressed.x(), self.mousey - self.pressed.y() # Calculate the drag vector\n self.position = self.anchor[0] - dx, self.anchor[1] - dy # Update pan position using drag vector\n self.scaleUpdate() # Show the image with updated pan position\n\n def mouseReleaseAction(self, QMouseEvent):\n \"\"\"Called when the mouse is released\"\"\"\n self.pressed = None # Clear the starting point of drag vector\n\n def zoomPlus(self, scroll=False):\n \"\"\"\n Called when the zoom + button is clicked or CTRL+Scroll/trackpad zoom is used.\n Args:\n scroll: True if the function is not called through the button, but CTRL+scroll or trackpad\n \"\"\"\n px, py = self.position\n\n if scroll:\n # Find where mousex is relative to the original image, then multiply\n # by the new zoom, (zoomX + 1) --> ((self.mousex+px)/self.zoomX)*(self.zoomX+1)\n img_x = (self.mousex+px) + (self.mousex+px)/self.zoomX\n img_y = (self.mousey+py) + (self.mousey+py)/self.zoomX\n # Subtract mousex and mousey to get the new top left corner for scaleUpdate\n px, py = img_x - self.mousex, img_y - self.mousey\n else:\n # If zoom button is pressed --> zoom based on pre-defined amount (2x)\n px += self.currImageCol.qlabel.width()/2\n py += self.currImageCol.qlabel.height()/2\n\n self.zoomX += 1\n self.position = (px, py)\n self.qimage_scaled = self.qimage.scaled(self.currImageCol.qlabel.width() * self.zoomX, self.currImageCol.qlabel.height() * self.zoomX, QtCore.Qt.KeepAspectRatioByExpanding)\n self.scaleUpdate()\n\n def zoomMinus(self, scroll=False):\n \"\"\"\n Called when the zoom - button is clicked or CTRL+Scroll/trackpad zoom is used.\n Args:\n scroll: True if the function is not called through the button, but CTRL+scroll or trackpad\n \"\"\"\n if self.zoomX > 1:\n px, py = self.position\n\n # Same as zoomPlus but in reverse\n if scroll:\n img_x = (self.mousex+px) - (self.mousex+px)/self.zoomX\n img_y = (self.mousey+py) -
(self.mousey+py)/self.zoomX\n px, py = img_x - self.mousex, img_y - self.mousey\n else:\n px -= self.currImageCol.qlabel.width()/2\n py -= self.currImageCol.qlabel.height()/2\n\n self.zoomX -= 1\n self.position = (px, py)\n self.qimage_scaled = self.qimage.scaled(self.currImageCol.qlabel.width() * self.zoomX, self.currImageCol.qlabel.height() * self.zoomX, QtCore.Qt.KeepAspectRatioByExpanding)\n self.scaleUpdate()\n\n def resetZoom(self):\n \"\"\"Called when zoom reset button is clicked\"\"\"\n if self.currImage is None:\n return\n self.zoomX = 1\n self.position = [0, 0]\n self.qimage_scaled = self.qimage.scaled(self.currImageCol.qlabel.width() * self.zoomX, self.currImageCol.qlabel.height() * self.zoomX, QtCore.Qt.KeepAspectRatioByExpanding)\n self.scaleUpdate()\n\n def enablePan(self, value):\n \"\"\"Called when enable pan button is clicked\"\"\"\n self.panFlag = value\n\n def changeImageList(self, list_):\n \"\"\"\n Changes image list between TR images and BF images. \n Args:\n list_: List of image objects\n \"\"\"\n self.changeImage()\n # Make a list of qitems for the image names\n self.qImageNameItems = [QtWidgets.QListWidgetItem(img.name) for img in list_]\n self.window.qlist_images.clear()\n for item in self.qImageNameItems:\n self.window.qlist_images.addItem(item)\n self.qImageNameItems[self.currImageIdx].setSelected(True)\n\n def changeTab(self, idx):\n \"\"\"\n Called when one of the tabs in the GUI is clicked\n Args:\n idx: Index of tab that is clicked. 1 = Red Channel (TR), 2 = Bright Field (BF)\n \"\"\"\n if self.numImages > 0:\n if idx == 1:\n self.window.checkBox.setCheckState(0) # Draw Ellipses for red channel\n self.currImageCol = self.trImages\n self.changeImageList(self.trImages.list)\n else:\n self.window.checkBox.setCheckState(2) # Circles for Bright Field\n self.currImageCol = self.bfImages\n self.changeImageList(self.bfImages.list)\n\n def changeThreshold(self):\n \"\"\"Called when either the threshold slider or number box is altered\"\"\"\n if self.currImage is not None and not self.isZstack:\n self.currImage.threshold = self.window.threshold_slider.value()\n\n def changeRadiusRange(self):\n \"\"\"Called when either the radius range slider or number box is altered\"\"\"\n if self.currImage is not None and not self.isZstack:\n self.currImage.radiusRange = self.window.radius_slider.getRange()\n\n def changeImage(self):\n \"\"\"Called when changing image on screen. 
Handles loading image on GUI and calculating shapes.\"\"\"\n self.currImage = self.currImageCol.list[self.currImageIdx]\n self.loadImage()\n\n # Disables delay in calculation while changing the image so next image shapes gets calculated immediately\n self.window.disableDebounce()\n self.window.threshold_box.setValue(self.currImage.threshold)\n self.window.radius_slider.setRange(self.currImage.radiusRange[0], self.currImage.radiusRange[1])\n self.window.enableDebounce()\n\n if len(self.currImage.shapes) == 0 and not self.isZstack: # Don't draw if images are z-stack\n self.window.debounce.start()\n\n def setBaseImage(self):\n \"\"\"Marks current pair of images (both TR and BF) as their respective base images.\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n self.trImages.baseImage, self.bfImages.baseImage = self.trImages.map[self.currImage.id], self.bfImages.map[self.currImage.id]\n self.trImages.baseId, self.bfImages.baseId = self.currImage.id, self.currImage.id\n # Redraws allows for instant drawing of identification numbers for base shapes\n self.trImages.baseImage.redraw()\n self.bfImages.baseImage.redraw()\n self.loadImage()\n\n def clearBaseImage(self):\n \"\"\"Clears current base images in both the TR and BF image collections.\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n self.trImages.baseImage = None\n self.bfImages.baseImage = None\n self.trImages.map[self.currImage.id].redraw()\n self.bfImages.map[self.currImage.id].redraw()\n self.loadImage()\n\n def drawCircle(self):\n \"\"\"Detects and draws circles for current image\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n thresh = self.window.threshold_slider.value()\n rng = self.window.radius_slider.getRange()\n\n # Calculating and drawing circles is computationally intensive --> New thread\n self.thread = DrawCircleThread(self.currImage, thresh, rng, self.window)\n\n self.thread.startPbar.connect(self.window.startPbar)\n self.thread.incrementPbar.connect(self.window.incrementPbar)\n self.thread.finishPbar.connect(self.window.finishPbar)\n self.thread.finished.connect(self.loadImage)\n\n self.thread.start()\n\n def drawEllipse(self):\n \"\"\"Detects and draws ellipses for current image\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n thresh = self.window.threshold_slider.value()\n rng = self.window.radius_slider.getRange()\n\n self.thread = DrawEllipseThread(self.currImage, thresh, rng, self.window)\n\n self.thread.startPbar.connect(self.window.startPbar)\n self.thread.incrementPbar.connect(self.window.incrementPbar)\n self.thread.finishPbar.connect(self.window.finishPbar)\n self.thread.finished.connect(self.loadImage)\n\n self.thread.start()\n\n def recalculate(self):\n \"\"\"Recalculates and redraws whichever shapes are being detected\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n if self.window.checkBox.isChecked():\n self.drawCircle()\n else:\n self.drawEllipse()\n\n def exportAllExcel(self):\n \"\"\"Exports all currently drawn images as Excel data. 
Base image must be set before calling.\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n\n path = str(QtWidgets.QFileDialog.getExistingDirectory(self.window, \"Select Directory\")) + \"/\"\n self.thread = ExportThread(self.bfImages, self.trImages, \"all-excel\", path)\n\n self.thread.startPbar.connect(self.window.startPbar)\n self.thread.incrementPbar.connect(self.window.incrementPbar)\n self.thread.finishPbar.connect(self.window.finishPbar)\n\n self.thread.start()\n\n def exportSingleExcel(self):\n \"\"\"Exports shape dimensions of current images. Shapes should be drawn on both images in the current pair.\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n\n self.setBaseImage()\n path = str(QtWidgets.QFileDialog.getExistingDirectory(self.window, \"Select Directory\")) + \"/\"\n self.thread = ExportThread(self.bfImages, self.trImages, \"single-excel\", path)\n\n self.thread.startPbar.connect(self.window.startPbar)\n self.thread.incrementPbar.connect(self.window.incrementPbar)\n self.thread.finishPbar.connect(self.window.finishPbar)\n\n self.thread.start()\n\n def exportAllImages(self):\n \"\"\"Exports all currently drawn images as Images (png).\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n\n path = str(QtWidgets.QFileDialog.getExistingDirectory(self.window, \"Select Directory\")) + \"/\"\n path = path + \"Marked Images/\"\n os.mkdir(path)\n\n self.thread = ExportThread(self.bfImages, self.trImages, \"all-images\", path)\n\n self.thread.startPbar.connect(self.window.startPbar)\n self.thread.incrementPbar.connect(self.window.incrementPbar)\n self.thread.finishPbar.connect(self.window.finishPbar)\n\n self.thread.start()\n\n def exportSingleImage(self):\n \"\"\"Exports current image pair as Images (png).\"\"\"\n if self.currImage is None and not self.isZstack:\n return\n\n path = str(QtWidgets.QFileDialog.getExistingDirectory(self.window, \"Select Directory\")) + \"/\"\n path = path + \"Marked Images/\"\n os.mkdir(path)\n\n currImg, currImgComplement = self.trImages.map[self.currImage.id], self.bfImages.map[self.currImage.id]\n self.thread = ExportThread(currImg, currImgComplement, \"single-image\", path)\n\n self.thread.startPbar.connect(self.window.startPbar)\n self.thread.incrementPbar.connect(self.window.incrementPbar)\n self.thread.finishPbar.connect(self.window.finishPbar)\n\n self.thread.start()\n\n def drawSharpnessGraphs(self):\n \"\"\"Plots using popup MatPlotLib windows graphs of the sharpness of the images. This is used for Z-Stack images.\"\"\"\n self.thread1, self.thread2 = GetSharpnessThread(self.bfImages.list, \"Spheroid Sharpness\"), GetSharpnessThread(self.trImages.list, \"Sensor Sharpness\")\n\n for thread in (self.thread1, self.thread2):\n thread.startPbar.connect(self.window.startPbar)\n thread.incrementPbar.connect(self.window.incrementPbar)\n thread.finishPbar.connect(self.window.finishPbar)\n thread.finished.connect(self.showSharpnessGraphs)\n thread.start()\n\n def showSharpnessGraphs(self, imageSharpness, title):\n \"\"\"\n Shows MatPlotLib graphs in popup windows.\n Args:\n imageSharpness: Array of tuples containing the id_ of the image and its sharpness value\n title: Title of the plot. 
Either Sensor Sharpness or Spheroid Sharpness.\n \"\"\"\n x, y = [], []\n for id_, sharpness in imageSharpness:\n x.append(float(id_))\n y.append(1 / sharpness)\n\n if len(x) == 0 or len(y) == 0:\n return\n\n plot = PlotWindow(self, width=5, height=4, dpi=100)\n plot.axes.plot(x, y)\n plot.axes.set_xticks(arange(min(x), max(x)+1, 5.0))\n plot.axes.set_title(title)\n plot.setWindowTitle(title)\n plot.show()\n self.sharpnessGraphs.append(plot)\n\nclass InitializeImagesThread(QtCore.QThread):\n \"\"\"Thread object for loading images in\"\"\"\n # Progress bar signals, connected to respective functions in main\n finished = QtCore.pyqtSignal()\n startPbar = QtCore.pyqtSignal(int)\n incrementPbar = QtCore.pyqtSignal()\n finishPbar = QtCore.pyqtSignal()\n\n def __init__(self, viewer, parent=None):\n super(InitializeImagesThread, self).__init__(parent)\n self.viewer = viewer\n\n def run(self):\n self.viewer.getImages(self)\n self.finishPbar.emit()\n self.finished.emit()\n\nclass DrawCircleThread(QtCore.QThread):\n \"\"\"Thread object for calculating and drawing circles on image\"\"\"\n finished = QtCore.pyqtSignal()\n startPbar = QtCore.pyqtSignal(int)\n incrementPbar = QtCore.pyqtSignal()\n finishPbar = QtCore.pyqtSignal()\n\n def __init__(self, img, thresh, rng, window, parent=None):\n super(DrawCircleThread, self).__init__(parent)\n self.img = img\n self.thresh = thresh\n self.range = rng\n\n def run(self):\n self.img.drawCircle(self.thresh, self.range, self)\n self.finishPbar.emit()\n self.finished.emit()\n\nclass DrawEllipseThread(QtCore.QThread):\n \"\"\"Thread object for calculating and drawing ellipses on image\"\"\"\n finished = QtCore.pyqtSignal()\n startPbar = QtCore.pyqtSignal(int)\n incrementPbar = QtCore.pyqtSignal()\n finishPbar = QtCore.pyqtSignal()\n\n def __init__(self, img, thresh, rng, window, parent=None):\n super(DrawEllipseThread, self).__init__(parent)\n self.img = img\n self.thresh = thresh\n self.range = rng\n\n def run(self):\n self.img.drawEllipse(self.thresh, self.range, self)\n self.finishPbar.emit()\n self.finished.emit()\n\nclass GetSharpnessThread(QtCore.QThread):\n \"\"\"Thread object for calculating and plotting sharpness graphs for Z-Stack images\"\"\"\n finished = QtCore.pyqtSignal(object, str)\n startPbar = QtCore.pyqtSignal(int)\n incrementPbar = QtCore.pyqtSignal()\n finishPbar = QtCore.pyqtSignal()\n\n def __init__(self, image_list, title, parent=None):\n super(GetSharpnessThread, self).__init__(parent)\n self.list = image_list\n self.title = title\n\n def run(self):\n imageSharpness = []\n self.startPbar.emit(len(self.list))\n for num, image in enumerate(self.list):\n self.incrementPbar.emit()\n sharpness = image.getSharpness()\n imageSharpness.append((image.id, sharpness))\n self.finishPbar.emit()\n self.finished.emit(imageSharpness, self.title)\n\nclass PlotWindow(FigureCanvasQTAgg):\n \"\"\"Object for separate plot windows for sharpness graphs\"\"\"\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n fig = Figure(figsize=(width, height), dpi=dpi)\n self.axes = fig.add_subplot(111)\n super(PlotWindow, self).__init__(fig)"
] | [
[
"matplotlib.figure.Figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
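The re-anchoring arithmetic in zoomPlus/zoomMinus above keeps the pixel under the cursor fixed while the scaled image grows or shrinks. A minimal standalone sketch of that mapping; zoom_anchor is a hypothetical helper name, not part of the viewer:

def zoom_anchor(px, py, mx, my, zoom, new_zoom):
    # (mx + px) is the cursor position in scaled-image coordinates.
    # Rescaling that point by new_zoom / zoom moves it; subtracting the
    # mouse offset again recovers the new viewport top-left corner.
    img_x = (mx + px) * new_zoom / zoom
    img_y = (my + py) * new_zoom / zoom
    return img_x - mx, img_y - my

# One zoom-in step from zoomX == 2, as in zoomPlus(scroll=True):
print(zoom_anchor(100, 50, 300, 200, 2, 3))  # (300.0, 175.0)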
weimegan/fireroad053 | [
"45b82ee29798cf51cbea3ad5fc1d4a8fbdd08507"
] | [
"combos.py"
] | [
"import json\nimport csv\nimport pandas as pd\nimport numpy as np\nimport itertools\n\nf = open('finaldata/parsedsp21_dummy.json')\ndata = json.load(f)\n\nclassescsv = pd.read_csv('finaldata/parsedsp21_actual_classes.csv')\n\n#print(classescsv['id'])\nindToId = dict()\nfor i in range(len(classescsv)):\n indToId[i] = classescsv['id'][i]\n#print(indToId)\n\n#s = np.sum(data[\"6.006\"][\"sections\"][\"RecitationSession\"][0], axis=0)\n\n\ndef add_times():\n new_data = dict()\n for i in range(len(indToId)):\n c = indToId[i]\n new_data[c] = dict()\n for sec in data[c][\"sections\"].keys(): # section: lec, rec, lab\n new_data[c][sec] = []\n for j in range(len(data[c][\"sections\"][sec])): # timeslots per section\n summed_sec = np.sum(data[c][\"sections\"][sec][j], axis=0)\n new_data[c][sec].append(summed_sec.tolist())\n\n with open('finaldata/addedtimes.json', 'w') as output_file:\n json.dump(new_data, output_file)\n\n#add_times()\nf1 = open('finaldata/addedtimes.json')\nadded_data = json.load(f1)\n\n#print(np.shape(added_data[\"8.01\"][\"LectureSession\"]))\n\n#a = [[[1,0],[0,1]],[[1,0]],[[1,1],[0,0]]]\ndef combos(a):\n ls = list(itertools.product(*a))\n lscombos = [np.sum(i, axis=0).tolist() for i in ls]\n return lscombos\n\ndef create_combo_dict():\n combo_dict = dict()\n for i in range(len(indToId)):\n c = indToId[i]\n l = []\n for sec in added_data[c].keys():\n l.append(added_data[c][sec])\n combo_dict[c] = combos(l)\n with open('finaldata/combodict.json', 'w') as output:\n json.dump(combo_dict, output)\n\n#create_combo_dict()\n\nf2 = open('finaldata/combodict.json')\ncomb_data = json.load(f2)\n\n#print(comb_data[\"8.01\"][0][0]) # access binary value at timeslot 0 for the 0th combo of 8.01"
] | [
[
"pandas.read_csv",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
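combos() above relies on itertools.product to pick one timeslot vector per section and an element-wise sum to merge each choice. A self-contained toy illustration of the same idea (the timeslot data here is made up):

import itertools
import numpy as np

# Each section offers candidate timeslot vectors (1 = slot occupied).
lecture = [[1, 0, 0], [0, 1, 0]]
recitation = [[0, 0, 1]]

# One summed vector per (lecture, recitation) choice; any entry > 1
# would mark a clash between the chosen sections.
options = [np.sum(choice, axis=0).tolist()
           for choice in itertools.product(lecture, recitation)]
print(options)  # [[1, 0, 1], [0, 1, 1]]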
smartmzl/Quanlse | [
"7d5d00d5401d801aeb7cbcee381ccdd07331e8a7"
] | [
"Quanlse/QOperation/RotationGate.py"
] | [
"#!/usr/bin/python3\n# -*- coding: utf8 -*-\n\n# Copyright (c) 2021 Baidu, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nRotation Gate Operation\n\"\"\"\nimport importlib\nfrom typing import List, Optional, TYPE_CHECKING\n\nimport numpy\n\nfrom Quanlse.QOperation import QOperation\nfrom Quanlse.QRegPool import QRegStorage\n\nif TYPE_CHECKING:\n from Quanlse.QOperation import RotationArgument, OperationFunc\n\n\nclass RotationGateOP(QOperation):\n \"\"\"\n Rotation gate\n\n Use rotation parameters to create the quantum operators\n \"\"\"\n argumentList = None # type: List['RotationArgument']\n uGateArgumentList = None # type: List['RotationArgument']\n\n def __init__(self, gate: str, bits: int,\n angleList: List['RotationArgument'],\n uGateArgumentList: List['RotationArgument']) -> None:\n super().__init__(gate, bits)\n self.argumentList = angleList\n self.uGateArgumentList = uGateArgumentList\n\n def __call__(self, *qRegList: QRegStorage, gateTime: Optional[float] = None) -> None:\n self._op(list(qRegList), gateTime)\n\n def generateMatrix(self) -> numpy.ndarray:\n pass\n\n def _u3Matrix(self, theta: float, phi: float, lamda: float) -> numpy.ndarray:\n \"\"\"\n Generate a single-qubit rotation gate with 3 angles\n\n :param theta: angle\n :param phi: angle\n :param lamda: angle\n :return: U3 matrix\n \"\"\"\n\n self._matrix = numpy.array([[numpy.cos(theta / 2.0), -numpy.exp(1j * lamda) * numpy.sin(theta / 2.0)],\n [numpy.exp(1j * phi) * numpy.sin(theta / 2.0),\n numpy.exp(1j * lamda + 1j * phi) * numpy.cos(theta / 2.0)]])\n return self._matrix\n\n def _u2Matrix(self, phi: float, lamda: float) -> numpy.ndarray:\n \"\"\"\n Generate a single-qubit rotation gate with 2 angles\n\n :param phi: angle\n :param lamda: angle\n :return: U2 matrix\n \"\"\"\n\n self._matrix = (1 / numpy.sqrt(2)) * numpy.array([[1, -numpy.exp(1j * lamda)],\n [numpy.exp(1j * phi), numpy.exp(1j * (phi + lamda))]])\n return self._matrix\n\n def _u1Matrix(self, lamda: float) -> numpy.ndarray:\n \"\"\"\n Generate a single-qubit rotation gate along the Z-axis\n\n :param lamda: angle\n :return: U1 matrix\n \"\"\"\n\n self._matrix = numpy.array([[1, 0],\n [0, numpy.exp(1j * lamda)]])\n return self._matrix\n\n def _cu3Matrix(self, theta: float, phi: float, lamda: float) -> numpy.ndarray:\n self._matrix = numpy.kron(numpy.eye(2),\n numpy.array([[1, 0],\n [0, 0]])\n ) + \\\n numpy.kron(self._u3Matrix(theta, phi, lamda),\n numpy.array([[0, 0],\n [0, 1]])\n )\n return self._matrix\n\n def _generateUMatrix(self) -> None:\n uGateArgumentCount = len(\n [value for value in self.uGateArgumentList if isinstance(value, (float, int))])\n if uGateArgumentCount != len(self.uGateArgumentList):\n pass # has parameter\n elif uGateArgumentCount == 3:\n self._u3Matrix(*self.uGateArgumentList)\n elif uGateArgumentCount == 2:\n self._u2Matrix(*self.uGateArgumentList)\n elif uGateArgumentCount == 1:\n self._u1Matrix(*self.uGateArgumentList)\n\n def _generateCUMatrix(self) -> None:\n uGateArgumentCount = 
len([value for value in self.uGateArgumentList if isinstance(value, (float, int))])\n if uGateArgumentCount != len(self.uGateArgumentList):\n pass # has parameter\n elif uGateArgumentCount == 3:\n self._cu3Matrix(*self.uGateArgumentList)\n # elif uGateArgumentCount == 2:\n # self._cu2Matrix(*self.uGateArgumentList)\n # elif uGateArgumentCount == 1:\n # self._cu1Matrix(*self.uGateArgumentList)\n\n\ndef U(theta: 'RotationArgument',\n phi: Optional['RotationArgument'] = None,\n lamda: Optional['RotationArgument'] = None) -> 'OperationFunc':\n \"\"\"\n U Gate\n\n Generate a single-qubit U1 (or U2 or U3) gate according to the number of angles.\n \"\"\"\n uGateArgumentList = angleList = [value for value in [theta, phi, lamda] if value is not None]\n gate = RotationGateOP('U', 1, angleList, uGateArgumentList)\n gate.generateMatrix = gate._generateUMatrix # Bind the method; calling it here would store its None return value\n return gate\n\n\ndef RX(theta: 'RotationArgument') -> 'OperationFunc':\n \"\"\"\n RX Gate\n\n Single-qubit rotation about the X-axis.\n\n According to the relation: U3(theta, -pi/2, pi/2) = RX(theta)\n \"\"\"\n angleList = [theta]\n uGateArgumentList = [theta, -numpy.pi / 2, numpy.pi / 2]\n gate = RotationGateOP('RX', 1, angleList, uGateArgumentList)\n gate.generateMatrix = gate._generateUMatrix\n return gate\n\n\ndef RY(theta: 'RotationArgument') -> 'OperationFunc':\n \"\"\"\n RY Gate\n\n Single-qubit rotation about the Y-axis.\n\n According to the relation: U3(theta, 0, 0) = RY(theta)\n \"\"\"\n angleList = [theta]\n uGateArgumentList = [theta, 0, 0]\n gate = RotationGateOP('RY', 1, angleList, uGateArgumentList)\n gate.generateMatrix = gate._generateUMatrix\n return gate\n\n\ndef RZ(lamda: 'RotationArgument') -> 'OperationFunc':\n \"\"\"\n RZ Gate\n\n Single-qubit rotation about the Z-axis.\n\n According to the relation: U3(0, 0, lamda) = RZ(lamda)\n \"\"\"\n angleList = [lamda]\n uGateArgumentList = [0, 0, lamda]\n gate = RotationGateOP('RZ', 1, angleList, uGateArgumentList)\n gate.generateMatrix = gate._generateUMatrix\n return gate\n"
] | [
[
"numpy.sqrt",
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.exp",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
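The RX docstring above cites the identity U3(theta, -pi/2, pi/2) = RX(theta). A short NumPy check of that relation against the same U3 matrix defined in _u3Matrix; the helper names u3 and rx are local to this sketch:

import numpy as np

def u3(theta, phi, lam):
    # Mirrors RotationGateOP._u3Matrix
    return np.array([[np.cos(theta / 2), -np.exp(1j * lam) * np.sin(theta / 2)],
                     [np.exp(1j * phi) * np.sin(theta / 2),
                      np.exp(1j * (phi + lam)) * np.cos(theta / 2)]])

def rx(theta):
    # Textbook RX(theta) = exp(-1j * theta * X / 2)
    return np.array([[np.cos(theta / 2), -1j * np.sin(theta / 2)],
                     [-1j * np.sin(theta / 2), np.cos(theta / 2)]])

theta = 0.7
assert np.allclose(u3(theta, -np.pi / 2, np.pi / 2), rx(theta))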
cmougan/Novartis2021 | [
"72a6f088929a5a4546760f4a453ec4a77faf5856"
] | [
"NN_files/nnet.py"
] | [
"import pandas as pd\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nimport numpy as np\nfrom gauss_rank_scaler import GaussRankScaler\nfrom sklearn.model_selection import train_test_split\n\nimport random\nimport os\n\n\nrandom.seed(0)\n\n\nclass ReadDataset(Dataset):\n \"\"\"Read dataset.\"\"\"\n\n def __init__(self, csv_file, isTrain=None):\n \"\"\"\n Args:\n csv_file (str): Path to the csv file with the students data.\n\n \"\"\"\n self.df = pd.read_csv(csv_file).fillna(0)\n self.df.columns = self.df.columns.str.replace(\" \", \"\")\n\n self.X = self.df.drop(\n columns=[\"target\", \"Cluster\", \"brand_group\", \"cohort\", \"Country\"]\n )\n self.y = self.df.target.values\n\n self.scaler = GaussRankScaler()\n self.X = pd.DataFrame(self.scaler.fit_transform(self.X), columns=self.X.columns)\n\n X_train, X_test, y_train, y_test = train_test_split(\n self.X, self.y, random_state=42\n )\n if isTrain == True:\n self.X = X_train\n self.y = y_train\n else:\n self.X = X_test\n self.y = y_test\n\n def __len__(self):\n return len(self.X)\n\n def __shape__(self):\n return self.X.shape[1]\n\n def __getitem__(self, idx):\n # Convert idx from tensor to list due to pandas bug (that arises when using pytorch's random_split)\n if isinstance(idx, torch.Tensor):\n idx = idx.tolist()\n\n self.X.iloc[idx].values\n self.y[idx]\n\n return [self.X.iloc[idx].values, self.y[idx]]\n\n\nclass Net(nn.Module):\n def __init__(self, input_dim):\n super().__init__()\n self.fc1 = nn.Linear(input_dim, input_dim)\n self.relu1 = nn.SELU()\n self.batchnorm1 = nn.BatchNorm1d(input_dim)\n self.drop1 = nn.Dropout(0.05, inplace=False)\n\n # self.fc2 = nn.Linear(2*input_dim, input_dim)\n # self.relu2 = nn.SELU()\n # self.batchnorm2 = nn.BatchNorm1d(input_dim)\n # self.drop2 = nn.Dropout(0.05, inplace=False)\n\n self.fc3 = nn.Linear(input_dim, 2, bias=False)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.relu1(x)\n x = self.batchnorm1(x)\n x = self.drop1(x)\n \"\"\"\n x = self.fc2(x)\n x = self.relu2(x)\n x = self.batchnorm2(x)\n x = self.drop2(x)\n \"\"\"\n\n x = self.fc3(x)\n\n return x.squeeze()\n\n\nclass ResNet(nn.Module):\n def __init__(self, input_dim):\n super().__init__()\n self.fc1 = nn.Linear(input_dim, 6 * input_dim)\n self.relu1 = nn.SELU()\n self.batchnorm1 = nn.BatchNorm1d(6 * input_dim)\n self.drop1 = nn.Dropout(0.05, inplace=False)\n\n self.fc2 = nn.Linear(6 * input_dim, 3 * input_dim, bias=False)\n self.relu2 = nn.SELU()\n self.batchnorm2 = nn.BatchNorm1d(\n 3 * input_dim,\n eps=1e-05,\n momentum=0.1,\n affine=True,\n track_running_stats=True,\n )\n self.drop2 = nn.Dropout(0.05, inplace=False)\n\n self.fc3 = nn.Linear(3 * input_dim, 2 * input_dim, bias=False)\n self.relu3 = nn.SELU()\n self.batchnorm3 = nn.BatchNorm1d(\n 2 * input_dim + input_dim,\n eps=1e-05,\n momentum=0.1,\n affine=True,\n track_running_stats=True,\n )\n self.drop3 = nn.Dropout(0.05, inplace=False)\n\n self.fc4 = nn.Linear(2 * input_dim + input_dim, 1 * input_dim, bias=False)\n self.relu4 = nn.SELU()\n self.batchnorm4 = nn.BatchNorm1d(\n input_dim, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True\n )\n self.drop4 = nn.Dropout(0.05, inplace=False)\n\n self.fc5 = nn.Linear(input_dim, 2, bias=True)\n\n def forward(self, x):\n x1 = x\n x = self.fc1(x)\n x = self.relu1(x)\n self.batchnorm1(x)\n self.drop1(x)\n\n x = self.fc2(x)\n x = self.relu2(x)\n self.batchnorm2(x)\n self.drop2(x)\n\n x = self.fc3(x)\n x = self.relu3(torch.cat((x, x1), 1))\n self.batchnorm3(x)\n self.drop3(x)\n\n x = 
self.fc4(x)\n x = self.relu4(x)\n self.batchnorm4(x)\n self.drop4(x)\n\n x = self.fc5(x)\n\n return x.squeeze()\n\n def partial_forward(self, x):\n x1 = x\n x = self.fc1(x)\n x = self.relu1(x)\n self.batchnorm1(x)\n self.drop1(x)\n\n x = self.fc2(x)\n x = self.relu2(x)\n self.batchnorm2(x)\n self.drop2(x)\n\n x = self.fc3(x)\n x = self.relu3(torch.cat((x, x1), 1))\n self.batchnorm3(x)\n self.drop3(x)\n\n x = self.fc4(x)\n return x.squeeze()\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"pandas.read_csv",
"torch.cat",
"sklearn.model_selection.train_test_split",
"torch.nn.Linear",
"torch.nn.SELU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
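ResNet.forward above rebinds x after every BatchNorm/Dropout call because nn.BatchNorm1d and nn.Dropout (with inplace=False) return new tensors rather than mutating their input; calling them without rebinding silently drops their effect. A small demonstration of that behavior:

import torch
import torch.nn as nn

bn = nn.BatchNorm1d(4)
x = torch.randn(8, 4)

y = bn(x)                      # returns a new, normalized tensor
assert not torch.equal(x, y)   # the input tensor is left unchanged

# Hence a bare `self.batchnorm1(x)` is a no-op for the rest of the
# network; the result must be rebound, e.g. x = bn(x)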
michaeljohnclancy/spikeforest2 | [
"93bdde2c570aef9426b3d7bceb69f3605c9f005a"
] | [
"working/tests/kilosort2_crash_tests/thisoneworks_ks2_boyden_singularity.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nfrom spikeforest2 import sorters\nfrom spikeforest2 import processing\nimport hither_sf as hither\nimport kachery as ka\nimport os\n\nos.environ['HITHER_USE_SINGULARITY'] = 'TRUE'\n\nrecording_path = 'sha1dir://49b1fe491cbb4e0f90bde9cfc31b64f985870528.paired_boyden32c/915_10_1'\nsorting_true_path = 'sha1dir://49b1fe491cbb4e0f90bde9cfc31b64f985870528.paired_boyden32c/915_10_1/firings_true.mda'\n\nsorter_name = 'kilosort2'\nsorter = getattr(sorters, sorter_name)\nparams = {}\n\n# Determine whether we are going to use gpu based on the name of the sorter\ngpu = sorter_name in ['kilosort2', 'kilosort', 'tridesclous', 'ironclust']\n\n# In the future we will check whether we have the correct version of the wrapper here\n# Version: 0.1.5-w1\n\n# Download the data (if needed)\nka.set_config(fr='default_readonly')\nka.load_file(recording_path + '/raw.mda')\n\n# Run the spike sorting\nwith hither.config(container='default', gpu=gpu):\n sorting_result = sorter.run(\n recording_path=recording_path,\n sorting_out=hither.File(),\n **params\n )\nassert sorting_result.success\nsorting_path = sorting_result.outputs.sorting_out\n\n# Compare with ground truth\nwith hither.config(container='default'):\n compare_result = processing.compare_with_truth.run(\n sorting_path=sorting_path,\n sorting_true_path=sorting_true_path,\n json_out=hither.File()\n )\nassert compare_result.success\nobj = ka.load_object(compare_result.outputs.json_out._path)\n\naccuracies = [float(obj[i]['accuracy']) for i in obj.keys()]\nprint('ACCURACIES:')\nprint(accuracies)\nprint('')\n\naverage_accuracy = np.mean(accuracies)\nprint('AVERAGE-ACCURACY:', average_accuracy)"
] | [
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
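The accuracy summary at the end of the script above walks the comparison object keyed by unit id and averages the per-unit accuracies. A toy version with made-up data shaped like that output:

import numpy as np

# unit id -> record carrying an 'accuracy' field (stored as a string,
# hence the float() conversion in the script above)
obj = {"1": {"accuracy": "0.91"}, "2": {"accuracy": "0.84"}}

accuracies = [float(obj[k]["accuracy"]) for k in obj.keys()]
print(np.mean(accuracies))  # 0.875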
ChZPan/CT-image-DeepLearningRegression | [
"38c4e2ca5427affa0cc628b34c14b85e01dbb33c"
] | [
"src/resnet50.py"
] | [
"import numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, Input, Add, \\\n Activation, ZeroPadding2D, BatchNormalization, \\\n AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\nfrom keras.models import Model\nfrom keras.initializers import glorot_uniform\nfrom keras.optimizers import Adam\n\n# Define loss function\ndef mean_L2_loss(y_pred, y_true):\n \"\"\"\n Mean L2-norm regression loss\n \n Parameters\n ----------\n y_true : array-like of shape = (n_samples, vec_dim)\n y_pred : array-like of shape = (n_samples, vec_dim)\n\n Returns\n -------\n Loss : A positive floating point value, the best value is 0.0.\n \"\"\"\n d = y_pred - y_true\n return tf.reduce_mean(tf.norm(d, axis=1))\n\n\ndef rmse(y_pred, y_true):\n \"\"\"\n Root-mean-square-error metrics\n \n Parameters\n ----------\n y_true : array-like of shape = (n_samples, vec_dim)\n y_pred : array-like of shape = (n_samples, vec_dim)\n\n Returns\n -------\n Metrics : A positive floating point value, the best value is 0.0.\n \"\"\"\n d = y_pred - y_true\n return K.sqrt(K.mean(K.square(tf.norm(d, axis=1))))\n \n \ndef max_error(y_true, y_pred):\n \"\"\"\n max_error metric calculates the maximum residual error.\n \n Parameters\n ----------\n y_true : array-like of shape = (n_samples, vec_dim)\n y_pred : array-like of shape = (n_samples, vec_dim)\n\n Returns\n -------\n max_error : A positive floating point value, the best value is 0.0.\n \"\"\"\n d = y_pred - y_true\n return K.max(tf.norm(d, axis=1))\n\n\ndef idn_block(X, f, filters, stage, block):\n \"\"\"\n Implementation of the identity block\n \n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main \n path\n filters -- python list of integers, defining the number of filters in the \n CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in \n the network\n block -- string/character, used to name the layers, depending on their \n position in the network\n \n Returns:\n X -- output of the identity block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value to later add back to the main path. 
\n X_shortcut = X\n \n # First component of main path\n X = Conv2D(filters = F1, kernel_size = (1,1), strides = (1,1), \n padding = 'valid', name = conv_name_base + '2a', \n kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n # Second component of main path\n X = Conv2D(filters = F2, kernel_size = (f,f), strides = (1,1),\n padding = 'same', name = conv_name_base + '2b',\n kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path \n X = Conv2D(filters = F3, kernel_size = (1,1), strides = (1,1), \n padding = 'valid', name = conv_name_base + '2c', \n kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis=3, name = bn_name_base + '2c')(X)\n\n # Add shortcut value to main path, and pass it through a RELU activation \n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n \n return X\n \n\ndef conv_block(X, f, filters, stage, block, s=2):\n \"\"\"\n Implementation of the convolutional block\n \n Arguments:\n X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)\n f -- integer, specifying the shape of the middle CONV's window for the main \n path\n filters -- python list of integers, defining the number of filters in the \n CONV layers of the main path\n stage -- integer, used to name the layers, depending on their position in \n the network\n block -- string/character, used to name the layers, depending on their \n position in the network\n s -- Integer, specifying the stride to be used\n \n Returns:\n X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)\n \"\"\"\n \n # defining name basis\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n \n # Retrieve Filters\n F1, F2, F3 = filters\n \n # Save the input value\n X_shortcut = X\n\n # First component of main path \n X = Conv2D(F1, (1,1), strides = (s,s), name = conv_name_base + '2a',\n padding = 'valid', \n kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)\n X = Activation('relu')(X)\n \n # Second component of main path\n X = Conv2D(F2, (f,f), strides = (1,1), name = conv_name_base + '2b',\n padding = 'same', \n kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)\n X = Activation('relu')(X)\n\n # Third component of main path\n X = Conv2D(F3, (1,1), strides = (1,1), name = conv_name_base + '2c',\n padding = 'valid', \n kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)\n\n ##### SHORTCUT PATH #### \n X_shortcut = Conv2D(F3, (1,1), strides = (s,s), padding = 'valid',\n name = conv_name_base + '1',\n kernel_initializer = glorot_uniform(seed=0))(X_shortcut)\n X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)\n\n # Add shortcut value to main path, and pass it through a RELU activation\n X = Add()([X, X_shortcut])\n X = Activation('relu')(X)\n \n return X\n \n\ndef ResNet50(input_shape, lr_power=-3.0, lr_decay=0.0, \n extra_layers=None, dropouts=None):\n \"\"\"\n Implementation of the popular ResNet50 architecture.\n\n Arguments:\n input_shape -- shape of the images of the dataset\n lr_power -- float, base-10 exponent of the learning rate\n lr_decay -- float, learning rate decay\n extra_layers -- optional list of sizes for extra dense layers\n dropouts -- optional list of dropout rates, one per extra layer\n\n Returns:\n model -- a Model() instance in Keras\n \"\"\"\n \n # Define the input
as a tensor with shape input_shape\n X_input = Input(input_shape)\n \n # Zero-Padding\n X = ZeroPadding2D((1, 1))(X_input) # mod (3,3) -> (1,1)\n \n # Stage 1\n X = Conv2D(64, (3, 3), strides=(1, 1), name='conv1', # mod (7,7) -> (3,3); (2,2) -> (1,1)\n kernel_initializer=glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = conv_block(X, f=3, filters=[64, 64, 256], \n stage=2, block='a', s=1)\n X = idn_block(X, 3, [64, 64, 256], stage=2, block='b')\n X = idn_block(X, 3, [64, 64, 256], stage=2, block='c')\n\n # Stage 3\n X = conv_block(X, f=3, filters=[128,128,512],\n stage=3, block='a', s=2) \n X = idn_block(X, f=3, filters=[128,128,512], stage=3, block='b')\n X = idn_block(X, f=3, filters=[128,128,512], stage=3, block='c')\n X = idn_block(X, f=3, filters=[128,128,512], stage=3, block='d')\n\n # Stage 4\n X = conv_block(X, f=3, filters=[256,256,1024],\n stage=4, block='a', s=2) \n X = idn_block(X, f=3, filters=[256,256,1024], stage=4, block='b')\n X = idn_block(X, f=3, filters=[256,256,1024], stage=4, block='c')\n X = idn_block(X, f=3, filters=[256,256,1024], stage=4, block='d')\n X = idn_block(X, f=3, filters=[256,256,1024], stage=4, block='e')\n X = idn_block(X, f=3, filters=[256,256,1024], stage=4, block='f')\n\n # Stage 5\n X = conv_block(X, f=3, filters=[512,512,2048],\n stage=5, block='a', s=2) \n X = idn_block(X, f=3, filters=[512,512,2048], stage=5, block='b')\n X = idn_block(X, f=3, filters=[512,512,2048], stage=5, block='c')\n\n # AVGPOOL \n X = AveragePooling2D((2,2), name='avg_pool')(X)\n \n # Flatten\n X = Flatten()(X)\n \n # Add extra dense layers\n if extra_layers is not None:\n assert len(extra_layers) == len(dropouts), \\\n \"Arguments do not match in length: extra_layers, dropouts.\"\n for i, (layer, dpout) in enumerate(zip(extra_layers, dropouts)):\n X = Dense(layer, name='fc_'+str(i)+'_'+str(layer), activation='relu',\n kernel_initializer=glorot_uniform(seed=0))(X)\n X = Dropout(dpout, seed=0, name='dropout_'+str(i)+'_'+str(dpout))(X)\n\n # Output \n X = Dense(2, name='fc_outputs', kernel_initializer=glorot_uniform(seed=0))(X)\n \n # Create model\n model = Model(inputs = X_input, outputs = X, name = 'ResNet50')\n \n # Compile model with the Adam optimizer built from lr_power and lr_decay\n learning_rate = 10.0**(lr_power)\n optim = Adam(lr=learning_rate, decay=lr_decay)\n model.compile(loss=mean_L2_loss, optimizer=optim,\n metrics=[rmse, max_error])\n \n return model"
] | [
[
"tensorflow.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
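mean_L2_loss above averages the Euclidean norm of each sample's residual vector, which is also the quantity the rmse and max_error metrics build on. A NumPy sanity check on toy 2-D targets:

import numpy as np

y_true = np.array([[0.0, 0.0], [1.0, 1.0]])
y_pred = np.array([[3.0, 4.0], [1.0, 1.0]])

# Per-sample L2 norms of the residuals: [5.0, 0.0]
loss = np.mean(np.linalg.norm(y_pred - y_true, axis=1))
print(loss)  # (5.0 + 0.0) / 2 = 2.5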
samuelbroscheit/kge | [
"208f310d199aa3c2059467ee24c28cae86bbc10b"
] | [
"kge/util/dump.py"
] | [
"import time\nimport os\nfrom collections import OrderedDict\nimport sys\nimport torch\nimport csv\nimport yaml\nimport re\nimport socket\nimport copy\n\nfrom kge.job import Trace\nfrom kge import Config\n\n\n## EXPORTED METHODS #####################################################################\n\n\ndef add_dump_parsers(subparsers):\n # 'kge dump' can have associated sub-commands which can have different args\n parser_dump = subparsers.add_parser(\"dump\", help=\"Dump objects to stdout\")\n subparsers_dump = parser_dump.add_subparsers(\n title=\"dump_command\", dest=\"dump_command\"\n )\n subparsers_dump.required = True\n _add_dump_trace_parser(subparsers_dump)\n _add_dump_checkpoint_parser(subparsers_dump)\n _add_dump_config_parser(subparsers_dump)\n\n\ndef dump(args):\n \"\"\"Executes the 'kge dump' commands. \"\"\"\n if args.dump_command == \"trace\":\n _dump_trace(args)\n elif args.dump_command == \"checkpoint\":\n _dump_checkpoint(args)\n elif args.dump_command == \"config\":\n _dump_config(args)\n else:\n raise ValueError()\n\n\ndef get_config_for_job_id(job_id, folder_path):\n config = Config(load_default=True)\n if job_id:\n config_path = os.path.join(\n folder_path, \"config\", job_id.split(\"-\")[0] + \".yaml\"\n )\n else:\n config_path = os.path.join(folder_path, \"config.yaml\")\n if os.path.isfile(config_path):\n config.load(config_path, create=True)\n else:\n raise Exception(\"Could not find config file for {}\".format(job_id))\n return config\n\n\n### DUMP CHECKPOINT #####################################################################\n\n\ndef _add_dump_checkpoint_parser(subparsers_dump):\n parser_dump_checkpoint = subparsers_dump.add_parser(\n \"checkpoint\", help=(\"Dump information stored in a checkpoint\")\n )\n parser_dump_checkpoint.add_argument(\n \"source\",\n help=\"A path to either a checkpoint or a job folder (then uses best or, \"\n \"if not present, last checkpoint).\",\n nargs=\"?\",\n default=\".\",\n )\n parser_dump_checkpoint.add_argument(\n \"--keys\",\n \"-k\",\n type=str,\n nargs=\"*\",\n help=\"List of keys to include (separated by space)\",\n )\n\n\ndef _dump_checkpoint(args):\n \"\"\"Executes the 'dump checkpoint' command.\"\"\"\n\n # Determine checkpoint to use\n if os.path.isfile(args.source):\n checkpoint_file = args.source\n else:\n checkpoint_file = Config.get_best_or_last_checkpoint(args.source)\n\n # Load the checkpoint and strip some fieleds\n checkpoint = torch.load(checkpoint_file, map_location=\"cpu\")\n\n # Dump it\n print(f\"# Dump of checkpoint: {checkpoint_file}\")\n excluded_keys = {\"model\", \"optimizer_state_dict\"}\n if args.keys is not None:\n excluded_keys = {key for key in excluded_keys if key not in args.keys}\n excluded_keys = excluded_keys.union(\n {key for key in checkpoint if key not in args.keys}\n )\n excluded_keys = {key for key in excluded_keys if key in checkpoint}\n for key in excluded_keys:\n del checkpoint[key]\n if excluded_keys:\n print(f\"# Excluded keys: {excluded_keys}\")\n yaml.dump(checkpoint, sys.stdout)\n\n\n### DUMP TRACE ##########################################################################\n\n\ndef _add_dump_trace_parser(subparsers_dump):\n parser_dump_trace = subparsers_dump.add_parser(\n \"trace\",\n help=(\n \"Process and dump trace to stdout and/or csv. 
The trace will be processed \"\n \"backwards, starting with a specified job_id.\"\n ),\n )\n\n parser_dump_trace.add_argument(\n \"source\",\n help=\"A path to either a checkpoint or a job folder.\",\n nargs=\"?\",\n default=\".\",\n )\n\n parser_dump_trace.add_argument(\n \"--checkpoint\",\n default=False,\n action=\"store_const\",\n const=True,\n help=(\n \"If source is a path to a job folder and --checkpoint is set the best \"\n \"(if present) or last checkpoint will be used to determine the job_id\"\n ),\n )\n\n parser_dump_trace.add_argument(\n \"--job_id\",\n default=False,\n help=(\n \"Specifies the training job id in the trace \"\n \"from where to start processing backward\"\n ),\n )\n\n parser_dump_trace.add_argument(\n \"--max_epoch\",\n default=False,\n help=(\n \"Specifies the epoch in the trace\"\n \"from where to start processing backwards\"\n ),\n )\n\n parser_dump_trace.add_argument(\n \"--truncate\",\n default=False,\n action=\"store_const\",\n const=True,\n help=(\n \"If a checkpoint is used (by providing one explicitly as source or by \"\n \"using --checkpoint), --truncate will define the max_epoch to process as\"\n \"specified by the checkpoint\"\n ),\n )\n\n for argument in [\n \"--train\",\n \"--valid\",\n \"--test\",\n \"--search\",\n \"--yaml\",\n \"--batch\",\n \"--example\",\n \"--timeit\",\n \"--no-header\",\n ]:\n parser_dump_trace.add_argument(\n argument, action=\"store_const\", const=True, default=False\n )\n parser_dump_trace.add_argument(\n \"--no-default-keys\", \"-K\", action=\"store_const\", const=True, default=False\n )\n\n parser_dump_trace.add_argument(\"--keysfile\", default=False)\n parser_dump_trace.add_argument(\"--keys\", \"-k\", nargs=\"*\", type=str)\n\n\ndef _dump_trace(args):\n \"\"\" Executes the 'dump trace' command.\"\"\"\n start = time.time()\n if (args.train or args.valid or args.test) and args.search:\n print(\n \"--search and --train, --valid, --test are mutually exclusive\",\n file=sys.stderr,\n )\n exit(1)\n entry_type_specified = True\n if not (args.train or args.valid or args.test or args.search):\n entry_type_specified = False\n args.train = True\n args.valid = True\n args.test = True\n\n checkpoint_path = None\n if \".pt\" in os.path.split(args.source)[-1]:\n checkpoint_path = args.source\n folder_path = os.path.split(args.source)[0]\n else:\n # determine job_id and epoch from last/best checkpoint automatically\n if args.checkpoint:\n checkpoint_path = Config.get_best_or_last_checkpoint(args.source)\n folder_path = args.source\n if not args.checkpoint and args.truncate:\n raise ValueError(\n \"You can only use --truncate when a checkpoint is specified.\"\n \"Consider using --checkpoint or provide a checkpoint file as source\"\n )\n trace = os.path.join(folder_path, \"trace.yaml\")\n if not os.path.isfile(trace):\n sys.stderr.write(\"No trace found at {}\\n\".format(trace))\n exit(1)\n\n keymap = OrderedDict()\n additional_keys = []\n if args.keysfile:\n with open(args.keysfile, \"r\") as keyfile:\n additional_keys = keyfile.readlines()\n if args.keys:\n additional_keys += args.keys\n for line in additional_keys:\n line = line.rstrip(\"\\n\").replace(\" \", \"\")\n name_key = line.split(\"=\")\n if len(name_key) == 1:\n name_key += name_key\n keymap[name_key[0]] = name_key[1]\n\n job_id = None\n epoch = int(args.max_epoch)\n # use job_id and epoch from checkpoint\n if checkpoint_path and args.truncate:\n checkpoint = torch.load(f=checkpoint_path, map_location=\"cpu\")\n job_id = checkpoint[\"job_id\"]\n epoch = 
checkpoint[\"epoch\"]\n # only use job_id from checkpoint\n elif checkpoint_path:\n checkpoint = torch.load(f=checkpoint_path, map_location=\"cpu\")\n job_id = checkpoint[\"job_id\"]\n # override job_id and epoch with user arguments\n if args.job_id:\n job_id = args.job_id\n if not epoch:\n epoch = float(\"inf\")\n\n entries, job_epochs = [], {}\n if not args.search:\n entries, job_epochs = Trace.grep_training_trace_entries(\n tracefile=trace,\n train=args.train,\n test=args.test,\n valid=args.valid,\n example=args.example,\n batch=args.batch,\n job_id=job_id,\n epoch_of_last=epoch,\n )\n if not entries and (args.search or not entry_type_specified):\n entries = Trace.grep_entries(tracefile=trace, conjunctions=[f\"scope: train\"])\n epoch = None\n if entries:\n args.search = True\n if not entries:\n print(\"No relevant trace entries found.\", file=sys.stderr)\n exit(1)\n\n middle = time.time()\n if not args.yaml:\n csv_writer = csv.writer(sys.stdout)\n # dict[new_name] = (lookup_name, where)\n # if where==\"config\"/\"trace\" it will be looked up automatically\n # if where==\"sep\" it must be added in in the write loop separately\n if args.no_default_keys:\n default_attributes = OrderedDict()\n else:\n default_attributes = OrderedDict(\n [\n (\"job_id\", (\"job_id\", \"sep\")),\n (\"dataset\", (\"dataset.name\", \"config\")),\n (\"model\", (\"model\", \"sep\")),\n (\"reciprocal\", (\"reciprocal\", \"sep\")),\n (\"job\", (\"job\", \"sep\")),\n (\"job_type\", (\"type\", \"trace\")),\n (\"split\", (\"split\", \"sep\")),\n (\"epoch\", (\"epoch\", \"trace\")),\n (\"avg_loss\", (\"avg_loss\", \"trace\")),\n (\"avg_penalty\", (\"avg_penalty\", \"trace\")),\n (\"avg_cost\", (\"avg_cost\", \"trace\")),\n (\"metric_name\", (\"valid.metric\", \"config\")),\n (\"metric\", (\"metric\", \"sep\")),\n ]\n )\n if args.search:\n default_attributes[\"child_folder\"] = (\"folder\", \"trace\")\n default_attributes[\"child_job_id\"] = (\"child_job_id\", \"sep\")\n\n if not args.no_header:\n csv_writer.writerow(\n list(default_attributes.keys()) + [key for key in keymap.keys()]\n )\n # store configs for job_id's s.t. they need to be loaded only once\n configs = {}\n warning_shown = False\n for entry in entries:\n if epoch and not entry.get(\"epoch\") <= float(epoch):\n continue\n # filter out not needed entries from a previous job when\n # a job was resumed from the middle\n if entry.get(\"job\") == \"train\":\n job_id = entry.get(\"job_id\")\n if entry.get(\"epoch\") > job_epochs[job_id]:\n continue\n\n # find relevant config file\n child_job_id = entry.get(\"child_job_id\") if \"child_job_id\" in entry else None\n config_key = (\n entry.get(\"folder\") + \"/\" + str(child_job_id)\n if args.search\n else entry.get(\"job_id\")\n )\n if config_key in configs.keys():\n config = configs[config_key]\n else:\n if args.search:\n if not child_job_id and not warning_shown:\n # This warning is from Dec 19, 2019. TODO remove\n print(\n \"Warning: You are dumping the trace of an older search job. 
\"\n \"This is fine only if \"\n \"the config.yaml files in each subfolder have not been modified \"\n \"after running the corresponding training job.\",\n file=sys.stderr,\n )\n warning_shown = True\n config = get_config_for_job_id(\n child_job_id, os.path.join(folder_path, entry.get(\"folder\"))\n )\n entry[\"type\"] = config.get(\"train.type\")\n else:\n config = get_config_for_job_id(entry.get(\"job_id\"), folder_path)\n configs[config_key] = config\n\n new_attributes = OrderedDict()\n if config.get_default(\"model\") == \"reciprocal_relations_model\":\n model = config.get_default(\"reciprocal_relations_model.base_model.type\")\n # the string that substitutes $base_model in keymap if it exists\n subs_model = \"reciprocal_relations_model.base_model\"\n reciprocal = 1\n else:\n model = config.get_default(\"model\")\n subs_model = model\n reciprocal = 0\n for new_key in keymap.keys():\n lookup = keymap[new_key]\n if \"$base_model\" in lookup:\n lookup = lookup.replace(\"$base_model\", subs_model)\n try:\n if lookup == \"$folder\":\n val = os.path.abspath(folder_path)\n elif lookup == \"$checkpoint\":\n val = os.path.abspath(checkpoint_path)\n elif lookup == \"$machine\":\n val = socket.gethostname()\n else:\n val = config.get_default(lookup)\n except:\n # creates empty field if key is not existing\n val = entry.get(lookup)\n if type(val) == bool and val:\n val = 1\n elif type(val) == bool and not val:\n val = 0\n new_attributes[new_key] = val\n if not args.yaml:\n # find the actual values for the default attributes\n actual_default = default_attributes.copy()\n for new_key in default_attributes.keys():\n lookup, where = default_attributes[new_key]\n if where == \"config\":\n actual_default[new_key] = config.get(lookup)\n elif where == \"trace\":\n actual_default[new_key] = entry.get(lookup)\n # keys with separate treatment\n # \"split\" in {train,test,valid} for the datatype\n # \"job\" in {train,eval,valid,search}\n if entry.get(\"job\") == \"train\":\n actual_default[\"split\"] = \"train\"\n actual_default[\"job\"] = \"train\"\n elif entry.get(\"job\") == \"eval\":\n actual_default[\"split\"] = entry.get(\"data\") # test or valid\n if entry.get(\"resumed_from_job_id\"):\n actual_default[\"job\"] = \"eval\" # from \"kge eval\"\n else:\n actual_default[\"job\"] = \"valid\" # child of training job\n else:\n actual_default[\"job\"] = entry.get(\"job\")\n actual_default[\"split\"] = entry.get(\"data\")\n actual_default[\"job_id\"] = entry.get(\"job_id\").split(\"-\")[0]\n actual_default[\"model\"] = model\n actual_default[\"reciprocal\"] = reciprocal\n # lookup name is in config value is in trace\n actual_default[\"metric\"] = entry.get(config.get_default(\"valid.metric\"))\n if args.search:\n actual_default[\"child_job_id\"] = entry.get(\"child_job_id\").split(\"-\")[0]\n for key in list(actual_default.keys()):\n if key not in default_attributes:\n del actual_default[key]\n csv_writer.writerow(\n [actual_default[new_key] for new_key in actual_default.keys()]\n + [new_attributes[new_key] for new_key in new_attributes.keys()]\n )\n else:\n entry.update({\"reciprocal\": reciprocal, \"model\": model})\n if keymap:\n entry.update(new_attributes)\n sys.stdout.write(re.sub(\"[{}']\", \"\", str(entry)))\n sys.stdout.write(\"\\n\")\n end = time.time()\n if args.timeit:\n sys.stdout.write(\"Grep + processing took {} \\n\".format(middle - start))\n sys.stdout.write(\"Writing took {}\".format(end - middle))\n\n\n### DUMP CONFIG 
########################################################################\n\n\ndef _add_dump_config_parser(subparsers_dump):\n    parser_dump_config = subparsers_dump.add_parser(\n        \"config\", help=(\"Dump a configuration\")\n    )\n    parser_dump_config.add_argument(\n        \"source\",\n        help=\"A path to either a checkpoint, a config file, or a job folder.\",\n        nargs=\"?\",\n        default=\".\",\n    )\n\n    parser_dump_config.add_argument(\n        \"--minimal\",\n        \"-m\",\n        default=False,\n        action=\"store_const\",\n        const=True,\n        help=\"Only dump configuration options different from the default configuration (default)\",\n    )\n    parser_dump_config.add_argument(\n        \"--raw\",\n        \"-r\",\n        default=False,\n        action=\"store_const\",\n        const=True,\n        help=\"Dump the config as is\",\n    )\n    parser_dump_config.add_argument(\n        \"--full\",\n        \"-f\",\n        default=False,\n        action=\"store_const\",\n        const=True,\n        help=\"Add all values from the default configuration before dumping the config\",\n    )\n\n    parser_dump_config.add_argument(\n        \"--include\",\n        \"-i\",\n        type=str,\n        nargs=\"*\",\n        help=\"List of keys to include (separated by space). \"\n        \"All subkeys are also included. Cannot be used with --raw.\",\n    )\n\n    parser_dump_config.add_argument(\n        \"--exclude\",\n        \"-e\",\n        type=str,\n        nargs=\"*\",\n        help=\"List of keys to exclude (separated by space). \"\n        \"All subkeys are also excluded. Applied after --include. \"\n        \"Cannot be used with --raw.\",\n    )\n\n\ndef _dump_config(args):\n    \"\"\" Executes the 'dump config' command.\"\"\"\n    if not (args.raw or args.full or args.minimal):\n        args.minimal = True\n\n    if args.raw + args.full + args.minimal != 1:\n        raise ValueError(\"Exactly one of --raw, --full, or --minimal must be set\")\n\n    if args.raw and (args.include or args.exclude):\n        raise ValueError(\n            \"--include and --exclude cannot be used with --raw \"\n            \"(use --full or --minimal instead).\"\n        )\n\n    config = Config()\n    config_file = None\n    if os.path.isdir(args.source):\n        config_file = os.path.join(args.source, \"config.yaml\")\n        config.load(config_file)\n    elif \".yaml\" in os.path.split(args.source)[-1]:\n        config_file = args.source\n        config.load(config_file)\n    else:  # a checkpoint\n        checkpoint_file = torch.load(args.source, map_location=\"cpu\")\n        if args.raw:\n            config = checkpoint_file[\"config\"]\n        else:\n            config.load_options(checkpoint_file[\"config\"].options)\n\n    def print_options(options):\n        # drop all arguments that are not included\n        if args.include:\n            args.include = set(args.include)\n            options_copy = copy.deepcopy(options)\n            for key in options_copy.keys():\n                prefix = key\n                keep = False\n                while True:\n                    if prefix in args.include:\n                        keep = True\n                        break\n                    else:\n                        last_dot_index = prefix.rfind(\".\")\n                        if last_dot_index < 0:\n                            break\n                        else:\n                            prefix = prefix[:last_dot_index]\n                if not keep:\n                    del options[key]\n\n        # remove all arguments that are excluded\n        if args.exclude:\n            args.exclude = set(args.exclude)\n            options_copy = copy.deepcopy(options)\n            for key in options_copy.keys():\n                prefix = key\n                while True:\n                    if prefix in args.exclude:\n                        del options[key]\n                        break\n                    else:\n                        last_dot_index = prefix.rfind(\".\")\n                        if last_dot_index < 0:\n                            break\n                        else:\n                            prefix = prefix[:last_dot_index]\n\n        # convert the remaining options to a Config and print it\n        config = Config(load_default=False)\n        config.set_all(options, create=True)\n        print(yaml.dump(config.options))\n\n    if args.raw:\n        if config_file:\n            with open(config_file, \"r\") as f:\n                print(f.read())\n        else:\n            print_options(config.options)\n    elif args.full:
\n        print_options(config.options)\n    else:  # minimal\n        default_config = Config()\n        imports = config.get(\"import\")\n        if imports is not None:\n            if not isinstance(imports, list):\n                imports = [imports]\n            for module_name in imports:\n                default_config._import(module_name)\n        default_options = Config.flatten(default_config.options)\n        new_options = Config.flatten(config.options)\n        minimal_options = {}\n\n        for option, value in new_options.items():\n            if option not in default_options or default_options[option] != value:\n                minimal_options[option] = value\n\n        print_options(minimal_options)\n"
] | [
[
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
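The `--include`/`--exclude` handling in `print_options` above matches each flattened option key against the selection set by repeatedly stripping the last dot-separated component. A minimal standalone sketch of that prefix walk (the helper name and sample keys are illustrative, not from the original file):

    def matches_prefix(key: str, selected: set) -> bool:
        # Walk "a.b.c" -> "a.b" -> "a", stopping at the first selected prefix.
        prefix = key
        while True:
            if prefix in selected:
                return True
            last_dot = prefix.rfind(".")
            if last_dot < 0:
                return False
            prefix = prefix[:last_dot]

    options = {"train.type": "KvsAll", "train.optimizer": "Adam", "eval.split": "test"}
    kept = {k: v for k, v in options.items() if matches_prefix(k, {"train"})}
    print(kept)  # {'train.type': 'KvsAll', 'train.optimizer': 'Adam'}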
JanS97/PlaNet | [
"c03c1d5b51fd20d1ec907f6591856d283092767e"
] | [
"main.py"
] | [
"import argparse\nfrom math import inf\nimport os\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nfrom torch.distributions import Normal\nfrom torch.distributions.kl import kl_divergence\nfrom torch.nn import functional as F\nfrom torchvision.utils import make_grid, save_image\nfrom tqdm import tqdm\nfrom env import CONTROL_SUITE_ENVS, Env, GYM_ENVS, EnvBatcher\nfrom memory import ExperienceReplay\nfrom models import bottle, Encoder, ObservationModel, RewardModel, TransitionModel\nfrom planner import MPCPlanner\nfrom utils import lineplot, write_video\n\n\n# Hyperparameters\nparser = argparse.ArgumentParser(description='PlaNet')\nparser.add_argument('--id', type=str, default='default', help='Experiment ID')\nparser.add_argument('--seed', type=int, default=1, metavar='S', help='Random seed')\nparser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')\nparser.add_argument('--env', type=str, default='Pendulum-v0', choices=GYM_ENVS + CONTROL_SUITE_ENVS, help='Gym/Control Suite environment')\nparser.add_argument('--symbolic-env', action='store_true', help='Symbolic features')\nparser.add_argument('--max-episode-length', type=int, default=1000, metavar='T', help='Max episode length')\nparser.add_argument('--experience-size', type=int, default=1000000, metavar='D', help='Experience replay size') # Original implementation has an unlimited buffer size, but 1 million is the max experience collected anyway\nparser.add_argument('--activation-function', type=str, default='relu', choices=dir(F), help='Model activation function')\nparser.add_argument('--embedding-size', type=int, default=1024, metavar='E', help='Observation embedding size') # Note that the default encoder for visual observations outputs a 1024D vector; for other embedding sizes an additional fully-connected layer is used\nparser.add_argument('--hidden-size', type=int, default=200, metavar='H', help='Hidden size')\nparser.add_argument('--belief-size', type=int, default=200, metavar='H', help='Belief/hidden size')\nparser.add_argument('--state-size', type=int, default=30, metavar='Z', help='State/latent size')\nparser.add_argument('--action-repeat', type=int, default=2, metavar='R', help='Action repeat')\nparser.add_argument('--action-noise', type=float, default=0.3, metavar='ε', help='Action noise')\nparser.add_argument('--episodes', type=int, default=1000, metavar='E', help='Total number of episodes')\nparser.add_argument('--seed-episodes', type=int, default=5, metavar='S', help='Seed episodes')\nparser.add_argument('--collect-interval', type=int, default=100, metavar='C', help='Collect interval')\nparser.add_argument('--batch-size', type=int, default=50, metavar='B', help='Batch size')\nparser.add_argument('--chunk-size', type=int, default=50, metavar='L', help='Chunk size')\nparser.add_argument('--overshooting-distance', type=int, default=50, metavar='D', help='Latent overshooting distance/latent overshooting weight for t = 1')\nparser.add_argument('--overshooting-kl-beta', type=float, default=0, metavar='β>1', help='Latent overshooting KL weight for t > 1 (0 to disable)')\nparser.add_argument('--overshooting-reward-scale', type=float, default=0, metavar='R>1', help='Latent overshooting reward prediction weight for t > 1 (0 to disable)')\nparser.add_argument('--global-kl-beta', type=float, default=0, metavar='βg', help='Global KL weight (0 to disable)')\nparser.add_argument('--free-nats', type=float, default=3, metavar='F', help='Free nats')\nparser.add_argument('--bit-depth', type=int, default=5, 
metavar='B', help='Image bit depth (quantisation)')\nparser.add_argument('--learning-rate', type=float, default=1e-3, metavar='α', help='Learning rate') \nparser.add_argument('--learning-rate-schedule', type=int, default=0, metavar='αS', help='Linear learning rate schedule (optimisation steps from 0 to final learning rate; 0 to disable)') \nparser.add_argument('--adam-epsilon', type=float, default=1e-4, metavar='ε', help='Adam optimiser epsilon value') \n# Note that original has a linear learning rate decay, but it seems unlikely that this makes a significant difference\nparser.add_argument('--grad-clip-norm', type=float, default=1000, metavar='C', help='Gradient clipping norm')\nparser.add_argument('--planning-horizon', type=int, default=12, metavar='H', help='Planning horizon distance')\nparser.add_argument('--optimisation-iters', type=int, default=10, metavar='I', help='Planning optimisation iterations')\nparser.add_argument('--candidates', type=int, default=1000, metavar='J', help='Candidate samples per iteration')\nparser.add_argument('--top-candidates', type=int, default=100, metavar='K', help='Number of top candidates to fit')\nparser.add_argument('--test', action='store_true', help='Test only')\nparser.add_argument('--test-interval', type=int, default=25, metavar='I', help='Test interval (episodes)')\nparser.add_argument('--test-episodes', type=int, default=10, metavar='E', help='Number of test episodes')\nparser.add_argument('--checkpoint-interval', type=int, default=50, metavar='I', help='Checkpoint interval (episodes)')\nparser.add_argument('--checkpoint-experience', action='store_true', help='Checkpoint experience replay')\nparser.add_argument('--models', type=str, default='', metavar='M', help='Load model checkpoint')\nparser.add_argument('--experience-replay', type=str, default='', metavar='ER', help='Load experience replay')\nparser.add_argument('--render', action='store_true', help='Render environment')\nargs = parser.parse_args()\nargs.overshooting_distance = min(args.chunk_size, args.overshooting_distance)  # Overshooting distance cannot be greater than chunk size\nprint(' ' * 26 + 'Options')\nfor k, v in vars(args).items():\n  print(' ' * 26 + k + ': ' + str(v))\n\n\n# Setup\nresults_dir = os.path.join('results', args.id)\nos.makedirs(results_dir, exist_ok=True)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\nif torch.cuda.is_available() and not args.disable_cuda:\n  args.device = torch.device('cuda')\n  torch.cuda.manual_seed(args.seed)\nelse:\n  args.device = torch.device('cpu')\nmetrics = {'steps': [], 'episodes': [], 'train_rewards': [], 'test_episodes': [], 'test_rewards': [], 'observation_loss': [], 'reward_loss': [], 'kl_loss': []}\n\n\n# Initialise training environment and experience replay memory\nenv = Env(args.env, args.symbolic_env, args.seed, args.max_episode_length, args.action_repeat, args.bit_depth)\nif args.experience_replay != '' and os.path.exists(args.experience_replay):\n  D = torch.load(args.experience_replay)\n  metrics['steps'], metrics['episodes'] = [D.steps] * D.episodes, list(range(1, D.episodes + 1))\nelif not args.test:\n  D = ExperienceReplay(args.experience_size, args.symbolic_env, env.observation_size, env.action_size, args.bit_depth, args.device)\n  # Initialise dataset D with S random seed episodes\n  for s in range(1, args.seed_episodes + 1):\n    observation, done, t = env.reset(), False, 0\n    while not done:\n      action = env.sample_random_action()\n      next_observation, reward, done = env.step(action)
\n      D.append(observation, action, reward, done)\n      observation = next_observation\n      t += 1\n    metrics['steps'].append(t * args.action_repeat + (0 if len(metrics['steps']) == 0 else metrics['steps'][-1]))\n    metrics['episodes'].append(s)\n\n\n# Initialise model parameters randomly\ntransition_model = TransitionModel(args.belief_size, args.state_size, env.action_size, args.hidden_size, args.embedding_size, args.activation_function).to(device=args.device)\nobservation_model = ObservationModel(args.symbolic_env, env.observation_size, args.belief_size, args.state_size, args.embedding_size, args.activation_function).to(device=args.device)\nreward_model = RewardModel(args.belief_size, args.state_size, args.hidden_size, args.activation_function).to(device=args.device)\nencoder = Encoder(args.symbolic_env, env.observation_size, args.embedding_size, args.activation_function).to(device=args.device)\nparam_list = list(transition_model.parameters()) + list(observation_model.parameters()) + list(reward_model.parameters()) + list(encoder.parameters())\noptimiser = optim.Adam(param_list, lr=0 if args.learning_rate_schedule != 0 else args.learning_rate, eps=args.adam_epsilon)\nif args.models != '' and os.path.exists(args.models):\n  model_dicts = torch.load(args.models)\n  transition_model.load_state_dict(model_dicts['transition_model'])\n  observation_model.load_state_dict(model_dicts['observation_model'])\n  reward_model.load_state_dict(model_dicts['reward_model'])\n  encoder.load_state_dict(model_dicts['encoder'])\n  optimiser.load_state_dict(model_dicts['optimiser'])\nplanner = MPCPlanner(env.action_size, args.planning_horizon, args.optimisation_iters, args.candidates, args.top_candidates, transition_model, reward_model, env.action_range[0], env.action_range[1])\nglobal_prior = Normal(torch.zeros(args.batch_size, args.state_size, device=args.device), torch.ones(args.batch_size, args.state_size, device=args.device))  # Global prior N(0, I)\nfree_nats = torch.full((1, ), args.free_nats, dtype=torch.float32, device=args.device)  # Allowed deviation in KL divergence\n\n\ndef update_belief_and_act(args, env, planner, transition_model, encoder, belief, posterior_state, action, observation, min_action=-inf, max_action=inf, explore=False):\n  # Infer belief over current state q(s_t|o≤t,a<t) from the history\n  belief, _, _, _, posterior_state, _, _ = transition_model(posterior_state, action.unsqueeze(dim=0), belief, encoder(observation).unsqueeze(dim=0))  # Action and observation need extra time dimension\n  belief, posterior_state = belief.squeeze(dim=0), posterior_state.squeeze(dim=0)  # Remove time dimension from belief/state\n  action = planner(belief, posterior_state)  # Get action from planner(q(s_t|o≤t,a<t), p)\n  if explore:\n    action = action + args.action_noise * torch.randn_like(action)  # Add exploration noise ε ~ p(ε) to the action\n  action.clamp_(min=min_action, max=max_action)  # Clip action range\n  next_observation, reward, done = env.step(action.cpu() if isinstance(env, EnvBatcher) else action[0].cpu())  # Perform environment step (action repeats handled internally)\n  return belief, posterior_state, action, next_observation, reward, done\n\n\n# Testing only\nif args.test:\n  # Set models to eval mode\n  transition_model.eval()\n  reward_model.eval()\n  encoder.eval()\n  with torch.no_grad():\n    total_reward = 0\n    for _ in tqdm(range(args.test_episodes)):\n      observation = env.reset()\n      belief, posterior_state, action = torch.zeros(1, args.belief_size, device=args.device), torch.zeros(1, args.state_size, device=args.device), torch.zeros(1, 
env.action_size, device=args.device)\n pbar = tqdm(range(args.max_episode_length // args.action_repeat))\n for t in pbar:\n belief, posterior_state, action, observation, reward, done = update_belief_and_act(args, env, planner, transition_model, encoder, belief, posterior_state, action, observation.to(device=args.device), env.action_range[0], env.action_range[1])\n total_reward += reward\n if args.render:\n env.render()\n if done:\n pbar.close()\n break\n print('Average Reward:', total_reward / args.test_episodes)\n env.close()\n quit()\n\n\n# Training (and testing)\nfor episode in tqdm(range(metrics['episodes'][-1] + 1, args.episodes + 1), total=args.episodes, initial=metrics['episodes'][-1] + 1):\n # Model fitting\n losses = []\n for s in tqdm(range(args.collect_interval)):\n # Draw sequence chunks {(o_t, a_t, r_t+1, terminal_t+1)} ~ D uniformly at random from the dataset (including terminal flags)\n observations, actions, rewards, nonterminals = D.sample(args.batch_size, args.chunk_size) # Transitions start at time t = 0\n # Create initial belief and state for time t = 0\n init_belief, init_state = torch.zeros(args.batch_size, args.belief_size, device=args.device), torch.zeros(args.batch_size, args.state_size, device=args.device)\n # Update belief/state using posterior from previous belief/state, previous action and current observation (over entire sequence at once)\n beliefs, prior_states, prior_means, prior_std_devs, posterior_states, posterior_means, posterior_std_devs = transition_model(init_state, actions[:-1], init_belief, bottle(encoder, (observations[1:], )), nonterminals[:-1])\n # Calculate observation likelihood, reward likelihood and KL losses (for t = 0 only for latent overshooting); sum over final dims, average over batch and time (original implementation, though paper seems to miss 1/T scaling?)\n observation_loss = F.mse_loss(bottle(observation_model, (beliefs, posterior_states)), observations[1:], reduction='none').sum(dim=2 if args.symbolic_env else (2, 3, 4)).mean(dim=(0, 1))\n reward_loss = F.mse_loss(bottle(reward_model, (beliefs, posterior_states)), rewards[:-1], reduction='none').mean(dim=(0, 1))\n kl_loss = torch.max(kl_divergence(Normal(posterior_means, posterior_std_devs), Normal(prior_means, prior_std_devs)).sum(dim=2), free_nats).mean(dim=(0, 1)) # Note that normalisation by overshooting distance and weighting by overshooting distance cancel out\n if args.global_kl_beta != 0:\n kl_loss += args.global_kl_beta * kl_divergence(Normal(posterior_means, posterior_std_devs), global_prior).sum(dim=2).mean(dim=(0, 1))\n # Calculate latent overshooting objective for t > 0\n if args.overshooting_kl_beta != 0:\n overshooting_vars = [] # Collect variables for overshooting to process in batch\n for t in range(1, args.chunk_size - 1):\n d = min(t + args.overshooting_distance, args.chunk_size - 1) # Overshooting distance\n t_, d_ = t - 1, d - 1 # Use t_ and d_ to deal with different time indexing for latent states\n seq_pad = (0, 0, 0, 0, 0, t - d + args.overshooting_distance) # Calculate sequence padding so overshooting terms can be calculated in one batch\n # Store (0) actions, (1) nonterminals, (2) rewards, (3) beliefs, (4) posterior states, (5) posterior means, (6) posterior standard deviations and (7) sequence masks\n overshooting_vars.append((F.pad(actions[t:d], seq_pad), F.pad(nonterminals[t:d], seq_pad), F.pad(rewards[t:d], seq_pad[2:]), beliefs[t_], posterior_states[t_].detach(), F.pad(posterior_means[t_ + 1:d_ + 1].detach(), seq_pad), F.pad(posterior_std_devs[t_ + 
1:d_ + 1].detach(), seq_pad, value=1), F.pad(torch.ones(d - t, args.batch_size, args.state_size, device=args.device), seq_pad))) # Posterior standard deviations must be padded with > 0 to prevent infinite KL divergences\n overshooting_vars = tuple(zip(*overshooting_vars))\n # Update belief/state using prior from previous belief/state and previous action (over entire sequence at once)\n beliefs, prior_states, prior_means, prior_std_devs = transition_model(torch.cat(overshooting_vars[4], dim=0), torch.cat(overshooting_vars[0], dim=1), torch.cat(overshooting_vars[3], dim=0), None, torch.cat(overshooting_vars[1], dim=1))\n seq_mask = torch.cat(overshooting_vars[7], dim=1)\n # Calculate overshooting KL loss with sequence mask\n kl_loss += (1 / args.overshooting_distance) * args.overshooting_kl_beta * torch.max((kl_divergence(Normal(torch.cat(overshooting_vars[5], dim=1), torch.cat(overshooting_vars[6], dim=1)), Normal(prior_means, prior_std_devs)) * seq_mask).sum(dim=2), free_nats).mean(dim=(0, 1)) * (args.chunk_size - 1) # Update KL loss (compensating for extra average over each overshooting/open loop sequence) \n # Calculate overshooting reward prediction loss with sequence mask\n if args.overshooting_reward_scale != 0:\n reward_loss += (1 / args.overshooting_distance) * args.overshooting_reward_scale * F.mse_loss(bottle(reward_model, (beliefs, prior_states)) * seq_mask[:, :, 0], torch.cat(overshooting_vars[2], dim=1), reduction='none').mean(dim=(0, 1)) * (args.chunk_size - 1) # Update reward loss (compensating for extra average over each overshooting/open loop sequence) \n\n # Apply linearly ramping learning rate schedule\n if args.learning_rate_schedule != 0:\n for group in optimiser.param_groups:\n group['lr'] = min(group['lr'] + args.learning_rate / args.learning_rate_schedule, args.learning_rate)\n # Update model parameters\n optimiser.zero_grad()\n (observation_loss + reward_loss + kl_loss).backward()\n nn.utils.clip_grad_norm_(param_list, args.grad_clip_norm, norm_type=2)\n optimiser.step()\n # Store (0) observation loss (1) reward loss (2) KL loss\n losses.append([observation_loss.item(), reward_loss.item(), kl_loss.item()])\n\n # Update and plot loss metrics\n losses = tuple(zip(*losses))\n metrics['observation_loss'].append(losses[0])\n metrics['reward_loss'].append(losses[1])\n metrics['kl_loss'].append(losses[2])\n lineplot(metrics['episodes'][-len(metrics['observation_loss']):], metrics['observation_loss'], 'observation_loss', results_dir)\n lineplot(metrics['episodes'][-len(metrics['reward_loss']):], metrics['reward_loss'], 'reward_loss', results_dir)\n lineplot(metrics['episodes'][-len(metrics['kl_loss']):], metrics['kl_loss'], 'kl_loss', results_dir)\n\n\n # Data collection\n with torch.no_grad():\n observation, total_reward = env.reset(), 0\n belief, posterior_state, action = torch.zeros(1, args.belief_size, device=args.device), torch.zeros(1, args.state_size, device=args.device), torch.zeros(1, env.action_size, device=args.device)\n pbar = tqdm(range(args.max_episode_length // args.action_repeat))\n for t in pbar:\n belief, posterior_state, action, next_observation, reward, done = update_belief_and_act(args, env, planner, transition_model, encoder, belief, posterior_state, action, observation.to(device=args.device), env.action_range[0], env.action_range[1], explore=True)\n D.append(observation, action.cpu(), reward, done)\n total_reward += reward\n observation = next_observation\n if args.render:\n env.render()\n if done:\n pbar.close()\n break\n \n # Update and plot train 
reward metrics\n metrics['steps'].append(t + metrics['steps'][-1])\n metrics['episodes'].append(episode)\n metrics['train_rewards'].append(total_reward)\n lineplot(metrics['episodes'][-len(metrics['train_rewards']):], metrics['train_rewards'], 'train_rewards', results_dir)\n\n\n # Test model\n if episode % args.test_interval == 0:\n # Set models to eval mode\n transition_model.eval()\n observation_model.eval()\n reward_model.eval()\n encoder.eval()\n # Initialise parallelised test environments\n test_envs = EnvBatcher(Env, (args.env, args.symbolic_env, args.seed, args.max_episode_length, args.action_repeat, args.bit_depth), {}, args.test_episodes)\n \n with torch.no_grad():\n observation, total_rewards, video_frames = test_envs.reset(), np.zeros((args.test_episodes, )), []\n belief, posterior_state, action = torch.zeros(args.test_episodes, args.belief_size, device=args.device), torch.zeros(args.test_episodes, args.state_size, device=args.device), torch.zeros(args.test_episodes, env.action_size, device=args.device)\n pbar = tqdm(range(args.max_episode_length // args.action_repeat))\n for t in pbar:\n belief, posterior_state, action, next_observation, reward, done = update_belief_and_act(args, test_envs, planner, transition_model, encoder, belief, posterior_state, action, observation.to(device=args.device), env.action_range[0], env.action_range[1])\n total_rewards += reward.numpy()\n if not args.symbolic_env: # Collect real vs. predicted frames for video\n video_frames.append(make_grid(torch.cat([observation, observation_model(belief, posterior_state).cpu()], dim=3) + 0.5, nrow=5).numpy()) # Decentre\n observation = next_observation\n if done.sum().item() == args.test_episodes:\n pbar.close()\n break\n \n # Update and plot reward metrics (and write video if applicable) and save metrics\n metrics['test_episodes'].append(episode)\n metrics['test_rewards'].append(total_rewards.tolist())\n lineplot(metrics['test_episodes'], metrics['test_rewards'], 'test_rewards', results_dir)\n lineplot(np.asarray(metrics['steps'])[np.asarray(metrics['test_episodes']) - 1], metrics['test_rewards'], 'test_rewards_steps', results_dir, xaxis='step')\n if not args.symbolic_env:\n episode_str = str(episode).zfill(len(str(args.episodes)))\n write_video(video_frames, 'test_episode_%s' % episode_str, results_dir) # Lossy compression\n save_image(torch.as_tensor(video_frames[-1]), os.path.join(results_dir, 'test_episode_%s.png' % episode_str))\n torch.save(metrics, os.path.join(results_dir, 'metrics.pth'))\n\n # Set models to train mode\n transition_model.train()\n observation_model.train()\n reward_model.train()\n encoder.train()\n # Close test environments\n test_envs.close()\n\n\n # Checkpoint models\n if episode % args.checkpoint_interval == 0:\n torch.save({'transition_model': transition_model.state_dict(), 'observation_model': observation_model.state_dict(), 'reward_model': reward_model.state_dict(), 'encoder': encoder.state_dict(), 'optimiser': optimiser.state_dict()}, os.path.join(results_dir, 'models_%d.pth' % episode))\n if args.checkpoint_experience:\n torch.save(D, os.path.join(results_dir, 'experience.pth')) # Warning: will fail with MemoryError with large memory sizes\n\n\n# Close training environment\nenv.close()\n"
] | [
[
"torch.randn_like",
"torch.load",
"torch.zeros",
"torch.cat",
"numpy.asarray",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.ones",
"numpy.zeros",
"torch.nn.functional.pad",
"torch.optim.Adam",
"torch.full",
"torch.distributions.Normal",
"torch.as_tensor",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.nn.utils.clip_grad_norm_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
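The KL term in the PlaNet training loop above is clamped from below by `free_nats` before averaging, so divergences under the budget contribute a constant and are not optimised further. A minimal sketch of that clamp on dummy posterior/prior parameters (shapes and values here are placeholders, not taken from a real run):

    import torch
    from torch.distributions import Normal
    from torch.distributions.kl import kl_divergence

    batch_size, state_size = 4, 30
    posterior = Normal(torch.zeros(batch_size, state_size), torch.ones(batch_size, state_size))
    prior = Normal(torch.zeros(batch_size, state_size), 2 * torch.ones(batch_size, state_size))
    free_nats = torch.full((1,), 3.0)  # allowed deviation in nats

    # Sum KL over the state dimension, then clamp from below with the free-nats budget.
    kl_loss = torch.max(kl_divergence(posterior, prior).sum(dim=1), free_nats).mean()
    print(kl_loss.item())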
hthieu166/selab-aic20-track-2 | [
"5a87a075e64711388e06fc22171ee314cca1ae10"
] | [
"src/losses/triplet_loss_online_utils.py"
] | [
"from itertools import combinations\n\nimport numpy as np\nimport torch\n\n\ndef pdist(vectors):\n distance_matrix = -2 * vectors.mm(torch.t(vectors)) + vectors.pow(2).sum(dim=1).view(1, -1) + vectors.pow(2).sum(\n dim=1).view(-1, 1)\n return distance_matrix\n\n\nclass PairSelector:\n \"\"\"\n Implementation should return indices of positive pairs and negative pairs that will be passed to compute\n Contrastive Loss\n return positive_pairs, negative_pairs\n \"\"\"\n\n def __init__(self):\n pass\n\n def get_pairs(self, embeddings, labels):\n raise NotImplementedError\n\n\nclass AllPositivePairSelector(PairSelector):\n \"\"\"\n Discards embeddings and generates all possible pairs given labels.\n If balance is True, negative pairs are a random sample to match the number of positive samples\n \"\"\"\n def __init__(self, balance=True):\n super(AllPositivePairSelector, self).__init__()\n self.balance = balance\n\n def get_pairs(self, embeddings, labels):\n labels = labels.cpu().data.numpy()\n all_pairs = np.array(list(combinations(range(len(labels)), 2)))\n all_pairs = torch.LongTensor(all_pairs)\n positive_pairs = all_pairs[(labels[all_pairs[:, 0]] == labels[all_pairs[:, 1]]).nonzero()]\n negative_pairs = all_pairs[(labels[all_pairs[:, 0]] != labels[all_pairs[:, 1]]).nonzero()]\n if self.balance:\n negative_pairs = negative_pairs[torch.randperm(len(negative_pairs))[:len(positive_pairs)]]\n\n return positive_pairs, negative_pairs\n\n\nclass HardNegativePairSelector(PairSelector):\n \"\"\"\n Creates all possible positive pairs. For negative pairs, pairs with smallest distance are taken into consideration,\n matching the number of positive pairs.\n \"\"\"\n\n def __init__(self, cpu=True):\n super(HardNegativePairSelector, self).__init__()\n self.cpu = cpu\n\n def get_pairs(self, embeddings, labels):\n if self.cpu:\n embeddings = embeddings.cpu()\n distance_matrix = pdist(embeddings)\n\n labels = labels.cpu().data.numpy()\n all_pairs = np.array(list(combinations(range(len(labels)), 2)))\n all_pairs = torch.LongTensor(all_pairs)\n positive_pairs = all_pairs[(labels[all_pairs[:, 0]] == labels[all_pairs[:, 1]]).nonzero()]\n negative_pairs = all_pairs[(labels[all_pairs[:, 0]] != labels[all_pairs[:, 1]]).nonzero()]\n\n negative_distances = distance_matrix[negative_pairs[:, 0], negative_pairs[:, 1]]\n negative_distances = negative_distances.cpu().data.numpy()\n top_negatives = np.argpartition(negative_distances, len(positive_pairs))[:len(positive_pairs)]\n top_negative_pairs = negative_pairs[torch.LongTensor(top_negatives)]\n\n return positive_pairs, top_negative_pairs\n\n\nclass TripletSelector:\n \"\"\"\n Implementation should return indices of anchors, positive and negative samples\n return np array of shape [N_triplets x 3]\n \"\"\"\n\n def __init__(self):\n pass\n\n def get_triplets(self, embeddings, labels):\n raise NotImplementedError\n\n\nclass AllTripletSelector(TripletSelector):\n \"\"\"\n Returns all possible triplets\n May be impractical in most cases\n \"\"\"\n\n def __init__(self):\n super(AllTripletSelector, self).__init__()\n\n def get_triplets(self, embeddings, labels):\n labels = labels.cpu().data.numpy()\n triplets = []\n for label in set(labels):\n label_mask = (labels == label)\n label_indices = np.where(label_mask)[0]\n if len(label_indices) < 2:\n continue\n negative_indices = np.where(np.logical_not(label_mask))[0]\n anchor_positives = list(combinations(label_indices, 2)) # All anchor-positive pairs\n\n # Add all negatives for all positive pairs\n temp_triplets = 
[[anchor_positive[0], anchor_positive[1], neg_ind] for anchor_positive in anchor_positives\n for neg_ind in negative_indices]\n triplets += temp_triplets\n\n return torch.LongTensor(np.array(triplets))\n\n\ndef hardest_negative(loss_values):\n hard_negative = np.argmax(loss_values)\n return hard_negative if loss_values[hard_negative] > 0 else None\n\n\ndef random_hard_negative(loss_values):\n hard_negatives = np.where(loss_values > 0)[0]\n return np.random.choice(hard_negatives) if len(hard_negatives) > 0 else None\n\n\ndef semihard_negative(loss_values, margin):\n semihard_negatives = np.where(np.logical_and(loss_values < margin, loss_values > 0))[0]\n return np.random.choice(semihard_negatives) if len(semihard_negatives) > 0 else None\n\n\nclass FunctionNegativeTripletSelector(TripletSelector):\n \"\"\"\n For each positive pair, takes the hardest negative sample (with the greatest triplet loss value) to create a triplet\n Margin should match the margin used in triplet loss.\n negative_selection_fn should take array of loss_values for a given anchor-positive pair and all negative samples\n and return a negative index for that pair\n \"\"\"\n\n def __init__(self, margin, negative_selection_fn, cpu=True):\n super(FunctionNegativeTripletSelector, self).__init__()\n self.cpu = cpu\n self.margin = margin\n self.negative_selection_fn = negative_selection_fn\n\n def get_triplets(self, embeddings, labels):\n if self.cpu:\n embeddings = embeddings.cpu()\n distance_matrix = pdist(embeddings)\n distance_matrix = distance_matrix.cpu()\n\n labels = labels.cpu().data.numpy()\n triplets = []\n\n for label in set(labels):\n label_mask = (labels == label)\n label_indices = np.where(label_mask)[0]\n if len(label_indices) < 2:\n continue\n negative_indices = np.where(np.logical_not(label_mask))[0]\n anchor_positives = list(combinations(label_indices, 2)) # All anchor-positive pairs\n anchor_positives = np.array(anchor_positives)\n\n ap_distances = distance_matrix[anchor_positives[:, 0], anchor_positives[:, 1]]\n for anchor_positive, ap_distance in zip(anchor_positives, ap_distances):\n loss_values = ap_distance - distance_matrix[torch.LongTensor(np.array([anchor_positive[0]])), torch.LongTensor(negative_indices)] + self.margin\n loss_values = loss_values.data.cpu().numpy()\n hard_negative = self.negative_selection_fn(loss_values)\n if hard_negative is not None:\n hard_negative = negative_indices[hard_negative]\n triplets.append([anchor_positive[0], anchor_positive[1], hard_negative])\n\n if len(triplets) == 0:\n triplets.append([anchor_positive[0], anchor_positive[1], negative_indices[0]])\n\n triplets = np.array(triplets)\n\n return torch.LongTensor(triplets)\n\n\ndef HardestNegativeTripletSelector(margin, cpu=False): return FunctionNegativeTripletSelector(margin=margin,\n negative_selection_fn=hardest_negative,\n cpu=cpu)\n\n\ndef RandomNegativeTripletSelector(margin, cpu=False): return FunctionNegativeTripletSelector(margin=margin,\n negative_selection_fn=random_hard_negative,\n cpu=cpu)\n\n\ndef SemihardNegativeTripletSelector(margin, cpu=False): return FunctionNegativeTripletSelector(margin=margin,\n negative_selection_fn=lambda x: semihard_negative(x, margin),\n cpu=cpu)"
] | [
[
"torch.t",
"numpy.logical_not",
"torch.LongTensor",
"numpy.logical_and",
"numpy.random.choice",
"numpy.argmax",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
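The selectors above are meant to be called once per mini-batch on the current embeddings; a short usage sketch (assuming the file is importable as `triplet_loss_online_utils`, with random placeholder embeddings and labels):

    import torch
    from triplet_loss_online_utils import RandomNegativeTripletSelector

    embeddings = torch.randn(16, 8)       # 16 samples, 8-D embeddings
    labels = torch.randint(0, 4, (16,))   # 4 identity classes
    selector = RandomNegativeTripletSelector(margin=1.0)
    triplets = selector.get_triplets(embeddings, labels)
    print(triplets.shape)                 # [N_triplets, 3]: anchor, positive, negative indices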
liyidi/MPT | [
"76c1376d73ade2ecb5bb1bdd171e6f4d266951e5"
] | [
"MPAtt/model/mobileNet.py"
] | [
"'''MobileNetV3 in PyTorch.\nSee the paper \"Inverted Residuals and Linear Bottlenecks:\nMobile Networks for Classification, Detection and Segmentation\" for more details.\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\n\n\nclass hswish(nn.Module):\n def forward(self, x):\n out = x * F.relu6(x + 3, inplace=True) / 6\n return out\n\n\nclass hsigmoid(nn.Module):\n def forward(self, x):\n out = F.relu6(x + 3, inplace=True) / 6\n return out\n\n\nclass SeModule(nn.Module):\n def __init__(self, in_size, reduction=4):\n super(SeModule, self).__init__()\n self.se = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(in_size // reduction),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(in_size),\n hsigmoid()\n )\n\n def forward(self, x):\n return x * self.se(x)\n\n\nclass Block(nn.Module):\n '''expand + depthwise + pointwise'''\n def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride):\n super(Block, self).__init__()\n self.stride = stride\n self.se = semodule\n\n self.conv1 = nn.Conv2d(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn1 = nn.BatchNorm2d(expand_size)\n self.nolinear1 = nolinear\n self.conv2 = nn.Conv2d(expand_size, expand_size, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, groups=expand_size, bias=False)\n self.bn2 = nn.BatchNorm2d(expand_size)\n self.nolinear2 = nolinear\n self.conv3 = nn.Conv2d(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn3 = nn.BatchNorm2d(out_size)\n\n self.shortcut = nn.Sequential()\n if stride == 1 and in_size != out_size:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(out_size),\n )\n\n def forward(self, x):\n out = self.nolinear1(self.bn1(self.conv1(x)))\n out = self.nolinear2(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n if self.se != None:\n out = self.se(out)\n out = out + self.shortcut(x) if self.stride==1 else out\n return out\n\n\nclass MobileNetV3_Large(nn.Module):\n def __init__(self, num_classes=6):\n super(MobileNetV3_Large, self).__init__()\n self.conv1 = nn.Conv2d(6, 16, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.hs1 = hswish()\n\n self.bneck = nn.Sequential(\n Block(3, 16, 16, 16, nn.ReLU(inplace=True), None, 1),\n Block(3, 16, 64, 24, nn.ReLU(inplace=True), None, 2),\n Block(3, 24, 72, 24, nn.ReLU(inplace=True), None, 1),\n Block(5, 24, 72, 40, nn.ReLU(inplace=True), SeModule(40), 2),\n Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),\n Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),\n Block(3, 40, 240, 80, hswish(), None, 2),\n Block(3, 80, 200, 80, hswish(), None, 1),\n Block(3, 80, 184, 80, hswish(), None, 1),\n Block(3, 80, 184, 80, hswish(), None, 1),\n Block(3, 80, 480, 112, hswish(), SeModule(112), 1),\n Block(3, 112, 672, 112, hswish(), SeModule(112), 1),\n Block(5, 112, 672, 160, hswish(), SeModule(160), 1),\n Block(5, 160, 672, 160, hswish(), SeModule(160), 2),\n Block(5, 160, 960, 160, hswish(), SeModule(160), 1),\n )\n\n\n self.conv2 = nn.Conv2d(160, 960, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn2 = nn.BatchNorm2d(960)\n self.hs2 = hswish()\n self.linear3 = nn.Linear(960, 1280)\n self.bn3 = 
nn.BatchNorm1d(1280)\n self.hs3 = hswish()\n self.linear4 = nn.Linear(1280, num_classes)\n self.init_params()\n\n def init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n def forward(self, x):\n out = self.hs1(self.bn1(self.conv1(x)))\n out = self.bneck(out)\n out = self.hs2(self.bn2(self.conv2(out)))\n out = F.avg_pool2d(out, 7)\n out = out.view(out.size(0), -1)\n out = self.hs3(self.bn3(self.linear3(out)))\n out = self.linear4(out)\n return out\n\n\n\nclass MobileNetV3_Small(nn.Module):\n def __init__(self, num_classes=10):\n super(MobileNetV3_Small, self).__init__()\n self.conv1 = nn.Conv2d(10, 16, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.hs1 = hswish()\n\n self.bneck = nn.Sequential(\n Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),\n Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),\n Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),\n Block(5, 24, 96, 40, hswish(), SeModule(40), 2),\n Block(5, 40, 240, 40, hswish(), SeModule(40), 1),\n Block(5, 40, 240, 40, hswish(), SeModule(40), 1),\n Block(5, 40, 120, 48, hswish(), SeModule(48), 1),\n Block(5, 48, 144, 48, hswish(), SeModule(48), 1),\n Block(5, 48, 288, 96, hswish(), SeModule(96), 2),\n Block(5, 96, 576, 96, hswish(), SeModule(96), 1),\n Block(5, 96, 576, 96, hswish(), SeModule(96), 1),\n )\n\n\n self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn2 = nn.BatchNorm2d(576)\n self.hs2 = hswish()\n self.linear3 = nn.Linear(576, 1280)\n self.bn3 = nn.BatchNorm1d(1280)\n self.hs3 = hswish()\n self.linear4 = nn.Linear(1280, num_classes)\n self.init_params()\n\n def init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant_(m.weight, 1)\n init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal_(m.weight, std=0.001)\n if m.bias is not None:\n init.constant_(m.bias, 0)\n\n def forward(self, x):\n out = self.hs1(self.bn1(self.conv1(x)))\n out = self.bneck(out)\n out = self.hs2(self.bn2(self.conv2(out)))\n out = F.avg_pool2d(out, 7)\n out = out.view(out.size(0), -1)\n out = self.hs3(self.bn3(self.linear3(out)))\n out = self.linear4(out)\n return out\n\n\n\ndef test():\n net = MobileNetV3_Small()\n x = torch.randn(2,3,224,224)\n y = net(x)\n print(y.size())\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"torch.nn.functional.relu6",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
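Note that this variant's stem convolution takes 10 input channels and the fixed 7x7 average pool assumes 224x224-style inputs, so a consistent shape check looks like the following (assuming the file is importable as `mobileNet`):

    import torch
    from mobileNet import MobileNetV3_Small

    net = MobileNetV3_Small(num_classes=10).eval()  # eval() freezes BatchNorm statistics
    x = torch.randn(2, 10, 224, 224)  # 10 channels to match conv1's in_size
    with torch.no_grad():
        y = net(x)
    print(y.size())  # torch.Size([2, 10])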
awesome-archive/mixmatch | [
"77bf67ddf15fa51b6784d5aad1a4793b43352f7f"
] | [
"scripts/check_split.py"
] | [
"#!/usr/bin/env python\n\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Script to measure the overlap between data splits.\n\nThere should not be any overlap unless the original dataset has duplicates.\n\"\"\"\n\nimport hashlib\nimport itertools\nimport os\nfrom absl import app\nfrom absl import flags\nfrom libml import data, utils\nimport tensorflow as tf\nfrom tqdm import trange\n\nflags.DEFINE_integer('batch', 1024, 'Batch size.')\nflags.DEFINE_integer('samples', 1 << 20, 'Number of samples to load.')\n\nFLAGS = flags.FLAGS\n\nDATASETS = {}\nDATASETS.update([data.DataSet.creator('cifar10', seed, label, valid, lambda x: x)\n for seed, label, valid in\n itertools.product(range(6), [250, 500, 1000, 2000, 4000], [1, 5000])])\nDATASETS.update([data.DataSet.creator('cifar100', seed, label, valid, lambda x: x)\n for seed, label, valid in\n itertools.product(range(6), [10000], [1, 5000])])\nDATASETS.update([data.DataSet.creator('stl10', seed, label, valid, lambda x: x, height=96, width=96, do_memoize=False)\n for seed, label, valid in itertools.product(range(6), [1000, 5000], [1, 500])])\nDATASETS.update([data.DataSet.creator('svhn', seed, label, valid, lambda x: x, do_memoize=False)\n for seed, label, valid in\n itertools.product(range(6), [250, 500, 1000, 2000, 4000], [1, 5000])])\nDATASETS.update([data.DataSet.creator('svhn_noextra', seed, label, valid, lambda x: x, do_memoize=False)\n for seed, label, valid in\n itertools.product(range(6), [250, 500, 1000, 2000, 4000], [1, 5000])])\n\n\ndef to_byte(d: dict):\n return tf.to_int32(tf.round(127.5 * (d['image'] + 1)))\n\n\ndef collect_hashes(sess, group, data):\n data = data.map(to_byte).batch(FLAGS.batch).prefetch(1).make_one_shot_iterator().get_next()\n hashes = set()\n hasher = hashlib.sha512\n for _ in trange(0, FLAGS.samples, FLAGS.batch, desc='Building hashes for %s' % group, leave=False):\n try:\n batch = sess.run(data)\n except tf.errors.OutOfRangeError:\n break\n for img in batch:\n hashes.add(hasher(img).digest())\n return hashes\n\n\ndef main(argv):\n del argv\n utils.setup_tf()\n dataset = DATASETS[FLAGS.dataset]()\n with tf.Session(config=utils.get_config()) as sess:\n hashes = (collect_hashes(sess, 'labeled', dataset.eval_labeled),\n collect_hashes(sess, 'unlabeled', dataset.eval_unlabeled),\n collect_hashes(sess, 'validation', dataset.valid),\n collect_hashes(sess, 'test', dataset.test))\n print('Overlap matrix (should be an almost perfect diagonal matrix with counts).')\n groups = 'labeled unlabeled validation test'.split()\n fmt = '%-10s %10s %10s %10s %10s'\n print(fmt % tuple([''] + groups))\n for p, x in enumerate(hashes):\n overlaps = [len(x & y) for y in hashes]\n print(fmt % tuple([groups[p]] + overlaps))\n\n\nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = ''\n app.run(main)\n"
] | [
[
"tensorflow.round"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
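The overlap check above reduces to computing one digest per decoded image and intersecting the digest sets across splits; the same idea in plain numpy/hashlib, decoupled from the TF input pipeline (the synthetic arrays stand in for decoded uint8 image batches):

    import hashlib
    import numpy as np

    def split_hashes(images):
        # One SHA-512 digest per uint8 image; identical pixels give identical digests.
        return {hashlib.sha512(img.tobytes()).digest() for img in images}

    rng = np.random.default_rng(0)
    train = rng.integers(0, 256, size=(100, 32, 32, 3), dtype=np.uint8)
    test = rng.integers(0, 256, size=(50, 32, 32, 3), dtype=np.uint8)
    test[0] = train[0]  # plant one duplicate across splits

    print(len(split_hashes(train) & split_hashes(test)))  # 1 -> one overlapping example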
AngelLiang/hacking-influxdb-python | [
"d5d12499f3755199d5eedd8b363450f1cf4073bd"
] | [
"influxdb/_dataframe_client.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"DataFrame client for InfluxDB.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport math\nfrom collections import defaultdict\n\nimport pandas as pd\nimport numpy as np\n\nfrom .client import InfluxDBClient\nfrom .line_protocol import _escape_tag\n\n\ndef _pandas_time_unit(time_precision):\n unit = time_precision\n if time_precision == 'm':\n unit = 'ms'\n elif time_precision == 'u':\n unit = 'us'\n elif time_precision == 'n':\n unit = 'ns'\n assert unit in ('s', 'ms', 'us', 'ns')\n return unit\n\n\ndef _escape_pandas_series(s):\n return s.apply(lambda v: _escape_tag(v))\n\n\nclass DataFrameClient(InfluxDBClient):\n \"\"\"DataFrameClient instantiates InfluxDBClient to connect to the backend.\n\n The ``DataFrameClient`` object holds information necessary to connect\n to InfluxDB. Requests can be made to InfluxDB directly through the client.\n The client reads and writes from pandas DataFrames.\n \"\"\"\n\n EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')\n\n def write_points(self,\n dataframe,\n measurement,\n tags=None,\n tag_columns=None,\n field_columns=None,\n time_precision=None,\n database=None,\n retention_policy=None,\n batch_size=None,\n protocol='line',\n numeric_precision=None):\n \"\"\"Write to multiple time series names.\n\n :param dataframe: data points in a DataFrame\n :param measurement: name of measurement\n :param tags: dictionary of tags, with string key-values\n :param time_precision: [Optional, default None] Either 's', 'ms', 'u'\n or 'n'.\n :param batch_size: [Optional] Value to write the points in batches\n instead of all at one time. Useful for when doing data dumps from\n one database to another or when doing a massive write operation\n :type batch_size: int\n :param protocol: Protocol for writing data. Either 'line' or 'json'.\n :param numeric_precision: Precision for floating point values.\n Either None, 'full' or some int, where int is the desired decimal\n precision. 'full' preserves full precision for int and float\n datatypes. 
Defaults to None, which preserves 14-15 significant\n figures for float and all significant figures for int datatypes.\n \"\"\"\n if tag_columns is None:\n tag_columns = []\n\n if field_columns is None:\n field_columns = []\n\n if batch_size:\n number_batches = int(math.ceil(len(dataframe) / float(batch_size)))\n\n for batch in range(number_batches):\n start_index = batch * batch_size\n end_index = (batch + 1) * batch_size\n\n if protocol == 'line':\n points = self._convert_dataframe_to_lines(\n dataframe.iloc[start_index:end_index].copy(),\n measurement=measurement,\n global_tags=tags,\n time_precision=time_precision,\n tag_columns=tag_columns,\n field_columns=field_columns,\n numeric_precision=numeric_precision)\n else:\n points = self._convert_dataframe_to_json(\n dataframe.iloc[start_index:end_index].copy(),\n measurement=measurement,\n tags=tags,\n time_precision=time_precision,\n tag_columns=tag_columns,\n field_columns=field_columns)\n\n super(DataFrameClient, self).write_points(\n points,\n time_precision,\n database,\n retention_policy,\n protocol=protocol)\n\n return True\n\n if protocol == 'line':\n points = self._convert_dataframe_to_lines(\n dataframe,\n measurement=measurement,\n global_tags=tags,\n tag_columns=tag_columns,\n field_columns=field_columns,\n time_precision=time_precision,\n numeric_precision=numeric_precision)\n else:\n points = self._convert_dataframe_to_json(\n dataframe,\n measurement=measurement,\n tags=tags,\n time_precision=time_precision,\n tag_columns=tag_columns,\n field_columns=field_columns)\n\n super(DataFrameClient, self).write_points(\n points,\n time_precision,\n database,\n retention_policy,\n protocol=protocol)\n\n return True\n\n def query(self,\n query,\n params=None,\n bind_params=None,\n epoch=None,\n expected_response_code=200,\n database=None,\n raise_errors=True,\n chunked=False,\n chunk_size=0,\n method=\"GET\",\n dropna=True):\n \"\"\"\n Query data into a DataFrame.\n\n .. danger::\n In order to avoid injection vulnerabilities (similar to `SQL\n injection <https://www.owasp.org/index.php/SQL_Injection>`_\n vulnerabilities), do not directly include untrusted data into the\n ``query`` parameter, use ``bind_params`` instead.\n\n :param query: the actual query string\n :param params: additional parameters for the request, defaults to {}\n :param bind_params: bind parameters for the query:\n any variable in the query written as ``'$var_name'`` will be\n replaced with ``bind_params['var_name']``. 
Only works in the\n ``WHERE`` clause and takes precedence over ``params['params']``\n :param epoch: response timestamps to be in epoch format either 'h',\n 'm', 's', 'ms', 'u', or 'ns',defaults to `None` which is\n RFC3339 UTC format with nanosecond precision\n :param expected_response_code: the expected status code of response,\n defaults to 200\n :param database: database to query, defaults to None\n :param raise_errors: Whether or not to raise exceptions when InfluxDB\n returns errors, defaults to True\n :param chunked: Enable to use chunked responses from InfluxDB.\n With ``chunked`` enabled, one ResultSet is returned per chunk\n containing all results within that chunk\n :param chunk_size: Size of each chunk to tell InfluxDB to use.\n :param dropna: drop columns where all values are missing\n :returns: the queried data\n :rtype: :class:`~.ResultSet`\n \"\"\"\n query_args = dict(params=params,\n bind_params=bind_params,\n epoch=epoch,\n expected_response_code=expected_response_code,\n raise_errors=raise_errors,\n chunked=chunked,\n database=database,\n method=method,\n chunk_size=chunk_size)\n results = super(DataFrameClient, self).query(query, **query_args)\n if query.strip().upper().startswith(\"SELECT\"):\n if len(results) > 0:\n return self._to_dataframe(results, dropna)\n else:\n return {}\n else:\n return results\n\n def _to_dataframe(self, rs, dropna=True):\n result = defaultdict(list)\n if isinstance(rs, list):\n return map(self._to_dataframe, rs)\n\n for key, data in rs.items():\n name, tags = key\n if tags is None:\n key = name\n else:\n key = (name, tuple(sorted(tags.items())))\n df = pd.DataFrame(data)\n df.time = pd.to_datetime(df.time)\n df.set_index('time', inplace=True)\n if df.index.tzinfo is None:\n df.index = df.index.tz_localize('UTC')\n df.index.name = None\n result[key].append(df)\n for key, data in result.items():\n df = pd.concat(data).sort_index()\n if dropna:\n df.dropna(how='all', axis=1, inplace=True)\n result[key] = df\n\n return result\n\n @staticmethod\n def _convert_dataframe_to_json(dataframe,\n measurement,\n tags=None,\n tag_columns=None,\n field_columns=None,\n time_precision=None):\n\n if not isinstance(dataframe, pd.DataFrame):\n raise TypeError('Must be DataFrame, but type was: {0}.'\n .format(type(dataframe)))\n if not (isinstance(dataframe.index, pd.PeriodIndex) or\n isinstance(dataframe.index, pd.DatetimeIndex)):\n raise TypeError('Must be DataFrame with DatetimeIndex or '\n 'PeriodIndex.')\n\n # Make sure tags and tag columns are correctly typed\n tag_columns = tag_columns if tag_columns is not None else []\n field_columns = field_columns if field_columns is not None else []\n tags = tags if tags is not None else {}\n # Assume field columns are all columns not included in tag columns\n if not field_columns:\n field_columns = list(\n set(dataframe.columns).difference(set(tag_columns)))\n\n dataframe.index = pd.to_datetime(dataframe.index)\n if dataframe.index.tzinfo is None:\n dataframe.index = dataframe.index.tz_localize('UTC')\n\n # Convert column to strings\n dataframe.columns = dataframe.columns.astype('str')\n\n # Convert dtype for json serialization\n dataframe = dataframe.astype('object')\n\n precision_factor = {\n \"n\": 1,\n \"u\": 1e3,\n \"ms\": 1e6,\n \"s\": 1e9,\n \"m\": 1e9 * 60,\n \"h\": 1e9 * 3600,\n }.get(time_precision, 1)\n\n points = [\n {'measurement': measurement,\n 'tags': dict(list(tag.items()) + list(tags.items())),\n 'fields': rec,\n 'time': np.int64(ts.value / precision_factor)}\n for ts, tag, rec in 
zip(dataframe.index,\n dataframe[tag_columns].to_dict('record'),\n dataframe[field_columns].to_dict('record'))\n ]\n\n return points\n\n def _convert_dataframe_to_lines(self,\n dataframe,\n measurement,\n field_columns=None,\n tag_columns=None,\n global_tags=None,\n time_precision=None,\n numeric_precision=None):\n\n dataframe = dataframe.dropna(how='all').copy()\n if len(dataframe) == 0:\n return []\n\n if not isinstance(dataframe, pd.DataFrame):\n raise TypeError('Must be DataFrame, but type was: {0}.'\n .format(type(dataframe)))\n if not (isinstance(dataframe.index, pd.PeriodIndex) or\n isinstance(dataframe.index, pd.DatetimeIndex)):\n raise TypeError('Must be DataFrame with DatetimeIndex or '\n 'PeriodIndex.')\n\n dataframe = dataframe.rename(\n columns={item: _escape_tag(item) for item in dataframe.columns})\n # Create a Series of columns for easier indexing\n column_series = pd.Series(dataframe.columns)\n\n if field_columns is None:\n field_columns = []\n\n if tag_columns is None:\n tag_columns = []\n\n if global_tags is None:\n global_tags = {}\n\n # Make sure field_columns and tag_columns are lists\n field_columns = list(field_columns) if list(field_columns) else []\n tag_columns = list(tag_columns) if list(tag_columns) else []\n\n # If field columns but no tag columns, assume rest of columns are tags\n if field_columns and (not tag_columns):\n tag_columns = list(column_series[~column_series.isin(\n field_columns)])\n\n # If no field columns, assume non-tag columns are fields\n if not field_columns:\n field_columns = list(column_series[~column_series.isin(\n tag_columns)])\n\n precision_factor = {\n \"n\": 1,\n \"u\": 1e3,\n \"ms\": 1e6,\n \"s\": 1e9,\n \"m\": 1e9 * 60,\n \"h\": 1e9 * 3600,\n }.get(time_precision, 1)\n\n # Make array of timestamp ints\n if isinstance(dataframe.index, pd.PeriodIndex):\n time = ((dataframe.index.to_timestamp().values.astype(np.int64) /\n precision_factor).astype(np.int64).astype(str))\n else:\n time = ((pd.to_datetime(dataframe.index).values.astype(np.int64) /\n precision_factor).astype(np.int64).astype(str))\n\n # If tag columns exist, make an array of formatted tag keys and values\n if tag_columns:\n\n # Make global_tags as tag_columns\n if global_tags:\n for tag in global_tags:\n dataframe[tag] = global_tags[tag]\n tag_columns.append(tag)\n\n tag_df = dataframe[tag_columns]\n tag_df = tag_df.fillna('') # replace NA with empty string\n tag_df = tag_df.sort_index(axis=1)\n tag_df = self._stringify_dataframe(\n tag_df, numeric_precision, datatype='tag')\n\n # join preprendded tags, leaving None values out\n tags = tag_df.apply(\n lambda s: [',' + s.name + '=' + v if v else '' for v in s])\n tags = tags.sum(axis=1)\n\n del tag_df\n elif global_tags:\n tag_string = ''.join(\n [\",{}={}\".format(k, _escape_tag(v)) if v else ''\n for k, v in sorted(global_tags.items())]\n )\n tags = pd.Series(tag_string, index=dataframe.index)\n else:\n tags = ''\n\n # Make an array of formatted field keys and values\n field_df = dataframe[field_columns]\n # Keep the positions where Null values are found\n mask_null = field_df.isnull().values\n\n field_df = self._stringify_dataframe(field_df,\n numeric_precision,\n datatype='field')\n\n field_df = (field_df.columns.values + '=').tolist() + field_df\n field_df[field_df.columns[1:]] = ',' + field_df[\n field_df.columns[1:]]\n field_df = field_df.where(~mask_null, '') # drop Null entries\n fields = field_df.sum(axis=1)\n del field_df\n\n # Generate line protocol string\n measurement = _escape_tag(measurement)\n points = 
(measurement + tags + ' ' + fields + ' ' + time).tolist()\n return points\n\n @staticmethod\n def _stringify_dataframe(dframe, numeric_precision, datatype='field'):\n\n # Prevent modification of input dataframe\n dframe = dframe.copy()\n\n # Find int and string columns for field-type data\n int_columns = dframe.select_dtypes(include=['integer']).columns\n string_columns = dframe.select_dtypes(include=['object']).columns\n\n # Convert dframe to string\n if numeric_precision is None:\n # If no precision specified, convert directly to string (fast)\n dframe = dframe.astype(str)\n elif numeric_precision == 'full':\n # If full precision, use repr to get full float precision\n float_columns = (dframe.select_dtypes(\n include=['floating']).columns)\n nonfloat_columns = dframe.columns[~dframe.columns.isin(\n float_columns)]\n dframe[float_columns] = dframe[float_columns].applymap(repr)\n dframe[nonfloat_columns] = (dframe[nonfloat_columns].astype(str))\n elif isinstance(numeric_precision, int):\n # If precision is specified, round to appropriate precision\n float_columns = (dframe.select_dtypes(\n include=['floating']).columns)\n nonfloat_columns = dframe.columns[~dframe.columns.isin(\n float_columns)]\n dframe[float_columns] = (dframe[float_columns].round(\n numeric_precision))\n\n # If desired precision is > 10 decimal places, need to use repr\n if numeric_precision > 10:\n dframe[float_columns] = (dframe[float_columns].applymap(repr))\n dframe[nonfloat_columns] = (dframe[nonfloat_columns]\n .astype(str))\n else:\n dframe = dframe.astype(str)\n else:\n raise ValueError('Invalid numeric precision.')\n\n if datatype == 'field':\n # If dealing with fields, format ints and strings correctly\n dframe[int_columns] += 'i'\n dframe[string_columns] = '\"' + dframe[string_columns] + '\"'\n elif datatype == 'tag':\n dframe = dframe.apply(_escape_pandas_series)\n\n dframe.columns = dframe.columns.astype(str)\n\n return dframe\n\n def _datetime_to_epoch(self, datetime, time_precision='s'):\n seconds = (datetime - self.EPOCH).total_seconds()\n if time_precision == 'h':\n return seconds / 3600\n elif time_precision == 'm':\n return seconds / 60\n elif time_precision == 's':\n return seconds\n elif time_precision == 'ms':\n return seconds * 1e3\n elif time_precision == 'u':\n return seconds * 1e6\n elif time_precision == 'n':\n return seconds * 1e9\n"
] | [
[
"pandas.concat",
"pandas.to_datetime",
"pandas.Series",
"pandas.DataFrame",
"numpy.int64",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
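The `_convert_dataframe_to_lines` method above builds InfluxDB line protocol with vectorized pandas string concatenation rather than a per-row loop. A minimal self-contained sketch of that idea, assuming a made-up `cpu` measurement with one tag and one field (illustrative names, not the client's API):

```python
# Minimal sketch of the vectorized line-protocol conversion above.
# Illustrative only: the real client also escapes special characters,
# masks NaNs, and honors time_precision / numeric_precision.
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {"host": ["a", "b"], "value": [1.5, 2.5]},
    index=pd.to_datetime(["2020-01-01", "2020-01-02"]),
)

# Timestamps as integer nanoseconds, then strings
time = pd.to_datetime(df.index).values.astype(np.int64).astype(str)

# Tags as ",key=value", fields as "key=value" (both stay vectorized)
tags = ",host=" + df["host"]
fields = "value=" + df["value"].astype(str)

lines = ("cpu" + tags + " " + fields + " " + time).tolist()
print(lines)  # ['cpu,host=a value=1.5 1577836800000000000', ...]
```

Keeping tags, fields, and timestamps as Series means the joins happen element-wise in vectorized code, which is why the method converts timestamps to integer strings up front.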
zaxtax/arviz | [
"c78deefeeb355d3cee11a93fc148f9198dde8b35"
] | [
"arviz/tests/external_tests/test_data_cmdstan.py"
] | [
"# pylint: disable=no-member, invalid-name, redefined-outer-name\n# pylint: disable=too-many-lines\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom ... import from_cmdstan\n\nfrom ..helpers import check_multiple_attrs\n\n\nclass TestDataCmdStan:\n @pytest.fixture(scope=\"session\")\n def data_directory(self):\n here = os.path.dirname(os.path.abspath(__file__))\n data_directory = os.path.join(here, \"..\", \"saved_models\")\n return data_directory\n\n @pytest.fixture(scope=\"class\")\n def paths(self, data_directory):\n paths = {\n \"no_warmup\": [\n os.path.join(data_directory, \"cmdstan/output_no_warmup1.csv\"),\n os.path.join(data_directory, \"cmdstan/output_no_warmup2.csv\"),\n os.path.join(data_directory, \"cmdstan/output_no_warmup3.csv\"),\n os.path.join(data_directory, \"cmdstan/output_no_warmup4.csv\"),\n ],\n \"warmup\": [\n os.path.join(data_directory, \"cmdstan/output_warmup1.csv\"),\n os.path.join(data_directory, \"cmdstan/output_warmup2.csv\"),\n os.path.join(data_directory, \"cmdstan/output_warmup3.csv\"),\n os.path.join(data_directory, \"cmdstan/output_warmup4.csv\"),\n ],\n \"no_warmup_glob\": os.path.join(data_directory, \"cmdstan/output_no_warmup[0-9].csv\"),\n \"warmup_glob\": os.path.join(data_directory, \"cmdstan/output_warmup[0-9].csv\"),\n \"eight_schools_glob\": os.path.join(\n data_directory, \"cmdstan/eight_schools_output[0-9].csv\"\n ),\n \"eight_schools\": [\n os.path.join(data_directory, \"cmdstan/eight_schools_output1.csv\"),\n os.path.join(data_directory, \"cmdstan/eight_schools_output2.csv\"),\n os.path.join(data_directory, \"cmdstan/eight_schools_output3.csv\"),\n os.path.join(data_directory, \"cmdstan/eight_schools_output4.csv\"),\n ],\n }\n return paths\n\n @pytest.fixture(scope=\"class\")\n def observed_data_paths(self, data_directory):\n observed_data_paths = [\n os.path.join(data_directory, \"cmdstan/eight_schools.data.R\"),\n os.path.join(data_directory, \"cmdstan/example_stan.data.R\"),\n os.path.join(data_directory, \"cmdstan/example_stan.json\"),\n ]\n\n return observed_data_paths\n\n def get_inference_data(self, posterior, **kwargs):\n return from_cmdstan(posterior=posterior, **kwargs)\n\n def test_sample_stats(self, paths):\n for key, path in paths.items():\n if \"missing\" in key:\n continue\n inference_data = self.get_inference_data(path)\n assert hasattr(inference_data, \"sample_stats\")\n assert \"step_size\" in inference_data.sample_stats.attrs\n assert inference_data.sample_stats.attrs[\"step_size\"] == \"stepsize\"\n\n def test_inference_data_shapes(self, paths):\n \"\"\"Assert that shapes are transformed correctly\"\"\"\n for key, path in paths.items():\n if \"eight\" in key or \"missing\" in key:\n continue\n inference_data = self.get_inference_data(path)\n test_dict = {\"posterior\": [\"x\", \"y\", \"Z\"]}\n fails = check_multiple_attrs(test_dict, inference_data)\n assert not fails\n assert inference_data.posterior[\"y\"].shape == (4, 100)\n assert inference_data.posterior[\"x\"].shape == (4, 100, 3)\n assert inference_data.posterior[\"Z\"].shape == (4, 100, 4, 6)\n dims = [\"chain\", \"draw\"]\n y_mean_true = 0\n y_mean = inference_data.posterior[\"y\"].mean(dim=dims)\n assert np.isclose(y_mean, y_mean_true, atol=1e-1)\n x_mean_true = np.array([1, 2, 3])\n x_mean = inference_data.posterior[\"x\"].mean(dim=dims)\n assert np.isclose(x_mean, x_mean_true, atol=1e-1).all()\n Z_mean_true = np.array([1, 2, 3, 4])\n Z_mean = inference_data.posterior[\"Z\"].mean(dim=dims).mean(axis=1)\n assert np.isclose(Z_mean, Z_mean_true, 
atol=7e-1).all()\n assert \"comments\" in inference_data.posterior.attrs\n\n def test_inference_data_input_types1(self, paths, observed_data_paths):\n \"\"\"Check input types\n\n posterior --> str, list of str\n prior --> str, list of str\n posterior_predictive --> str, variable in posterior\n observed_data --> Rdump format\n observed_data_var --> str, variable\n log_likelihood --> str\n coords --> one to many\n dims --> one to many\n \"\"\"\n for key, path in paths.items():\n if \"eight\" not in key:\n continue\n inference_data = self.get_inference_data(\n posterior=path,\n posterior_predictive=\"y_hat\",\n predictions=\"y_hat\",\n prior=path,\n prior_predictive=\"y_hat\",\n observed_data=observed_data_paths[0],\n observed_data_var=\"y\",\n constant_data=observed_data_paths[0],\n constant_data_var=\"y\",\n predictions_constant_data=observed_data_paths[0],\n predictions_constant_data_var=\"y\",\n log_likelihood=\"log_lik\",\n coords={\"school\": np.arange(8)},\n dims={\n \"theta\": [\"school\"],\n \"y\": [\"school\"],\n \"log_lik\": [\"school\"],\n \"y_hat\": [\"school\"],\n \"eta\": [\"school\"],\n },\n )\n test_dict = {\n \"posterior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"posterior_predictive\": [\"y_hat\"],\n \"predictions\": [\"y_hat\"],\n \"prior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"prior_predictive\": [\"y_hat\"],\n \"sample_stats\": [\"diverging\"],\n \"observed_data\": [\"y\"],\n \"constant_data\": [\"y\"],\n \"predictions_constant_data\": [\"y\"],\n \"log_likelihood\": [\"log_lik\"],\n }\n if \"output_warmup\" in path:\n test_dict.update({\"warmup_posterior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"]})\n fails = check_multiple_attrs(test_dict, inference_data)\n assert not fails\n\n def test_inference_data_input_types2(self, paths, observed_data_paths):\n \"\"\"Check input types (change, see earlier)\n\n posterior_predictive --> List[str], variable in posterior\n observed_data_var --> List[str], variable\n \"\"\"\n for key, path in paths.items():\n if \"eight\" not in key:\n continue\n inference_data = self.get_inference_data(\n posterior=path,\n posterior_predictive=[\"y_hat\"],\n predictions=[\"y_hat\"],\n prior=path,\n prior_predictive=[\"y_hat\"],\n observed_data=observed_data_paths[0],\n observed_data_var=[\"y\"],\n constant_data=observed_data_paths[0],\n constant_data_var=[\"y\"],\n predictions_constant_data=observed_data_paths[0],\n predictions_constant_data_var=[\"y\"],\n coords={\"school\": np.arange(8)},\n dims={\n \"theta\": [\"school\"],\n \"y\": [\"school\"],\n \"log_lik\": [\"school\"],\n \"y_hat\": [\"school\"],\n \"eta\": [\"school\"],\n },\n dtypes={\"theta\": np.int64},\n )\n test_dict = {\n \"posterior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"posterior_predictive\": [\"y_hat\"],\n \"predictions\": [\"y_hat\"],\n \"prior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"prior_predictive\": [\"y_hat\"],\n \"sample_stats\": [\"diverging\"],\n \"observed_data\": [\"y\"],\n \"constant_data\": [\"y\"],\n \"predictions_constant_data\": [\"y\"],\n \"log_likelihood\": [\"log_lik\"],\n }\n fails = check_multiple_attrs(test_dict, inference_data)\n assert not fails\n assert isinstance(inference_data.posterior.theta.data.flat[0], np.integer)\n\n def test_inference_data_input_types3(self, paths, observed_data_paths):\n \"\"\"Check input types (change, see earlier)\n\n posterior_predictive --> str, csv file\n coords --> one to many + one to one (default dim)\n dims --> one to many\n \"\"\"\n for key, path in paths.items():\n 
if \"eight\" not in key:\n continue\n post_pred = paths[\"eight_schools_glob\"]\n inference_data = self.get_inference_data(\n posterior=path,\n posterior_predictive=post_pred,\n prior=path,\n prior_predictive=post_pred,\n observed_data=observed_data_paths[0],\n observed_data_var=[\"y\"],\n log_likelihood=[\"log_lik\", \"y_hat\"],\n coords={\n \"school\": np.arange(8),\n \"log_lik_dim_0\": np.arange(8),\n \"y_hat\": np.arange(8),\n },\n dims={\"theta\": [\"school\"], \"y\": [\"school\"], \"y_hat\": [\"school\"], \"eta\": [\"school\"]},\n )\n test_dict = {\n \"posterior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"sample_stats\": [\"diverging\"],\n \"prior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"prior_predictive\": [\"y_hat\"],\n \"observed_data\": [\"y\"],\n \"posterior_predictive\": [\"y_hat\"],\n \"log_likelihood\": [\"log_lik\", \"y_hat\"],\n }\n fails = check_multiple_attrs(test_dict, inference_data)\n assert not fails\n\n def test_inference_data_input_types4(self, paths):\n \"\"\"Check input types (change, see earlier)\n\n coords --> one to many + one to one (non-default dim)\n dims --> one to many + one to one\n \"\"\"\n\n paths_ = paths[\"no_warmup\"]\n for path in [paths_, paths_[0]]:\n inference_data = self.get_inference_data(\n posterior=path,\n posterior_predictive=path,\n prior=path,\n prior_predictive=path,\n observed_data=None,\n observed_data_var=None,\n log_likelihood=False,\n coords={\"rand\": np.arange(3)},\n dims={\"x\": [\"rand\"]},\n )\n test_dict = {\n \"posterior\": [\"x\", \"y\", \"Z\"],\n \"prior\": [\"x\", \"y\", \"Z\"],\n \"prior_predictive\": [\"x\", \"y\", \"Z\"],\n \"sample_stats\": [\"lp\"],\n \"sample_stats_prior\": [\"lp\"],\n \"posterior_predictive\": [\"x\", \"y\", \"Z\"],\n \"~log_likelihood\": [\"\"],\n }\n fails = check_multiple_attrs(test_dict, inference_data)\n assert not fails\n\n def test_inference_data_input_types5(self, paths, observed_data_paths):\n \"\"\"Check input types (change, see earlier)\n\n posterior_predictive is None\n prior_predictive is None\n \"\"\"\n for key, path in paths.items():\n if \"eight\" not in key:\n continue\n inference_data = self.get_inference_data(\n posterior=path,\n posterior_predictive=None,\n prior=path,\n prior_predictive=None,\n observed_data=observed_data_paths[0],\n observed_data_var=[\"y\"],\n log_likelihood=[\"y_hat\"],\n coords={\"school\": np.arange(8), \"log_lik_dim\": np.arange(8)},\n dims={\n \"theta\": [\"school\"],\n \"y\": [\"school\"],\n \"log_lik\": [\"log_lik_dim\"],\n \"y_hat\": [\"school\"],\n \"eta\": [\"school\"],\n },\n )\n test_dict = {\n \"posterior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\", \"log_lik\"],\n \"prior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"log_likelihood\": [\"y_hat\", \"~log_lik\"],\n \"observed_data\": [\"y\"],\n \"sample_stats_prior\": [\"lp\"],\n }\n fails = check_multiple_attrs(test_dict, inference_data)\n assert not fails\n\n def test_inference_data_input_types6(self, paths, observed_data_paths):\n \"\"\"Check input types (change, see earlier)\n\n log_likelihood --> dict\n \"\"\"\n for key, path in paths.items():\n if \"eight\" not in key:\n continue\n post_pred = paths[\"eight_schools_glob\"]\n inference_data = self.get_inference_data(\n posterior=path,\n posterior_predictive=post_pred,\n prior=path,\n prior_predictive=post_pred,\n observed_data=observed_data_paths[0],\n observed_data_var=[\"y\"],\n log_likelihood={\"y\": \"log_lik\"},\n coords={\n \"school\": np.arange(8),\n \"log_lik_dim_0\": np.arange(8),\n \"y_hat\": 
np.arange(8),\n },\n dims={\"theta\": [\"school\"], \"y\": [\"school\"], \"y_hat\": [\"school\"], \"eta\": [\"school\"]},\n )\n test_dict = {\n \"posterior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"sample_stats\": [\"diverging\"],\n \"prior\": [\"mu\", \"tau\", \"theta_tilde\", \"theta\"],\n \"prior_predictive\": [\"y_hat\"],\n \"observed_data\": [\"y\"],\n \"posterior_predictive\": [\"y_hat\"],\n \"log_likelihood\": [\"y\", \"~log_lik\"],\n }\n fails = check_multiple_attrs(test_dict, inference_data)\n assert not fails\n\n def test_inference_data_observed_data1(self, observed_data_paths):\n \"\"\"Read Rdump/JSON, check shapes are correct\n\n All variables\n \"\"\"\n # Check the Rdump (idx=1) and equivalent JSON data file (idx=2)\n for data_idx in (1, 2):\n path = observed_data_paths[data_idx]\n inference_data = self.get_inference_data(posterior=None, observed_data=path)\n assert hasattr(inference_data, \"observed_data\")\n assert len(inference_data.observed_data.data_vars) == 3\n assert inference_data.observed_data[\"x\"].shape == (1,)\n assert inference_data.observed_data[\"x\"][0] == 1\n assert inference_data.observed_data[\"y\"].shape == (3,)\n assert inference_data.observed_data[\"Z\"].shape == (4, 5)\n\n def test_inference_data_observed_data2(self, observed_data_paths):\n \"\"\"Read Rdump/JSON, check shapes are correct\n\n One variable as str\n \"\"\"\n # Check the Rdump (idx=1) and equivalent JSON data file (idx=2)\n for data_idx in (1, 2):\n path = observed_data_paths[data_idx]\n inference_data = self.get_inference_data(\n posterior=None, observed_data=path, observed_data_var=\"x\"\n )\n assert hasattr(inference_data, \"observed_data\")\n assert len(inference_data.observed_data.data_vars) == 1\n assert inference_data.observed_data[\"x\"].shape == (1,)\n\n def test_inference_data_observed_data3(self, observed_data_paths):\n \"\"\"Read Rdump/JSON, check shapes are correct\n\n One variable as a list\n \"\"\"\n # Check the Rdump (idx=1) and equivalent JSON data file (idx=2)\n for data_idx in (1, 2):\n path = observed_data_paths[data_idx]\n inference_data = self.get_inference_data(\n posterior=None, observed_data=path, observed_data_var=[\"x\"]\n )\n assert hasattr(inference_data, \"observed_data\")\n assert len(inference_data.observed_data.data_vars) == 1\n assert inference_data.observed_data[\"x\"].shape == (1,)\n\n def test_inference_data_observed_data4(self, observed_data_paths):\n \"\"\"Read Rdump/JSON, check shapes are correct\n\n Many variables as list\n \"\"\"\n # Check the Rdump (idx=1) and equivalent JSON data file (idx=2)\n for data_idx in (1, 2):\n path = observed_data_paths[data_idx]\n inference_data = self.get_inference_data(\n posterior=None, observed_data=path, observed_data_var=[\"y\", \"Z\"]\n )\n assert hasattr(inference_data, \"observed_data\")\n assert len(inference_data.observed_data.data_vars) == 2\n assert inference_data.observed_data[\"y\"].shape == (3,)\n assert inference_data.observed_data[\"Z\"].shape == (4, 5)\n"
] | [
[
"numpy.arange",
"numpy.array",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
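These tests lean on `check_multiple_attrs` from `..helpers`, which takes a dict mapping group names to expected variable names (a leading `~` meaning "must be absent") and returns the failures. A rough sketch of that contract, with plain nested dicts standing in for `InferenceData` (the real helper's signature and return format may differ):

```python
# Rough sketch of the check_multiple_attrs contract: test_dict maps
# group -> expected variable names, with a leading "~" meaning "must be
# absent". Plain nested dicts stand in for arviz.InferenceData here.
def check_attrs(test_dict, data):
    fails = []
    for group, var_names in test_dict.items():
        if group.startswith("~"):          # group itself must be absent
            if group[1:] in data:
                fails.append(group)
            continue
        if group not in data:
            fails.append(group)
            continue
        for var in var_names:
            if var.startswith("~"):        # variable must be absent
                if var[1:] in data[group]:
                    fails.append(group + "." + var)
            elif var and var not in data[group]:
                fails.append(group + "." + var)
    return fails


data = {"posterior": {"mu": 0, "tau": 1}, "observed_data": {"y": 2}}
assert not check_attrs({"posterior": ["mu", "tau"], "observed_data": ["y"]}, data)
assert check_attrs({"posterior": ["theta"]}, data) == ["posterior.theta"]
```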
wecacuee/habitat-sim | [
"973ab45c08e8b6d7e578db87b25700fbfdd10a02"
] | [
"tests/test_simulator.py"
] | [
"import multiprocessing\nimport os.path as osp\nimport random\n\nimport magnum as mn\nimport numpy as np\nimport pytest\n\nimport examples.settings\nimport habitat_sim\n\n\ndef test_no_navmesh_smoke(sim):\n sim_cfg = habitat_sim.SimulatorConfiguration()\n agent_config = habitat_sim.AgentConfiguration()\n # No sensors as we are only testing to see if things work\n # with no navmesh and the navmesh isn't used for any existing sensors\n agent_config.sensor_specifications = []\n\n sim_cfg.scene.id = \"data/scene_datasets/habitat-test-scenes/van-gogh-room.glb\"\n # Make it try to load a navmesh that doesn't exist\n sim_cfg.scene.filepaths[\"navmesh\"] = \"/tmp/dne.navmesh\"\n\n sim.reconfigure(habitat_sim.Configuration(sim_cfg, [agent_config]))\n\n sim.initialize_agent(0)\n\n random.seed(0)\n for _ in range(50):\n obs = sim.step(random.choice(list(agent_config.action_space.keys())))\n # Can't collide with no navmesh\n assert not obs[\"collided\"]\n\n\ndef test_empty_scene(sim):\n cfg_settings = examples.settings.default_sim_settings.copy()\n\n # keyword \"NONE\" initializes a scene with no scene mesh\n cfg_settings[\"scene\"] = \"NONE\"\n # test that depth sensor doesn't mind an empty scene\n cfg_settings[\"depth_sensor\"] = True\n\n hab_cfg = examples.settings.make_cfg(cfg_settings)\n sim.reconfigure(hab_cfg)\n\n # test that empty frames can be rendered without a scene mesh\n for _ in range(2):\n obs = sim.step(random.choice(list(hab_cfg.agents[0].action_space.keys())))\n\n\ndef test_sim_reset(sim):\n agent_config = sim.config.agents[0]\n sim.initialize_agent(0)\n initial_state = sim.agents[0].initial_state\n # Take random steps in the environment\n for _ in range(10):\n action = random.choice(list(agent_config.action_space.keys()))\n obs = sim.step(action)\n\n sim.reset()\n new_state = sim.agents[0].get_state()\n same_position = all(initial_state.position == new_state.position)\n same_rotation = np.isclose(\n initial_state.rotation, new_state.rotation, rtol=1e-4\n ) # Numerical error can cause slight deviations\n assert same_position and same_rotation\n\n\n# Make sure you can keep a reference to an agent alive without crashing\ndef _test_keep_agent_tgt():\n sim_cfg = habitat_sim.SimulatorConfiguration()\n agent_config = habitat_sim.AgentConfiguration()\n\n sim_cfg.scene.id = \"data/scene_datasets/habitat-test-scenes/van-gogh-room.glb\"\n agents = []\n\n for _ in range(3):\n sim = habitat_sim.Simulator(habitat_sim.Configuration(sim_cfg, [agent_config]))\n\n agents.append(sim.get_agent(0))\n\n sim.close()\n\n\n# Make sure you can construct and destruct the simulator multiple times\ndef _test_multiple_construct_destroy_tgt():\n sim_cfg = habitat_sim.SimulatorConfiguration()\n agent_config = habitat_sim.AgentConfiguration()\n\n sim_cfg.scene.id = \"data/scene_datasets/habitat-test-scenes/van-gogh-room.glb\"\n\n for _ in range(3):\n sim = habitat_sim.Simulator(habitat_sim.Configuration(sim_cfg, [agent_config]))\n\n sim.close()\n\n\[email protected](\n \"test_fn\", [_test_keep_agent_tgt, _test_multiple_construct_destroy_tgt]\n)\ndef test_subproc_fns(test_fn):\n mp_ctx = multiprocessing.get_context(\"spawn\")\n\n # Run this test in a subprocess as things with OpenGL\n # contexts get messy\n p = mp_ctx.Process(target=test_fn)\n\n p.start()\n p.join()\n\n assert p.exitcode == 0\n\n\ndef test_scene_bounding_boxes(sim):\n cfg_settings = examples.settings.default_sim_settings.copy()\n cfg_settings[\"scene\"] = \"data/scene_datasets/habitat-test-scenes/van-gogh-room.glb\"\n hab_cfg = 
examples.settings.make_cfg(cfg_settings)\n sim.reconfigure(hab_cfg)\n scene_graph = sim.get_active_scene_graph()\n root_node = scene_graph.get_root_node()\n root_node.compute_cumulative_bb()\n scene_bb = root_node.cumulative_bb\n ground_truth = mn.Range3D.from_size(\n mn.Vector3(-0.775869, -0.0233012, -1.6706), mn.Vector3(6.76937, 3.86304, 3.5359)\n )\n assert ground_truth == scene_bb\n\n\ndef test_object_template_editing(sim):\n cfg_settings = examples.settings.default_sim_settings.copy()\n cfg_settings[\"scene\"] = \"data/scene_datasets/habitat-test-scenes/van-gogh-room.glb\"\n cfg_settings[\"enable_physics\"] = True\n hab_cfg = examples.settings.make_cfg(cfg_settings)\n sim.reconfigure(hab_cfg)\n\n # test creating a new template with a test asset\n transform_box_path = osp.abspath(\"data/test_assets/objects/transform_box.glb\")\n transform_box_template = habitat_sim.attributes.PhysicsObjectAttributes()\n transform_box_template.set_render_asset_handle(transform_box_path)\n old_library_size = sim.get_physics_object_library_size()\n transform_box_template_id = sim.load_object_template(\n transform_box_template, \"transform_box_template\"\n )\n assert sim.get_physics_object_library_size() > old_library_size\n assert transform_box_template_id != -1\n\n # test loading a test asset template from file\n sphere_path = osp.abspath(\"data/test_assets/objects/sphere\")\n old_library_size = sim.get_physics_object_library_size()\n template_ids = sim.load_object_configs(sphere_path)\n assert len(template_ids) > 0\n assert sim.get_physics_object_library_size() > old_library_size\n\n # test getting and editing template reference\n sphere_template = sim.get_object_template(template_ids[0])\n assert sphere_template.get_render_asset_handle().endswith(\"sphere.glb\")\n sphere_scale = np.array([2.0, 2.0, 2.0])\n sphere_template.set_scale(sphere_scale)\n sphere_template2 = sim.get_object_template(template_ids[0])\n assert sphere_template2.get_scale() == sphere_scale\n\n # test adding a new object\n object_id = sim.add_object(template_ids[0])\n assert object_id != -1\n"
] | [
[
"numpy.array",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
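`test_subproc_fns` runs each scenario in a freshly spawned process so a crashed OpenGL context cannot take down the whole pytest session, and success is read off the exit code. A minimal standalone sketch of the pattern (`_worker` is a stand-in for the real test functions):

```python
# Minimal sketch of the spawn-a-subprocess test pattern: run a function
# in a fresh process (clean GL/driver state) and treat exit code 0 as
# success. _worker is a stand-in for the real habitat-sim test bodies.
import multiprocessing


def _worker():
    assert 1 + 1 == 2  # a failing assert here would yield exitcode != 0


if __name__ == "__main__":
    mp_ctx = multiprocessing.get_context("spawn")
    p = mp_ctx.Process(target=_worker)
    p.start()
    p.join()
    assert p.exitcode == 0
```

The "spawn" context matters: the child starts from a clean interpreter instead of inheriting the parent's (possibly already-initialized) graphics state, so the target must be an importable module-level function.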
geosharma/PyNite | [
"efffccdbff6727d3b271ba2937e35892d9df8c00"
] | [
"Examples/large_grid_frame/create_frame_data.py"
] | [
"## -*- coding: utf-8 -*-\n\"\"\"\nMIT License\n\nCopyright (c) 2020 tamalone1\n\"\"\"\nimport numpy as np\nimport itertools, csv, os\n\n# Nodes coordinates in a 3D rectangle\nx_values = np.linspace(0, 10, 5)\ny_values = np.linspace(0, 50, 11)\nz_values = np.linspace(0, 10, 5)\n\n# Create a 3D grid of nodes (list of lists of lists)\ngrid = []\n# A series number for each node\nid_number = itertools.count()\n# Take each x-coordinate\nfor i, x in enumerate(x_values):\n # Each element is a list of y-coordinates\n grid.append([])\n for j, y in enumerate(y_values):\n # Each element is a list of z-coordinates\n grid[i].append([])\n for k, z in enumerate(z_values):\n # Pack together the corresponding name and coordinates\n grid[i][j].append(('N'+str(next(id_number)), x, y, z))\n\n# Write the nodes to a CSV file\nfilename = os.path.join(os.path.dirname(__file__), 'gridnodes.csv')\nwith open(filename, mode='w', newline='') as f:\n csv_writer = csv.writer(f)\n nodes_at_top_level = []\n # Write the header\n csv_writer.writerow(['Name', 'X', 'Y', 'Z'])\n # Write the node list to the file\n # chain() unpacks the first two layers (x- and y-coordinates)\n for nodes_xy in itertools.chain(*grid):\n # for-loop unpacks the third layer (z-coordinates)\n for node_z in nodes_xy:\n # Each item is a tuple, writerows() unpacks it into the file\n csv_writer.writerow(node_z)\n # if node y-value is the maximum\n if node_z[2] == max(y_values):\n # keep the node name for later\n nodes_at_top_level.append(node_z[0])\n\n# Connect each point to the adjacent one\nconnections = {}\nfor row_idx, row in enumerate(grid):\n for col_idx, col in enumerate(row):\n for z_idx, value in enumerate(col):\n connections[value[0]] = []\n try:\n # Get value from next column\n connections[value[0]].append(grid[row_idx][col_idx + 1][z_idx][0])\n except IndexError:\n # Ignore IndexError caused by hitting the end of the line\n pass\n try:\n # Get value from next row\n connections[value[0]].append(grid[row_idx + 1][col_idx][z_idx][0])\n except IndexError:\n # Ignore IndexError caused by hitting the end of the line\n pass\n try:\n # Get value from next z-value\n connections[value[0]].append(grid[row_idx][col_idx][z_idx + 1][0])\n except IndexError:\n # Ignore IndexError caused by hitting the end of the line\n pass\n\nmember_list = []\nmember_id = itertools.count()\nfor first_node, connected_nodes in connections.items():\n for second_node in connected_nodes:\n member_list.append(('m' + str(next(member_id)), first_node, second_node))\n\nfilename = os.path.join(os.path.dirname(__file__), 'gridmembers.csv')\nwith open(filename, mode='w', newline='') as f:\n csv_writer = csv.writer(f)\n # Write the header\n csv_writer.writerow(['Name', 'i_node', 'j_node'])\n # Write the member list to the file\n csv_writer.writerows(member_list)\n\n\ndef filter_node_coordinates(nodes, coordinate, value):\n \"\"\" Return the nodes with coordinate == value. 
\n \n nodes: iterable of Node3D, or objects with coordinates\n coordinate: str, coordinate name\n value: value with appropriate type\n \"\"\"\n # # Create list of matching results\n # matches = []\n # # Unpack nodes (iterable) and check each element\n # for node in nodes:\n # if getattr(node, coordinate) == value:\n # matches.append(node)\n # return matches\n return [node for node in nodes if getattr(node, coordinate) == value]\n\n\n# Create a file of supports\nfilename = os.path.join(os.path.dirname(__file__), 'gridsupports.csv')\nwith open(filename, mode='w', newline='') as f:\n support_types = ['Node', 'support_DX', 'support_DY', 'support_DZ', 'support_RX',\n 'support_RY', 'support_RZ']\n csv_writer = csv.DictWriter(f, fieldnames=support_types, restval=False)\n csv_writer.writeheader()\n\n # Take each x-coordinate\n for i in grid:\n # i = grid[i]\n for j in i:\n # j = grid[i][j]\n for k in j:\n # k = grid[i][j][k] = node tuple(name, X, Y, Z)\n if k[2] == 0:\n # node is at Y=0, add base supports\n name = k[0]\n # Write the supports to the file\n csv_writer.writerow({'Node': name,\n 'support_DX': True,\n 'support_DY': True,\n 'support_DZ': True})\n\n# Create a file of loads\n# Find all nodes at top level\nfilename = os.path.join(os.path.dirname(__file__), 'gridnodesloads.csv')\nwith open(filename, mode='w', newline='') as f:\n csv_writer = csv.writer(f)\n # Write the header\n csv_writer.writerow(['Name', 'Direction', 'P'])\n # Find the nodes at y_max and apply load to them\n for node in nodes_at_top_level:\n csv_writer.writerow([node, 'FY', -10.0])"
] | [
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
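`create_frame_data.py` boils down to indexing a 3-D lattice and linking every node to its +x, +y, and +z neighbor. A compact sketch of that adjacency logic on a 2×2×2 grid, using a dict lookup where the script uses try/except IndexError (names are illustrative):

```python
# Compact sketch of the lattice adjacency built above: name every node
# on a small 3-D grid, then link each node to its +x/+y/+z neighbor.
import itertools

import numpy as np

xs = ys = zs = np.linspace(0, 1, 2)  # 2 x 2 x 2 grid
ids = itertools.count()
name = {}  # (i, j, k) -> node name
for i, _ in enumerate(xs):
    for j, _ in enumerate(ys):
        for k, _ in enumerate(zs):
            name[(i, j, k)] = "N" + str(next(ids))

members = []
mids = itertools.count()
for (i, j, k), n in name.items():
    for nbr in ((i + 1, j, k), (i, j + 1, k), (i, j, k + 1)):
        if nbr in name:  # dict lookup replaces the script's try/except
            members.append(("m" + str(next(mids)), n, name[nbr]))

print(len(name), "nodes,", len(members), "members")  # 8 nodes, 12 members
```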
MrSyee/rl_algorithms | [
"5b5276982032f8a8a614b9466849b7b3ef245b3e"
] | [
"rl_algorithms/common/abstract/agent.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Abstract Agent used for all agents.\n\n- Author: Curt Park\n- Contact: [email protected]\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nimport argparse\nimport os\nimport shutil\nimport subprocess\nfrom typing import Tuple, Union\n\nimport gym\nfrom gym.spaces import Discrete\nimport numpy as np\nimport torch\nimport wandb\n\nfrom rl_algorithms.utils.config import ConfigDict\n\n\nclass Agent(ABC):\n \"\"\"Abstract Agent used for all agents.\n\n Attributes:\n env (gym.Env): openAI Gym environment\n args (argparse.Namespace): arguments including hyperparameters and training settings\n log_cfg (ConfigDict): configuration for saving log and checkpoint\n env_name (str) : gym env name for logging\n sha (str): sha code of current git commit\n state_dim (int): dimension of states\n action_dim (int): dimension of actions\n is_discrete (bool): shows whether the action is discrete\n\n \"\"\"\n\n def __init__(self, env: gym.Env, args: argparse.Namespace, log_cfg: ConfigDict):\n \"\"\"Initialize.\"\"\"\n self.args = args\n self.env = env\n self.log_cfg = log_cfg\n\n self.env_name = env.spec.id if env.spec is not None else env.name\n\n if not self.args.test:\n self.ckpt_path = (\n f\"./checkpoint/{self.env_name}/{log_cfg.agent}/{log_cfg.curr_time}/\"\n )\n os.makedirs(self.ckpt_path, exist_ok=True)\n\n # save configuration\n shutil.copy(self.args.cfg_path, os.path.join(self.ckpt_path, \"config.py\"))\n\n if isinstance(env.action_space, Discrete):\n self.is_discrete = True\n else:\n self.is_discrete = False\n\n # for logging\n self.sha = (\n subprocess.check_output([\"git\", \"rev-parse\", \"--short\", \"HEAD\"])[:-1]\n .decode(\"ascii\")\n .strip()\n )\n\n @abstractmethod\n def select_action(self, state: np.ndarray) -> Union[torch.Tensor, np.ndarray]:\n pass\n\n @abstractmethod\n def step(\n self, action: Union[torch.Tensor, np.ndarray]\n ) -> Tuple[np.ndarray, np.float64, bool, dict]:\n pass\n\n @abstractmethod\n def update_model(self) -> Tuple[torch.Tensor, ...]:\n pass\n\n @abstractmethod\n def load_params(self, path: str):\n if not os.path.exists(path):\n raise Exception(\n f\"[ERROR] the input path does not exist. 
Wrong path: {path}\"\n )\n\n @abstractmethod\n def save_params(self, params: dict, n_episode: int):\n os.makedirs(self.ckpt_path, exist_ok=True)\n\n path = os.path.join(self.ckpt_path + self.sha + \"_ep_\" + str(n_episode) + \".pt\")\n torch.save(params, path)\n\n print(\"[INFO] Saved the model and optimizer to\", path)\n\n @abstractmethod\n def write_log(self, log_value: tuple): # type: ignore\n pass\n\n @abstractmethod\n def train(self):\n pass\n\n def set_wandb(self):\n wandb.init(\n project=self.env_name,\n name=f\"{self.log_cfg.agent}/{self.log_cfg.curr_time}\",\n )\n wandb.config.update(vars(self.args))\n shutil.copy(self.args.cfg_path, os.path.join(wandb.run.dir, \"config.py\"))\n\n def interim_test(self):\n self.args.test = True\n\n print()\n print(\"===========\")\n print(\"Start Test!\")\n print(\"===========\")\n\n self._test(interim_test=True)\n\n print(\"===========\")\n print(\"Test done!\")\n print(\"===========\")\n print()\n\n self.args.test = False\n\n def test(self):\n \"\"\"Test the agent.\"\"\"\n # logger\n if self.args.log:\n self.set_wandb()\n\n self._test()\n\n # termination\n self.env.close()\n\n def _test(self, interim_test: bool = False):\n \"\"\"Common test routine.\"\"\"\n\n if interim_test:\n test_num = self.args.interim_test_num\n else:\n test_num = self.args.episode_num\n\n for i_episode in range(test_num):\n state = self.env.reset()\n done = False\n score = 0\n step = 0\n\n while not done:\n if self.args.render:\n self.env.render()\n\n action = self.select_action(state)\n next_state, reward, done, _ = self.step(action)\n\n state = next_state\n score += reward\n step += 1\n\n print(\n \"[INFO] test %d\\tstep: %d\\ttotal score: %d\" % (i_episode, step, score)\n )\n\n if self.args.log:\n wandb.log({\"test score\": score})\n"
] | [
[
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
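The `Agent` base class above uses `abc.ABC` so that a subclass cannot be instantiated until every `@abstractmethod` is implemented. A stripped-down sketch of that pattern; `MiniAgent` and `RandomAgent` are illustrative and omit the env, logging, and checkpoint plumbing:

```python
# Stripped-down sketch of the abstract-Agent pattern: a subclass can only
# be instantiated once every @abstractmethod is implemented.
from abc import ABC, abstractmethod

import numpy as np


class MiniAgent(ABC):
    @abstractmethod
    def select_action(self, state: np.ndarray) -> np.ndarray:
        ...

    @abstractmethod
    def train(self):
        ...


class RandomAgent(MiniAgent):
    def select_action(self, state: np.ndarray) -> np.ndarray:
        return np.random.uniform(-1.0, 1.0, size=2)

    def train(self):
        pass  # a random policy has nothing to learn


agent = RandomAgent()                    # ok: all abstract methods defined
print(agent.select_action(np.zeros(4)))  # MiniAgent() would raise TypeError
```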
William-An/WaveMusician | [
"d1829e51f8a6d7fee2ff5571375b7488681796bb"
] | [
"WaveForms/main.py"
] | [
"import dwf\nimport time\nimport sys\nimport random\nfrom mido import MidiFile\nfrom musicConstants import NOTES_FREQ\nfrom midi2Cmd import MidiFileParser\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n##\n# \n# @description MusicWave for Analog Discovery Device 2, using WaveForm SDK\n#\n# @author William\n# @email [email protected]\n#\n# @version 01/17/2019\n#\n##\n\n# TODO Support more cmdline args for config\nmidiFileName = sys.argv[1]\nlogLevel = sys.argv[2].upper() if len(sys.argv) > 2 else 'INFO'\n\n\nOUTPUT_NODE_TYPE = dwf.DwfAnalogOut.NODE.CARRIER # Output node type\n\n# Connect to device\nprint(\"Connecting to first device found...\")\ndwf_ao = dwf.DwfAnalogOut()\nprint(\"Device connected\")\n\nmid = MidiFile(midiFileName)\nloader = MidiFileParser(mid, logLevel)\n\nprint(\"Playing: %s\" % mid.filename)\n\n\"\"\"\n# One note\nfor cmd in loader.play():\n\t# TODO Set output function\n\t# cmd['waveFunc']\n\n\tdwf_ao.nodeEnableSet(cmd['channel'], OUTPUT_NODE_TYPE, True)\t\t\t\t \t\t\t\t# Enable channel note\n\tdwf_ao.nodeFunctionSet(cmd['channel'], OUTPUT_NODE_TYPE, dwf.DwfAnalogOut.FUNC.SINE) \t# Set output function\n\tdwf_ao.nodeAmplitudeSet(cmd['channel'], OUTPUT_NODE_TYPE, cmd['amplitude']) \t\t\t\t# Set amplitude: cmd['amplitude']\n\tdwf_ao.nodeFrequencySet(cmd['channel'], OUTPUT_NODE_TYPE, eval(cmd['frequency']))\n\tdwf_ao.configure(cmd['channel'], cmd['output'])\n\n\"\"\"\nwaveSum = []\nwaveData = []\ncount = 0\n# Multi Notes\nfor cmd in loader.playMultiNote(initialFrequency=1):\n\tcount += 1\n\tif cmd['channel'] > 2:\n\t\tcmd['channel'] = random.randint(0, 1)\n\tdwf_ao.nodeEnableSet(cmd['channel'], OUTPUT_NODE_TYPE, True)\t\n\tdwf_ao.nodeFunctionSet(cmd['channel'], OUTPUT_NODE_TYPE, dwf.DwfAnalogOut.FUNC.CUSTOM)\t\t# Set output function\n\tdwf_ao.nodeAmplitudeSet(cmd['channel'], OUTPUT_NODE_TYPE, cmd['amplitude'] * 0.5) \t\t\t\t# Set amplitude: cmd['amplitude']\n\tdwf_ao.nodeFrequencySet(cmd['channel'], OUTPUT_NODE_TYPE, cmd['frequency'])\n\tdwf_ao.nodeDataSet(cmd['channel'], OUTPUT_NODE_TYPE, cmd['data'])\n\tdwf_ao.configure(cmd['channel'], cmd['output'])\n\twaveSum.append(cmd['waveSum'])\n\twaveData = cmd['data']\n\tif logLevel == 'DEBUG' and count % 100 == 0:\n\t\tplt.plot(waveData[8192//2 - 1000 : 8192//2 + 1000])\n\t\tplt.show()\n\nif logLevel == 'DEBUG':\n\tplt.plot(waveData)\n\tplt.show()\n\n# Release\ndwf_ao.close()\n\n"
] | [
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
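The script drives the generator with `FUNC.CUSTOM` and a per-note sample buffer via `nodeDataSet`. A hardware-free sketch of building such a buffer, assuming an 8192-sample period (matching the DEBUG plot slicing) and illustrative harmonic frequencies:

```python
# Hardware-free sketch of building a custom AWG sample buffer: sum a few
# harmonics over one buffer period and normalize to [-1, 1]. The 8192
# buffer length and the plotted slice mirror the DEBUG plot above.
import matplotlib.pyplot as plt
import numpy as np

n_samples = 8192
t = np.linspace(0.0, 1.0, n_samples, endpoint=False)  # one period
freqs = [1.0, 2.0, 3.0]  # harmonics of the buffer's base frequency

data = np.zeros(n_samples)
for f in freqs:
    data += np.sin(2 * np.pi * f * t)
data /= np.abs(data).max()  # normalize for the generator's amplitude range

plt.plot(data[n_samples // 2 - 1000 : n_samples // 2 + 1000])
plt.show()
```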
lbasek/named-entity-recognition | [
"d21e41442b67161285efe02a6cb032ce63b8ecf2"
] | [
"evaluation.py"
] | [
"import itertools\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, precision_recall_fscore_support\n\nfrom utils.classification_report import classification_report\nfrom utils.plot_confusion_matrix_util import plot_confusion_matrix\n\n\ndef evaluate(model, test, test_input, labels_vocab, save_path, name):\n test_eval = model.evaluate(test_input, np.array(test.y))\n print('Test loss:', test_eval[0])\n print('Test accuracy:', test_eval[1])\n\n predicted_values = np.argmax(model.predict(test_input), axis=-1)\n true_values = np.argmax(test.y, -1)\n\n # flatten to single array with class labels\n true_values = list(itertools.chain(*true_values))\n predicted_values = list(itertools.chain(*predicted_values))\n\n orig_stdout = sys.stdout\n f = open(save_path + 'results.txt', 'w')\n sys.stdout = f\n\n print(\"Macro Precision/Recall/F1 score:\")\n print(precision_recall_fscore_support(true_values, predicted_values, average='macro'))\n print(60 * \"-\")\n\n print(\"Micro Precision/Recall/F1 score:\")\n print(precision_recall_fscore_support(true_values, predicted_values, average='micro'))\n print(60 * \"-\")\n\n keys = list(labels_vocab.stoi.keys())\n values = list(labels_vocab.stoi.values())\n\n # Classification reports\n macro_report = classification_report(true_values, predicted_values, labels=values, target_names=keys, digits=4, average='macro')\n print(macro_report)\n print(60 * \"-\")\n\n micro_report = classification_report(true_values, predicted_values, labels=values, target_names=keys, digits=4, average='micro')\n print(micro_report)\n\n sys.stdout = orig_stdout\n f.close()\n\n # Confusion Matrix\n cnf_matrix = confusion_matrix(true_values, predicted_values)\n np.set_printoptions(precision=2)\n plot_confusion_matrix(cnf_matrix, classes=list(labels_vocab.stoi.keys()), normalize=True, title='Confusion matrix - ' + name)\n plt.savefig(save_path + '/images/confusion_matrix.png', dpi=200, format='png', bbox_inches='tight')\n plt.close()\n"
] | [
[
"numpy.set_printoptions",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.savefig",
"sklearn.metrics.precision_recall_fscore_support",
"numpy.argmax",
"matplotlib.pyplot.close",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
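`evaluate` flattens per-token predictions, then reports macro/micro precision/recall/F1 plus a normalized confusion matrix. A minimal sketch of that flow on toy labels (no model or vocab needed):

```python
# Minimal sketch of the evaluation flow on toy labels: compute macro and
# micro precision/recall/F1, then a row-normalized confusion matrix.
import numpy as np
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support

true_values = [0, 1, 2, 1, 0, 2]
predicted_values = [0, 1, 1, 1, 0, 2]

print(precision_recall_fscore_support(true_values, predicted_values, average="macro"))
print(precision_recall_fscore_support(true_values, predicted_values, average="micro"))

cnf_matrix = confusion_matrix(true_values, predicted_values)
np.set_printoptions(precision=2)
print(cnf_matrix / cnf_matrix.sum(axis=1, keepdims=True))  # row-normalized
```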
mzzhong2/isce2 | [
"7e9c86910afcbe3e39815ebf5ecc744e0c9caee8"
] | [
"contrib/geo_autoRIFT/geogrid/GeogridOptical.py"
] | [
"#!/usr/bin/env python3\n\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Copyright 2019 California Institute of Technology. ALL RIGHTS RESERVED.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# United States Government Sponsorship acknowledged. This software is subject to\n# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'\n# (No [Export] License Required except when exporting to an embargoed country,\n# end user, or in support of a prohibited end use). By downloading this software,\n# the user agrees to comply with all applicable U.S. export laws and regulations.\n# The user has the responsibility to obtain export licenses, or other export\n# authority as may be required before exporting this software to any 'EAR99'\n# embargoed foreign country or citizen of those countries.\n#\n# Authors: Piyush Agram, Yang Lei\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n\n\n\n\nimport pdb\nimport subprocess\nimport re\nimport string\n\nclass GeogridOptical():\n '''\n Class for mapping regular geographic grid on radar imagery.\n '''\n\n def runGeogrid(self):\n '''\n Do the actual processing.\n '''\n\n ##Determine appropriate EPSG system\n self.epsgDem = self.getProjectionSystem(self.demname)\n self.epsgDat = self.getProjectionSystem(self.dat1name)\n \n ###Determine extent of data needed\n bbox = self.determineBbox()\n\n \n ##Run\n self.geogrid()\n\n\n def getProjectionSystem(self, filename):\n '''\n Testing with Greenland.\n '''\n if not filename:\n raise Exception('File {0} does not exist'.format(filename))\n\n from osgeo import gdal, osr\n ds = gdal.Open(filename, gdal.GA_ReadOnly)\n srs = osr.SpatialReference()\n srs.ImportFromWkt(ds.GetProjection())\n srs.AutoIdentifyEPSG()\n ds = None\n# pdb.set_trace()\n\n if srs.IsGeographic():\n epsgstr = srs.GetAuthorityCode('GEOGCS')\n elif srs.IsProjected():\n epsgstr = srs.GetAuthorityCode('PROJCS')\n elif srs.IsLocal():\n raise Exception('Local coordinate system encountered')\n else:\n raise Exception('Non-standard coordinate system encountered')\n if not epsgstr: #Empty string->use shell command gdalsrsinfo for last trial\n cmd = 'gdalsrsinfo -o epsg {0}'.format(filename)\n epsgstr = subprocess.check_output(cmd, shell=True)\n# pdb.set_trace()\n epsgstr = re.findall(\"EPSG:(\\d+)\", str(epsgstr))[0]\n# pdb.set_trace()\n if not epsgstr: #Empty string\n raise Exception('Could not auto-identify epsg code')\n# pdb.set_trace()\n epsgcode = int(epsgstr)\n# pdb.set_trace()\n return epsgcode\n\n def determineBbox(self, zrange=[-200,4000]):\n '''\n Dummy.\n '''\n import numpy as np\n import datetime\n from osgeo import osr\n \n# import pdb\n# pdb.set_trace()\n\n\n samples = self.startingX + np.array([0, self.numberOfSamples]) * self.XSize\n lines = self.startingY + np.array([0, self.numberOfLines]) * self.YSize\n\n coordDat = osr.SpatialReference()\n if self.epsgDat:\n coordDat.ImportFromEPSG(self.epsgDat)\n else:\n raise Exception('EPSG code does not 
exist for image data')\n \n\n coordDem = osr.SpatialReference()\n if self.epsgDem:\n coordDem.ImportFromEPSG(self.epsgDem)\n else:\n raise Exception('EPSG code does not exist for DEM')\n \n \n trans = osr.CoordinateTransformation(coordDat, coordDem)\n \n \n\n utms = []\n xyzs = []\n\n\n ### Four corner coordinates\n for ss in samples:\n for ll in lines:\n for zz in zrange:\n utms.append([ss,ll,zz])\n x,y,z = trans.TransformPoint(ss, ll, zz)\n xyzs.append([x,y,z])\n\n utms = np.array(utms)\n xyzs = np.array(xyzs)\n\n self._xlim = [np.min(xyzs[:,0]), np.max(xyzs[:,0])]\n self._ylim = [np.min(xyzs[:,1]), np.max(xyzs[:,1])]\n\n \n \n \n \n \n def geogrid(self):\n \n # For now print inputs that were obtained\n \n print(\"\\nOptical Image parameters: \")\n print(\"X-direction coordinate: \" + str(self.startingX) + \" \" + str(self.XSize))\n print(\"Y-direction coordinate: \" + str(self.startingY) + \" \" + str(self.YSize))\n print(\"Dimensions: \" + str(self.numberOfSamples) + \" \" + str(self.numberOfLines) + \"\\n\")\n \n print(\"Map inputs: \")\n print(\"EPSG: \" + str(self.epsgDem))\n print(\"Smallest Allowable Chip Size in m: \" + str(self.chipSizeX0))\n print(\"Repeat Time: \" + str(self.repeatTime))\n print(\"XLimits: \" + str(self._xlim[0]) + \" \" + str(self._xlim[1]))\n print(\"YLimits: \" + str(self._ylim[0]) + \" \" + str(self._ylim[1]))\n print(\"Extent in km: \" + str((self._xlim[1]-self._xlim[0])/1000.0) + \" \" + str((self._ylim[1]-self._ylim[0])/1000.0))\n if (self.demname != \"\"):\n print(\"DEM: \" + str(self.demname))\n if (self.dhdxname != \"\"):\n print(\"Slopes: \" + str(self.dhdxname) + \" \" + str(self.dhdyname))\n if (self.vxname != \"\"):\n print(\"Velocities: \" + str(self.vxname) + \" \" + str(self.vyname))\n if (self.srxname != \"\"):\n print(\"Search Range: \" + str(self.srxname) + \" \" + str(self.sryname))\n if (self.csminxname != \"\"):\n print(\"Chip Size Min: \" + str(self.csminxname) + \" \" + str(self.csminyname))\n if (self.csmaxxname != \"\"):\n print(\"Chip Size Max: \" + str(self.csmaxxname) + \" \" + str(self.csmaxyname))\n if (self.ssmname != \"\"):\n print(\"Stable Surface Mask: \" + str(self.ssmname))\n\n\n print(\"\\nOutputs: \")\n\n print(\"Window locations: \" + str(self.winlocname))\n\n if (self.dhdxname != \"\"):\n if (self.vxname != \"\"):\n print(\"Window offsets: \" + str(self.winoffname))\n \n print(\"Window rdr_off2vel_x vector: \" + str(self.winro2vxname))\n print(\"Window rdr_off2vel_y vector: \" + str(self.winro2vyname))\n \n if (self.srxname != \"\"):\n print(\"Window search range: \" + str(self.winsrname))\n\n if (self.csminxname != \"\"):\n print(\"Window chip size min: \" + str(self.wincsminname))\n if (self.csmaxxname != \"\"):\n print(\"Window chip size max: \" + str(self.wincsmaxname))\n if (self.ssmname != \"\"):\n print(\"Window stable surface mask: \" + str(self.winssmname))\n\n print(\"Output Nodata Value: \" + str(self.nodata_out) + \"\\n\")\n\n \n \n print(\"Starting processing .... 
\")\n \n \n \n \n from osgeo import gdal, osr\n import numpy as np\n import struct\n \n# pdb.set_trace()\n demDS = gdal.Open(self.demname, gdal.GA_ReadOnly)\n \n if (self.dhdxname != \"\"):\n sxDS = gdal.Open(self.dhdxname, gdal.GA_ReadOnly)\n syDS = gdal.Open(self.dhdyname, gdal.GA_ReadOnly)\n \n if (self.vxname != \"\"):\n vxDS = gdal.Open(self.vxname, gdal.GA_ReadOnly)\n vyDS = gdal.Open(self.vyname, gdal.GA_ReadOnly)\n \n if (self.srxname != \"\"):\n srxDS = gdal.Open(self.srxname, gdal.GA_ReadOnly)\n sryDS = gdal.Open(self.sryname, gdal.GA_ReadOnly)\n \n if (self.csminxname != \"\"):\n csminxDS = gdal.Open(self.csminxname, gdal.GA_ReadOnly)\n csminyDS = gdal.Open(self.csminyname, gdal.GA_ReadOnly)\n \n if (self.csmaxxname != \"\"):\n csmaxxDS = gdal.Open(self.csmaxxname, gdal.GA_ReadOnly)\n csmaxyDS = gdal.Open(self.csmaxyname, gdal.GA_ReadOnly)\n \n if (self.ssmname != \"\"):\n ssmDS = gdal.Open(self.ssmname, gdal.GA_ReadOnly)\n \n if demDS is None:\n raise Exception('Error opening DEM file {0}'.format(self.demname))\n \n if (self.dhdxname != \"\"):\n if (sxDS is None):\n raise Exception('Error opening x-direction slope file {0}'.format(self.dhdxname))\n if (syDS is None):\n raise Exception('Error opening y-direction slope file {0}'.format(self.dhdyname))\n \n if (self.vxname != \"\"):\n if (vxDS is None):\n raise Exception('Error opening x-direction velocity file {0}'.format(self.vxname))\n if (vyDS is None):\n raise Exception('Error opening y-direction velocity file {0}'.format(self.vyname))\n\n if (self.srxname != \"\"):\n if (srxDS is None):\n raise Exception('Error opening x-direction search range file {0}'.format(self.srxname))\n if (sryDS is None):\n raise Exception('Error opening y-direction search range file {0}'.format(self.sryname))\n\n if (self.csminxname != \"\"):\n if (csminxDS is None):\n raise Exception('Error opening x-direction chip size min file {0}'.format(self.csminxname))\n if (csminyDS is None):\n raise Exception('Error opening y-direction chip size min file {0}'.format(self.csminyname))\n\n if (self.csmaxxname != \"\"):\n if (csmaxxDS is None):\n raise Exception('Error opening x-direction chip size max file {0}'.format(self.csmaxxname))\n if (csmaxyDS is None):\n raise Exception('Error opening y-direction chip size max file {0}'.format(self.csmaxyname))\n \n if (self.ssmname != \"\"):\n if (ssmDS is None):\n raise Exception('Error opening stable surface mask file {0}'.format(self.ssmname))\n\n geoTrans = demDS.GetGeoTransform()\n demXSize = demDS.RasterXSize\n demYSize = demDS.RasterYSize\n \n \n # Get offsets and size to read from DEM\n lOff = int(np.max( [np.floor((self._ylim[1] - geoTrans[3])/geoTrans[5]), 0.]))\n# pdb.set_trace()\n lCount = int(np.min([ np.ceil((self._ylim[0] - geoTrans[3])/geoTrans[5]), demYSize-1.]) - lOff)\n\n pOff = int(np.max([ np.floor((self._xlim[0] - geoTrans[0])/geoTrans[1]), 0.]))\n pCount = int(np.min([ np.ceil((self._xlim[1] - geoTrans[0])/geoTrans[1]), demXSize-1.]) - pOff)\n\n print(\"Xlimits : \" + str(geoTrans[0] + pOff * geoTrans[1]) + \" \" + str(geoTrans[0] + (pOff + pCount) * geoTrans[1]))\n\n print(\"Ylimits : \" + str(geoTrans[3] + (lOff + lCount) * geoTrans[5]) + \" \" + str(geoTrans[3] + lOff * geoTrans[5]))\n \n print(\"Origin index (in DEM) of geogrid: \" + str(pOff) + \" \" + str(lOff))\n \n print(\"Dimensions of geogrid: \" + str(pCount) + \" x \" + str(lCount))\n \n projDem = osr.SpatialReference()\n if self.epsgDem:\n projDem.ImportFromEPSG(self.epsgDem)\n else:\n raise Exception('EPSG code does not exist for 
DEM')\n \n projDat = osr.SpatialReference()\n if self.epsgDat:\n projDat.ImportFromEPSG(self.epsgDat)\n else:\n raise Exception('EPSG code does not exist for image data')\n \n fwdTrans = osr.CoordinateTransformation(projDem, projDat)\n invTrans = osr.CoordinateTransformation(projDat, projDem)\n \n if (self.vxname != \"\"):\n nodata = vxDS.GetRasterBand(1).GetNoDataValue()\n else:\n nodata = 0\n\n nodata_out = self.nodata_out\n\n\n pszFormat = \"GTiff\"\n adfGeoTransform = ( geoTrans[0] + pOff * geoTrans[1], geoTrans[1], 0, geoTrans[3] + lOff * geoTrans[5], 0, geoTrans[5] )\n oSRS = osr.SpatialReference()\n pszSRS_WKT = projDem.ExportToWkt()\n\n\n\n poDriver = gdal.GetDriverByName(pszFormat)\n if( poDriver is None ):\n raise Exception('Cannot create gdal driver for output')\n\n pszDstFilename = self.winlocname\n poDstDS = poDriver.Create(pszDstFilename, xsize=pCount, ysize=lCount, bands=2, eType=gdal.GDT_Int32)\n poDstDS.SetGeoTransform( adfGeoTransform )\n poDstDS.SetProjection( pszSRS_WKT )\n\n poBand1 = poDstDS.GetRasterBand(1)\n poBand2 = poDstDS.GetRasterBand(2)\n poBand1.SetNoDataValue(nodata_out)\n poBand2.SetNoDataValue(nodata_out)\n\n\n\n if ((self.dhdxname != \"\")&(self.vxname != \"\")):\n poDriverOff = gdal.GetDriverByName(pszFormat)\n if( poDriverOff is None ):\n raise Exception('Cannot create gdal driver for output')\n \n pszDstFilenameOff = self.winoffname\n poDstDSOff = poDriverOff.Create(pszDstFilenameOff, xsize=pCount, ysize=lCount, bands=2, eType=gdal.GDT_Int32)\n poDstDSOff.SetGeoTransform( adfGeoTransform )\n poDstDSOff.SetProjection( pszSRS_WKT )\n \n poBand1Off = poDstDSOff.GetRasterBand(1)\n poBand2Off = poDstDSOff.GetRasterBand(2)\n poBand1Off.SetNoDataValue(nodata_out)\n poBand2Off.SetNoDataValue(nodata_out)\n\n\n if ((self.dhdxname != \"\")&(self.srxname != \"\")):\n poDriverSch = gdal.GetDriverByName(pszFormat)\n if( poDriverSch is None ):\n raise Exception('Cannot create gdal driver for output')\n \n pszDstFilenameSch = self.winsrname\n poDstDSSch = poDriverSch.Create(pszDstFilenameSch, xsize=pCount, ysize=lCount, bands=2, eType=gdal.GDT_Int32)\n poDstDSSch.SetGeoTransform( adfGeoTransform )\n poDstDSSch.SetProjection( pszSRS_WKT )\n \n poBand1Sch = poDstDSSch.GetRasterBand(1)\n poBand2Sch = poDstDSSch.GetRasterBand(2)\n poBand1Sch.SetNoDataValue(nodata_out)\n poBand2Sch.SetNoDataValue(nodata_out)\n\n if (self.csminxname != \"\"):\n poDriverMin = gdal.GetDriverByName(pszFormat)\n if( poDriverMin is None ):\n raise Exception('Cannot create gdal driver for output')\n \n pszDstFilenameMin = self.wincsminname\n poDstDSMin = poDriverMin.Create(pszDstFilenameMin, xsize=pCount, ysize=lCount, bands=2, eType=gdal.GDT_Int32)\n poDstDSMin.SetGeoTransform( adfGeoTransform )\n poDstDSMin.SetProjection( pszSRS_WKT )\n \n poBand1Min = poDstDSMin.GetRasterBand(1)\n poBand2Min = poDstDSMin.GetRasterBand(2)\n poBand1Min.SetNoDataValue(nodata_out)\n poBand2Min.SetNoDataValue(nodata_out)\n \n if (self.csmaxxname != \"\"):\n poDriverMax = gdal.GetDriverByName(pszFormat)\n if( poDriverMax is None ):\n raise Exception('Cannot create gdal driver for output')\n \n pszDstFilenameMax = self.wincsmaxname\n poDstDSMax = poDriverMax.Create(pszDstFilenameMax, xsize=pCount, ysize=lCount, bands=2, eType=gdal.GDT_Int32)\n poDstDSMax.SetGeoTransform( adfGeoTransform )\n poDstDSMax.SetProjection( pszSRS_WKT )\n \n poBand1Max = poDstDSMax.GetRasterBand(1)\n poBand2Max = poDstDSMax.GetRasterBand(2)\n poBand1Max.SetNoDataValue(nodata_out)\n poBand2Max.SetNoDataValue(nodata_out)\n\n\n if (self.ssmname 
!= \"\"):\n poDriverMsk = gdal.GetDriverByName(pszFormat)\n if( poDriverMsk is None ):\n raise Exception('Cannot create gdal driver for output')\n \n pszDstFilenameMsk = self.winssmname\n poDstDSMsk = poDriverMsk.Create(pszDstFilenameMsk, xsize=pCount, ysize=lCount, bands=1, eType=gdal.GDT_Int32)\n poDstDSMsk.SetGeoTransform( adfGeoTransform )\n poDstDSMsk.SetProjection( pszSRS_WKT )\n \n poBand1Msk = poDstDSMsk.GetRasterBand(1)\n poBand1Msk.SetNoDataValue(nodata_out)\n\n\n\n\n if (self.dhdxname != \"\"):\n poDriverRO2VX = gdal.GetDriverByName(pszFormat)\n if( poDriverRO2VX is None ):\n raise Exception('Cannot create gdal driver for output')\n \n pszDstFilenameRO2VX = self.winro2vxname\n poDstDSRO2VX = poDriverRO2VX.Create(pszDstFilenameRO2VX, xsize=pCount, ysize=lCount, bands=2, eType=gdal.GDT_Float64)\n poDstDSRO2VX.SetGeoTransform( adfGeoTransform )\n poDstDSRO2VX.SetProjection( pszSRS_WKT )\n \n poBand1RO2VX = poDstDSRO2VX.GetRasterBand(1)\n poBand2RO2VX = poDstDSRO2VX.GetRasterBand(2)\n poBand1RO2VX.SetNoDataValue(nodata_out)\n poBand2RO2VX.SetNoDataValue(nodata_out)\n\n\n poDriverRO2VY = gdal.GetDriverByName(pszFormat)\n if( poDriverRO2VY is None ):\n raise Exception('Cannot create gdal driver for output')\n \n pszDstFilenameRO2VY = self.winro2vyname\n poDstDSRO2VY = poDriverRO2VY.Create(pszDstFilenameRO2VY, xsize=pCount, ysize=lCount, bands=2, eType=gdal.GDT_Float64)\n poDstDSRO2VY.SetGeoTransform( adfGeoTransform )\n poDstDSRO2VY.SetProjection( pszSRS_WKT )\n \n poBand1RO2VY = poDstDSRO2VY.GetRasterBand(1)\n poBand2RO2VY = poDstDSRO2VY.GetRasterBand(2)\n poBand1RO2VY.SetNoDataValue(nodata_out)\n poBand2RO2VY.SetNoDataValue(nodata_out)\n\n\n\n raster1 = np.zeros(pCount,dtype=np.int32)\n raster2 = np.zeros(pCount,dtype=np.int32)\n raster11 = np.zeros(pCount,dtype=np.int32)\n raster22 = np.zeros(pCount,dtype=np.int32)\n sr_raster11 = np.zeros(pCount,dtype=np.int32)\n sr_raster22 = np.zeros(pCount,dtype=np.int32)\n csmin_raster11 = np.zeros(pCount,dtype=np.int32)\n csmin_raster22 = np.zeros(pCount,dtype=np.int32)\n csmax_raster11 = np.zeros(pCount,dtype=np.int32)\n csmax_raster22 = np.zeros(pCount,dtype=np.int32)\n ssm_raster = np.zeros(pCount,dtype=np.int32)\n raster1a = np.zeros(pCount,dtype=np.float64)\n raster1b = np.zeros(pCount,dtype=np.float64)\n raster2a = np.zeros(pCount,dtype=np.float64)\n raster2b = np.zeros(pCount,dtype=np.float64)\n\n \n \n # X- and Y-direction pixel size\n X_res = np.abs(self.XSize)\n Y_res = np.abs(self.YSize)\n print(\"X-direction pixel size: \" + str(X_res))\n print(\"Y-direction pixel size: \" + str(Y_res))\n \n ChipSizeX0_PIX_X = np.ceil(self.chipSizeX0 / X_res / 4) * 4\n ChipSizeX0_PIX_Y = np.ceil(self.chipSizeX0 / Y_res / 4) * 4\n \n \n\n\n\n for ii in range(lCount):\n y = geoTrans[3] + (lOff+ii+0.5) * geoTrans[5]\n demLine = demDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n demLine = struct.unpack('d' * pCount, demLine)\n \n if (self.dhdxname != \"\"):\n sxLine = sxDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n sxLine = struct.unpack('d' * pCount, sxLine)\n syLine = syDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n syLine = struct.unpack('d' * pCount, syLine)\n \n if (self.vxname != \"\"):\n vxLine = vxDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, 
ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n vxLine = struct.unpack('d' * pCount, vxLine)\n vyLine = vyDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n vyLine = struct.unpack('d' * pCount, vyLine)\n \n if (self.srxname != \"\"):\n srxLine = srxDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n srxLine = struct.unpack('d' * pCount, srxLine)\n sryLine = sryDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n sryLine = struct.unpack('d' * pCount, sryLine)\n \n if (self.csminxname != \"\"):\n csminxLine = csminxDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n csminxLine = struct.unpack('d' * pCount, csminxLine)\n csminyLine = csminyDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n csminyLine = struct.unpack('d' * pCount, csminyLine)\n \n if (self.csmaxxname != \"\"):\n csmaxxLine = csmaxxDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n csmaxxLine = struct.unpack('d' * pCount, csmaxxLine)\n csmaxyLine = csmaxyDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n csmaxyLine = struct.unpack('d' * pCount, csmaxyLine)\n\n if (self.ssmname != \"\"):\n ssmLine = ssmDS.GetRasterBand(1).ReadRaster(xoff=pOff, yoff=lOff+ii, xsize=pCount, ysize=1, buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n ssmLine = struct.unpack('d' * pCount, ssmLine)\n\n for jj in range(pCount):\n xyzs = np.array([geoTrans[0] + (jj+pOff+0.5)*geoTrans[1], y, demLine[jj]])\n targxyz0 = xyzs.copy()\n if (self.dhdxname != \"\"):\n slp = np.array([sxLine[jj], syLine[jj], -1.0])\n if (self.vxname != \"\"):\n vel = np.array([vxLine[jj], vyLine[jj], 0.0])\n else:\n vel = np.array([0., 0., 0.])\n if (self.srxname != \"\"):\n schrng1 = np.array([srxLine[jj], sryLine[jj], 0.0])\n schrng2 = np.array([-srxLine[jj], sryLine[jj], 0.0])\n targutm0 = np.array(fwdTrans.TransformPoint(targxyz0[0],targxyz0[1],targxyz0[2]))\n xind = np.round((targutm0[0] - self.startingX) / self.XSize) + 1.\n yind = np.round((targutm0[1] - self.startingY) / self.YSize) + 1.\n \n # x-direction vector\n targutm = targutm0.copy()\n targutm[0] = targutm0[0] + self.XSize\n targxyz = np.array(invTrans.TransformPoint(targutm[0],targutm[1],targutm[2]))\n xunit = (targxyz-targxyz0) / np.linalg.norm(targxyz-targxyz0)\n \n # y-direction vector\n targutm = targutm0.copy()\n targutm[1] = targutm0[1] + self.YSize\n targxyz = np.array(invTrans.TransformPoint(targutm[0],targutm[1],targutm[2]))\n yunit = (targxyz-targxyz0) / np.linalg.norm(targxyz-targxyz0)\n\n # local normal vector\n if (self.dhdxname != \"\"):\n normal = -slp / np.linalg.norm(slp)\n else:\n normal = np.array([0., 0., 0.])\n\n if (self.vxname != \"\"):\n vel[2] = -(vel[0]*normal[0]+vel[1]*normal[1])/normal[2]\n\n if (self.srxname != \"\"):\n schrng1[2] = -(schrng1[0]*normal[0]+schrng1[1]*normal[1])/normal[2]\n schrng2[2] = -(schrng2[0]*normal[0]+schrng2[1]*normal[1])/normal[2]\n \n\n\n if ((xind > self.numberOfSamples)|(xind < 1)|(yind > self.numberOfLines)|(yind < 1)):\n# 
pdb.set_trace()\n raster1[jj] = nodata_out\n raster2[jj] = nodata_out\n raster11[jj] = nodata_out\n raster22[jj] = nodata_out\n \n sr_raster11[jj] = nodata_out\n sr_raster22[jj] = nodata_out\n csmin_raster11[jj] = nodata_out\n csmin_raster22[jj] = nodata_out\n csmax_raster11[jj] = nodata_out\n csmax_raster22[jj] = nodata_out\n ssm_raster[jj] = nodata_out\n \n raster1a[jj] = nodata_out\n raster1b[jj] = nodata_out\n raster2a[jj] = nodata_out\n raster2b[jj] = nodata_out\n else:\n raster1[jj] = xind;\n raster2[jj] = yind;\n# pdb.set_trace()\n# if ((self.vxname != \"\")&(vel[0] != nodata)):\n## pdb.set_trace()\n# raster11[jj] = np.round(np.dot(vel,xunit)*self.repeatTime/self.XSize/365.0/24.0/3600.0*1)\n# raster22[jj] = np.round(np.dot(vel,yunit)*self.repeatTime/self.YSize/365.0/24.0/3600.0*1)\n# else:\n# raster11[jj] = 0.\n# raster22[jj] = 0.\n if (self.dhdxname != \"\"):\n\n if (self.vxname != \"\"):\n if (vel[0] == nodata):\n raster11[jj] = 0.\n raster22[jj] = 0.\n else:\n raster11[jj] = np.round(np.dot(vel,xunit)*self.repeatTime/self.XSize/365.0/24.0/3600.0*1)\n raster22[jj] = np.round(np.dot(vel,yunit)*self.repeatTime/self.YSize/365.0/24.0/3600.0*1)\n\n cross = np.cross(xunit,yunit)\n cross = cross / np.linalg.norm(cross)\n cross_check = np.abs(np.arccos(np.dot(normal,cross))/np.pi*180.0-90.0)\n \n if (cross_check > 1.0):\n raster1a[jj] = normal[2]/(self.repeatTime/self.XSize/365.0/24.0/3600.0)*(normal[2]*yunit[1]-normal[1]*yunit[2])/((normal[2]*xunit[0]-normal[0]*xunit[2])*(normal[2]*yunit[1]-normal[1]*yunit[2])-(normal[2]*yunit[0]-normal[0]*yunit[2])*(normal[2]*xunit[1]-normal[1]*xunit[2]));\n raster1b[jj] = -normal[2]/(self.repeatTime/self.YSize/365.0/24.0/3600.0)*(normal[2]*xunit[1]-normal[1]*xunit[2])/((normal[2]*xunit[0]-normal[0]*xunit[2])*(normal[2]*yunit[1]-normal[1]*yunit[2])-(normal[2]*yunit[0]-normal[0]*yunit[2])*(normal[2]*xunit[1]-normal[1]*xunit[2]));\n raster2a[jj] = -normal[2]/(self.repeatTime/self.XSize/365.0/24.0/3600.0)*(normal[2]*yunit[0]-normal[0]*yunit[2])/((normal[2]*xunit[0]-normal[0]*xunit[2])*(normal[2]*yunit[1]-normal[1]*yunit[2])-(normal[2]*yunit[0]-normal[0]*yunit[2])*(normal[2]*xunit[1]-normal[1]*xunit[2]));\n raster2b[jj] = normal[2]/(self.repeatTime/self.YSize/365.0/24.0/3600.0)*(normal[2]*xunit[0]-normal[0]*xunit[2])/((normal[2]*xunit[0]-normal[0]*xunit[2])*(normal[2]*yunit[1]-normal[1]*yunit[2])-(normal[2]*yunit[0]-normal[0]*yunit[2])*(normal[2]*xunit[1]-normal[1]*xunit[2]));\n else:\n raster1a[jj] = nodata_out\n raster1b[jj] = nodata_out\n raster2a[jj] = nodata_out\n raster2b[jj] = nodata_out\n\n if (self.srxname != \"\"):\n if ((self.vxname != \"\")&(vel[0] == nodata)):\n sr_raster11[jj] = 0\n sr_raster22[jj] = 0\n else:\n sr_raster11[jj] = np.abs(np.round(np.dot(schrng1,xunit)*self.repeatTime/self.XSize/365.0/24.0/3600.0*1))\n sr_raster22[jj] = np.abs(np.round(np.dot(schrng1,yunit)*self.repeatTime/self.YSize/365.0/24.0/3600.0*1))\n if (np.abs(np.round(np.dot(schrng2,xunit)*self.repeatTime/self.XSize/365.0/24.0/3600.0*1)) > sr_raster11[jj]):\n sr_raster11[jj] = np.abs(np.round(np.dot(schrng2,xunit)*self.repeatTime/self.XSize/365.0/24.0/3600.0*1))\n if (np.abs(np.round(np.dot(schrng2,yunit)*self.repeatTime/self.YSize/365.0/24.0/3600.0*1)) > sr_raster22[jj]):\n sr_raster22[jj] = np.abs(np.round(np.dot(schrng2,yunit)*self.repeatTime/self.YSize/365.0/24.0/3600.0*1))\n if (sr_raster11[jj] == 0):\n sr_raster11[jj] = 1\n if (sr_raster22[jj] == 0):\n sr_raster22[jj] = 1\n\n if (self.csminxname != \"\"):\n csmin_raster11[jj] = csminxLine[jj] / self.chipSizeX0 * 
ChipSizeX0_PIX_X\n csmin_raster22[jj] = csminyLine[jj] / self.chipSizeX0 * ChipSizeX0_PIX_Y\n\n\n if (self.csmaxxname != \"\"):\n csmax_raster11[jj] = csmaxxLine[jj] / self.chipSizeX0 * ChipSizeX0_PIX_X\n csmax_raster22[jj] = csmaxyLine[jj] / self.chipSizeX0 * ChipSizeX0_PIX_Y\n\n\n\n if (self.ssmname != \"\"):\n ssm_raster[jj] = ssmLine[jj]\n \n\n\n\n \n\n# pdb.set_trace()\n \n poBand1.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=raster1.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n poBand2.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=raster2.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n if ((self.dhdxname != \"\")&(self.vxname != \"\")):\n poBand1Off.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=raster11.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n poBand2Off.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=raster22.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n if ((self.dhdxname != \"\")&(self.srxname != \"\")):\n poBand1Sch.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=sr_raster11.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n poBand2Sch.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=sr_raster22.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n if (self.csminxname != \"\"):\n poBand1Min.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=csmin_raster11.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n poBand2Min.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=csmin_raster22.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n if (self.csmaxxname != \"\"):\n poBand1Max.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=csmax_raster11.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n poBand2Max.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=csmax_raster22.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n if (self.ssmname != \"\"):\n poBand1Msk.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=ssm_raster.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Int32)\n if (self.dhdxname != \"\"):\n poBand1RO2VX.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=raster1a.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n poBand2RO2VX.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=raster1b.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n poBand1RO2VY.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=raster2a.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n poBand2RO2VY.WriteRaster(xoff=0, yoff=ii, xsize=pCount, ysize=1, buf_len=raster2b.tostring(), buf_xsize=pCount, buf_ysize=1, buf_type=gdal.GDT_Float64)\n\n \n poDstDS = None\n if ((self.dhdxname != \"\")&(self.vxname != \"\")):\n poDstDSOff = None\n if ((self.dhdxname != \"\")&(self.srxname != \"\")):\n poDstDSSch = None\n if (self.csminxname != \"\"):\n poDstDSMin = None\n if (self.csmaxxname != \"\"):\n poDstDSMax = None\n if (self.ssmname != \"\"):\n poDstDSMsk = None\n if (self.dhdxname != \"\"):\n poDstDSRO2VX = None\n \n poDstDSRO2VY = None\n \n demDS = None\n \n if (self.dhdxname != \"\"):\n sxDS = None\n syDS = None\n \n if (self.vxname != \"\"):\n vxDS = None\n vyDS = None\n \n if (self.srxname != \"\"):\n srxDS = None\n sryDS = None\n \n if (self.csminxname != \"\"):\n csminxDS = 
None\n csminyDS = None\n\n if (self.csmaxxname != \"\"):\n csmaxxDS = None\n csmaxyDS = None\n \n if (self.ssmname != \"\"):\n ssmDS = None\n \n\n \n\n\n\n\n\n\n\n\n\n \n \n \n\n def __init__(self):\n super(GeogridOptical, self).__init__()\n\n ##Optical image related parameters\n self.startingY = None\n self.startingX = None\n self.XSize = None\n self.YSize = None\n self.numberOfSamples = None\n self.numberOfLines = None\n self.repeatTime = None\n self.chipSizeX0 = None\n\n ##Input related parameters\n self.dat1name = None\n self.demname = None\n self.dhdxname = None\n self.dhdyname = None\n self.vxname = None\n self.vyname = None\n self.srxname = None\n self.sryname = None\n self.csminxname = None\n self.csminyname = None\n self.csmaxxname = None\n self.csmaxyname = None\n self.ssmname = None\n \n ##Output related parameters\n self.winlocname = None\n self.winoffname = None\n self.winsrname = None\n self.wincsminname = None\n self.wincsmaxname = None\n self.winssmname = None\n self.winro2vxname = None\n self.winro2vyname = None\n\n ##Coordinate system\n self.epsgDem = None\n self.epsgDat = None\n self._xlim = None\n self._ylim = None\n self.nodata_out = None\n\n\n"
] | [
[
"numpy.dot",
"numpy.abs",
"numpy.min",
"numpy.linalg.norm",
"numpy.round",
"numpy.max",
"numpy.ceil",
"numpy.floor",
"numpy.cross",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
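The Geogrid record above reads every input raster (DEM, slope, velocity, search-range, chip-size, and mask layers) one scanline at a time: GDAL's ReadRaster returns the requested line as raw bytes in a chosen buffer type, and struct.unpack decodes those bytes into Python floats. A minimal sketch of that access pattern follows; the file name "dem.tif" and the scanline index are illustrative assumptions, not values taken from the record.

import struct
from osgeo import gdal

# Open a single-band raster read-only; "dem.tif" is a placeholder path.
ds = gdal.Open("dem.tif", gdal.GA_ReadOnly)
band = ds.GetRasterBand(1)
width = ds.RasterXSize

# Read scanline 0 as float64 regardless of the band's native type;
# ReadRaster hands back raw bytes (8 bytes per pixel for GDT_Float64).
raw = band.ReadRaster(xoff=0, yoff=0, xsize=width, ysize=1,
                      buf_xsize=width, buf_ysize=1,
                      buf_type=gdal.GDT_Float64)
line = struct.unpack('d' * width, raw)  # tuple of `width` doubles

print(line[:5])
ds = None  # GDAL datasets are closed by dropping the reference

Reading one scanline per call, as the record does inside its ii/jj loops, keeps memory bounded by a single line of pixels instead of the full raster.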
jlezama/disentangling-jacobian | [
"c570945055c735a15b9adba093b7c688c7310aad"
] | [
"unsupervised_disentangling/utils.py"
] | [
"import torch\nimport torch.nn as nn\n\nfrom PIL import Image\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\n\n\ndef to_img(x):\n x = 1-x.clamp(0, 1)\n x = x.view(x.size(0), 1, 28, 28)\n return x\n\ndef show(img, outfname=None):\n npimg = img.numpy()\n plt.imshow(npimg, interpolation='nearest', aspect='equal')\n if outfname is not None:\n Image.fromarray(npimg).convert('RGB').save(outfname)\n\n\n\n# to show validation loss\ndef val_loss(tmp_model, test_loader, hidden_size, train=False, teacher=None):\n tmp_model.eval()\n\n criterion = nn.MSELoss()\n\n\n if teacher:\n teacher.eval()\n\n count = 0\n loss_rec_all = 0 \n all_z = None\n\n loss_z_all = 0\n loss_xcov_all = 0\n loss_jacobian_all = 0\n\n \n for data in test_loader:\n img, y = data\n img = img.view(img.size(0), -1)\n img = img.cuda()\n y = y.cuda()\n # ===================forward=====================\n #output = model.decoder(Z)\n #output = model(img)\n z = tmp_model.encoder(img)\n tmp_output = tmp_model.decoder(z) \n\n try:\n all_z = torch.cat((all_z,z))\n all_y = torch.cat((all_y,y))\n except:\n all_z = z.clone()\n all_y = y.clone()\n count+= 1\n \n loss_rec_all += criterion(tmp_output, img)\n \n if teacher:\n teacher_z = teacher.encoder(img)\n student_nuisance_z = z[:,:-hidden_size]\n student_factor_z = z[:,-hidden_size:]\n \n loss_z = torch.mean((teacher_z-student_factor_z)**2)\n loss_xcov = compute_xcov(student_nuisance_z, student_factor_z, teacher_z.size(0))\n\n # ================= start jacobian supervision ====\n # swap factor z\n swap_idx = torch.randperm(z.size(0)).cuda()\n \n student_z_swapped = torch.cat((student_nuisance_z,student_factor_z[swap_idx]),dim=1)\n teacher_z_swapped = teacher_z[swap_idx]\n \n \n swapped_tmp_output = tmp_model.decoder(student_z_swapped)\n swapped_teacher_output = teacher.decoder(teacher_z_swapped)\n teacher_output = teacher.decoder(teacher_z)\n \n diff_teacher = (teacher_output - swapped_teacher_output).clone().detach()\n diff_student = (tmp_output - swapped_tmp_output)\n \n if 0:#params.gauss_sigma >0:\n diff_teacher = diff_teacher.view([img.size(0),1,28,28])\n diff_student = diff_student.view([img.size(0),1,28,28])\n \n diff_student = GBlur(diff_student)\n diff_teacher = GBlur_teacher(diff_teacher)\n \n jacobian_loss = torch.mean((diff_teacher-diff_student)**2)\n\n\n loss_z_all += loss_z\n loss_xcov_all += loss_xcov\n loss_jacobian_all += jacobian_loss\n \n if train:\n tmp_model.train()\n if teacher:\n teacher.train()\n\n loss_rec_all /= count\n loss_z_all /= count\n loss_xcov_all /= count\n loss_jacobian_all /= count\n\n if teacher:\n return loss_rec_all, loss_z_all, loss_jacobian_all, loss_xcov_all\n else:\n return loss_rec_all\n \n\n\n\n\n\n\"\"\" # Define Cross-covariance loss \"\"\"\ndef compute_xcov(z,y,bs):\n \"\"\"computes cross-covariance loss between latent code and attributes\nprediction, so that latent code does note encode attributes, compute\nmean first.\"\"\"\n # z: latent code\n # y: predicted labels\n # bs: batch size\n z = z.contiguous().view(bs,-1)\n y = y.contiguous().view(bs,-1)\n\n # print z.size(), y.size()\n\n # center matrices\n\n z = z - torch.mean(z, dim=0)\n y = y - torch.mean(y, dim=0)\n\n \n cov_matrix = torch.matmul(torch.t(z),y)\n\n cov_loss = torch.norm(cov_matrix.view(1,-1))/bs\n\n return cov_loss\n\n\n\n\n"
] | [
[
"torch.mean",
"matplotlib.pyplot.imshow",
"torch.cat",
"matplotlib.use",
"torch.t",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
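The utils.py record above decorrelates two blocks of a latent code with a cross-covariance (XCov) penalty: both blocks are mean-centered over the batch, their cross-covariance matrix zᵀy is formed, and the Frobenius norm of that matrix, scaled by batch size, is the loss. Below is a self-contained sketch of the same computation; the tensor shapes are illustrative assumptions rather than values from the record.

import torch

def xcov_loss(z: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # z: one latent block (e.g. the nuisance code), y: the other (e.g. the factor code)
    bs = z.size(0)
    z = z.reshape(bs, -1)
    y = y.reshape(bs, -1)
    z = z - z.mean(dim=0)               # center each latent dimension over the batch
    y = y - y.mean(dim=0)
    cov = torch.matmul(z.t(), y)        # (dim_z, dim_y) cross-covariance matrix
    return torch.norm(cov) / bs         # Frobenius norm, scaled by batch size

# Illustrative shapes only: batch of 32, 16-d nuisance block, 10-d factor block.
nuisance = torch.randn(32, 16)
factor = torch.randn(32, 10)
print(xcov_loss(nuisance, factor).item())

Driving this norm toward zero discourages linear dependence between the two blocks, which is how the record keeps the nuisance code from re-encoding the supervised factors.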
LEOCUIZHIHAO/segcarpoint | [
"42d78cde1f28b0c705f7755356610cf3039c3caf"
] | [
"bcl_caffe/layers/bcl_layers.py"
] | [
"\nfrom pathlib import Path\nimport pickle\nimport shutil\nimport time, timeit\nimport numpy as np\nimport torch\nimport torchplus\n\nfrom google.protobuf import text_format\nimport second.data.kitti_common as kitti\nfrom second.builder import target_assigner_builder, voxel_builder\nfrom second.pytorch.core import box_torch_ops\nfrom second.data.preprocess import merge_second_batch, merge_second_batch_multigpu\nfrom second.protos import pipeline_pb2\nfrom second.pytorch.builder import box_coder_builder, input_reader_builder\nfrom second.pytorch.models.voxel_encoder import get_paddings_indicator_np #for pillar\nfrom second.utils.log_tool import SimpleModelLog\nimport caffe\nfrom enum import Enum\nimport numpy_indexed as npi\nfrom numba import jit\nfrom numba import njit, prange\nfrom second.core import box_np_ops\n\ndef build_network(model_cfg, measure_time=False):\n voxel_generator = voxel_builder.build(model_cfg.voxel_generator)\n bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]\n box_coder = box_coder_builder.build(model_cfg.box_coder)\n target_assigner_cfg = model_cfg.target_assigner\n target_assigner = target_assigner_builder.build(target_assigner_cfg,\n bv_range, box_coder)\n\n return voxel_generator, target_assigner\n\ndef _worker_init_fn(worker_id):\n time_seed = np.array(time.time(), dtype=np.int32)\n np.random.seed(time_seed + worker_id)\n print(f\"WORKER {worker_id} seed:\", np.random.get_state()[1][0])\n\ndef load_config(model_dir, config_path):\n model_dir = str(Path(model_dir).resolve())\n model_dir = Path(model_dir)\n config_file_bkp = \"pipeline.config\"\n if isinstance(config_path, str):\n # directly provide a config object. this usually used\n # when you want to train with several different parameters in\n # one script.\n config = pipeline_pb2.TrainEvalPipelineConfig()\n with open(config_path, \"r\") as f:\n proto_str = f.read()\n text_format.Merge(proto_str, config)\n else:\n config = config_path\n proto_str = text_format.MessageToString(config, indent=2)\n with (model_dir / config_file_bkp).open(\"w\") as f:\n f.write(proto_str)\n\n input_cfg = config.train_input_reader\n eval_input_cfg = config.eval_input_reader\n model_cfg = config.model.second\n train_cfg = config.train_config\n\n return (input_cfg, eval_input_cfg, model_cfg, train_cfg)\n\nclass LossNormType(Enum):\n NormByNumPositives = \"norm_by_num_positives\"\n NormByNumExamples = \"norm_by_num_examples\"\n NormByNumPosNeg = \"norm_by_num_pos_neg\"\n\nclass DataFeature(caffe.Layer):\n def setup(self, bottom, top):\n params = {}\n params.update(eval(self.param_str))\n bcl_keep_voxels_eval = params['bcl_keep_voxels_eval']\n seg_keep_points_eval = params['seg_keep_points_eval']\n num_points_per_voxel = params['num_points_per_voxel']\n is_segmentation = params['segmentation']\n try:\n batch_size = params[\"eval_batch_size\"]\n except Exception as e:\n batch_size = 1\n # BCL\n if is_segmentation:\n top[0].reshape(*(batch_size, seg_keep_points_eval, 4)) # for pillar shape should (B,C=9,V,N=100), For second (B,C=1,V,N=5)\n else:\n # top[0].reshape(*(bcl_keep_voxels_eval, num_points_per_voxel, 4)) #pillar\n top[0].reshape(*(batch_size, bcl_keep_voxels_eval, 4)) #pillar\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n pass\n def backward(self, top, propagate_down, bottom):\n pass\n\nclass VoxelSegNetInput(caffe.Layer):\n def setup(self, bottom, top):\n params = {}\n params.update(eval(self.param_str))\n max_voxels = params['max_voxels']\n points_per_voxel = params['points_per_voxel']\n 
seg_keep_points_eval = params['seg_keep_points_eval']\n top[0].reshape(*(1, seg_keep_points_eval, 4)) # seg points\n top[1].reshape(*(1, max_voxels, 3)) # Coords\n top[2].reshape(*(1, seg_keep_points_eval, 3)) # p2voxel_idx\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n pass\n def backward(self, top, propagate_down, bottom):\n pass\n\nclass LatticeFeature(caffe.Layer):\n def setup(self, bottom, top):\n params = {}\n params.update(eval(self.param_str))\n bcl_keep_voxels_eval = params['bcl_keep_voxels_eval']\n seg_keep_points_eval = params['seg_keep_points_eval']\n is_segmentation = params['segmentation']\n # BCL\n if is_segmentation:\n top[0].reshape(*(seg_keep_points_eval,4)) #(V, C=4) # TODO:\n else:\n top[0].reshape(*(bcl_keep_voxels_eval,4)) # for pillar\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n pass\n def backward(self, top, propagate_down, bottom):\n pass\n\n#for point-wise segmentation\nclass InputKittiData(caffe.Layer):\n def setup(self, bottom, top):\n params = dict(batch_size=1)\n params.update(eval(self.param_str))\n\n model_dir = params['model_dir']\n config_path = params['config_path']\n self.phase = params['subset']\n self.input_cfg, self.eval_input_cfg, self.model_cfg, train_cfg = load_config(model_dir, config_path)\n self.voxel_generator, self.target_assigner = build_network(self.model_cfg)\n self.dataloader = self.load_dataloader(self.input_cfg, self.eval_input_cfg,\n self.model_cfg, args=params)\n # for point segmentation detection\n for example in self.dataloader:\n seg_points = example['seg_points']\n seg_labels =example['seg_labels']\n break\n self.data_iter = iter(self.dataloader)\n\n # for point object segmentation\n top[0].reshape(*seg_points.shape)\n top[1].reshape(*seg_labels.shape)\n\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n try:\n example = next(self.data_iter)\n except Exception as e:\n print(\"\\n[info] start a new epoch for {} data\\n\".format(self.phase))\n self.data_iter = iter(self.dataloader)\n example = next(self.data_iter)\n\n seg_points = example['seg_points']\n seg_labels = example['seg_labels']\n\n # \"\"\"shuffle car seg points\"\"\" #move to preprocess\n # indices = np.arange(seg_labels.shape[1])\n # np.random.shuffle(indices)\n # seg_points = seg_points[:,indices]\n # seg_labels = seg_labels[:,indices]\n\n # for point object segmentation\n top[0].reshape(*seg_points.shape)\n top[1].reshape(*seg_labels.shape)\n top[0].data[...] = seg_points\n top[1].data[...] 
= seg_labels\n #print(\"[debug] train img idx : \", example[\"metadata\"])\n\n def backward(self, top, propagate_down, bottom):\n pass\n def load_dataloader(self, input_cfg, eval_input_cfg, model_cfg, args):\n try: segmentation = args[\"segmentation\"]\n except: segmentation = True\n try: bcl_keep_voxels = args[\"bcl_keep_voxels\"]\n except: bcl_keep_voxels = 6000\n try: seg_keep_points = args[\"seg_keep_points\"]\n except: seg_keep_points = 8000\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=self.voxel_generator,\n target_assigner=self.target_assigner,\n segmentation=segmentation,\n bcl_keep_voxels=bcl_keep_voxels,\n seg_keep_points=seg_keep_points,\n multi_gpu=False,\n generate_anchors_cachae=args['anchors_cachae']) #True FOR Pillar, False For BCL\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=not False)\n return dataloader\n\n#for voxel-wise object detection\nclass InputKittiDataV2(caffe.Layer):\n\n def setup(self, bottom, top):\n\n params = dict(batch_size=1)\n params.update(eval(self.param_str))\n\n model_dir = params['model_dir']\n config_path = params['config_path']\n self.phase = params['subset']\n self.input_cfg, self.eval_input_cfg, self.model_cfg, train_cfg = load_config(model_dir, config_path)\n self.voxel_generator, self.target_assigner = build_network(self.model_cfg)\n self.dataloader = self.load_dataloader(self.input_cfg, self.eval_input_cfg,\n self.model_cfg, args=params)\n\n # for point segmentation detection\n for example in self.dataloader:\n voxels = example['voxels']\n coors = example['coordinates']\n labels = example['labels']\n reg_targets = example['reg_targets']\n break\n self.data_iter = iter(self.dataloader)\n\n # for point object segmentation\n top[0].reshape(*voxels.shape)\n top[1].reshape(*coors.shape)\n top[2].reshape(*labels.shape)\n top[3].reshape(*reg_targets.shape)\n\n def reshape(self, bottom, top):\n pass\n\n def forward(self, bottom, top):\n try:\n example = next(self.data_iter)\n except Exception as e:\n print(\"\\n[info] start a new epoch for {} data\\n\".format(self.phase))\n self.data_iter = iter(self.dataloader)\n example = next(self.data_iter)\n\n voxels = example['voxels']\n coors = example['coordinates']\n labels = example['labels']\n reg_targets = example['reg_targets']\n\n # for point object segmentation\n # top[0].reshape(*voxels.shape)\n # top[1].reshape(*coors.shape)\n # top[2].reshape(*labels.shape)\n # top[3].reshape(*reg_targets.shape)\n top[0].data[...] = voxels\n top[1].data[...] = coors\n top[2].data[...] = labels\n top[3].data[...] 
= reg_targets\n #print(\"[debug] train img idx : \", example[\"metadata\"])\n\n def backward(self, top, propagate_down, bottom):\n pass\n\n def load_dataloader(self, input_cfg, eval_input_cfg, model_cfg, args):\n try: segmentation = args[\"segmentation\"]\n except: segmentation = False\n try: bcl_keep_voxels = args[\"bcl_keep_voxels\"]\n except: bcl_keep_voxels = 6000\n try: seg_keep_points = args[\"seg_keep_points\"]\n except: seg_keep_points = 8000\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=self.voxel_generator,\n target_assigner=self.target_assigner,\n segmentation=segmentation,\n bcl_keep_voxels=bcl_keep_voxels,\n seg_keep_points=seg_keep_points,\n multi_gpu=False,\n generate_anchors_cachae=args['anchors_cachae']) #True FOR Pillar, False For BCL\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=not False)\n return dataloader\n\n#for point-wise object detection & segmentation\nclass InputKittiDataV3(caffe.Layer):\n\n def setup(self, bottom, top):\n\n params = dict(batch_size=1)\n params.update(eval(self.param_str))\n\n model_dir = params['model_dir']\n config_path = params['config_path']\n self.phase = params['subset']\n self.generate_anchors_cachae = params['anchors_cachae'] #True FOR Pillar, False For BCL\n self.input_cfg, self.eval_input_cfg, self.model_cfg, train_cfg = load_config(model_dir, config_path)\n self.voxel_generator, self.target_assigner = build_network(self.model_cfg)\n self.dataloader = self.load_dataloader(self.input_cfg, self.eval_input_cfg, self.model_cfg)\n\n # for point segmentation detection\n for example in self.dataloader:\n points = example['points']\n coors = example['coordinates']\n labels = example['labels']\n reg_targets = example['reg_targets']\n break\n self.data_iter = iter(self.dataloader)\n\n # for point object segmentation\n top[0].reshape(*points.shape)\n top[1].reshape(*coors.shape)\n top[2].reshape(*labels.shape)\n top[3].reshape(*reg_targets.shape)\n\n def reshape(self, bottom, top):\n pass\n\n def forward(self, bottom, top):\n try:\n example = next(self.data_iter)\n except Exception as e:\n print(\"\\n[info] start a new epoch for {} data\\n\".format(self.phase))\n self.data_iter = iter(self.dataloader)\n example = next(self.data_iter)\n\n points = example['points']\n coors = example['coordinates']\n labels = example['labels']\n reg_targets = example['reg_targets']\n\n # for point object segmentation\n top[0].reshape(*points.shape)\n top[1].reshape(*coors.shape)\n top[2].reshape(*labels.shape)\n top[3].reshape(*reg_targets.shape)\n top[0].data[...] = points\n top[1].data[...] = coors\n top[2].data[...] = labels\n top[3].data[...] 
= reg_targets\n #print(\"[debug] train img idx : \", example[\"metadata\"])\n\n def backward(self, top, propagate_down, bottom):\n pass\n\n def load_dataloader(self, input_cfg, eval_input_cfg, model_cfg, args):\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=self.voxel_generator,\n target_assigner=self.target_assigner,\n multi_gpu=False,\n #generate_anchors_cachae=self.generate_anchors_cachae\n ) #True FOR Pillar, False For BCL\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=not False)\n return dataloader\n\n#for point-wise object detection\nclass InputKittiDataV4(caffe.Layer):\n\n def setup(self, bottom, top):\n\n params = dict(batch_size=1)\n params['anchors_cachae']=False #False For BCL, Anchor Free\n params.update(eval(self.param_str))\n\n model_dir = params['model_dir']\n config_path = params['config_path']\n self.phase = params['subset']\n self.input_cfg, self.eval_input_cfg, self.model_cfg, train_cfg = load_config(model_dir, config_path)\n self.voxel_generator, self.target_assigner = build_network(self.model_cfg)\n self.dataloader = self.load_dataloader(self.input_cfg, self.eval_input_cfg,\n self.model_cfg, args=params)\n\n for example in self.dataloader:\n points = example['points']\n labels = example['labels']\n reg_targets = example['reg_targets']\n break\n self.data_iter = iter(self.dataloader)\n\n top[0].reshape(*points.shape)\n top[1].reshape(*labels.shape)\n top[2].reshape(*reg_targets.shape)\n\n def reshape(self, bottom, top):\n pass\n\n def forward(self, bottom, top):\n try:\n example = next(self.data_iter)\n except Exception as e:\n print(\"\\n[info] start a new epoch for {} data\\n\".format(self.phase))\n self.data_iter = iter(self.dataloader)\n example = next(self.data_iter)\n\n points = example['points']\n labels = example['labels']\n reg_targets = example['reg_targets']\n\n top[0].reshape(*points.shape)\n top[1].reshape(*labels.shape)\n top[2].reshape(*reg_targets.shape)\n top[0].data[...] = points\n top[1].data[...] = labels\n top[2].data[...] 
= reg_targets\n #print(\"[debug] train img idx : \", example[\"metadata\"])\n\n def backward(self, top, propagate_down, bottom):\n pass\n\n def load_dataloader(self, input_cfg, eval_input_cfg, model_cfg, args):\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=self.voxel_generator,\n target_assigner=self.target_assigner,\n segmentation=segmentation,\n bcl_keep_voxels=bcl_keep_voxels,\n seg_keep_points=seg_keep_points,\n multi_gpu=False,\n generate_anchors_cachae=args['anchors_cachae']) #True FOR Pillar, False For BCL\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=not False)\n return dataloader\n\n#for seg_feature map\nclass InputKittiDataV5(caffe.Layer):\n def setup(self, bottom, top):\n params = dict(batch_size=1)\n params.update(eval(self.param_str))\n\n model_dir = params['model_dir']\n config_path = params['config_path']\n self.phase = params['subset']\n self.input_cfg, self.eval_input_cfg, self.model_cfg, train_cfg = load_config(model_dir, config_path)\n self.voxel_generator, self.target_assigner = build_network(self.model_cfg)\n self.dataloader = self.load_dataloader(self.input_cfg, self.eval_input_cfg,\n self.model_cfg, args=params)\n # for point segmentation detection\n for example in self.dataloader:\n seg_points = example['seg_points']\n seg_labels =example['seg_labels']\n labels = example['labels']\n reg_targets =example['reg_targets']\n\n break\n self.data_iter = iter(self.dataloader)\n\n # for point object segmentation\n top[0].reshape(*seg_points.shape)\n top[1].reshape(*seg_labels.shape)\n top[2].reshape(*labels.shape)\n top[3].reshape(*reg_targets.shape)\n\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n try:\n example = next(self.data_iter)\n except Exception as e:\n print(\"\\n[info] start a new epoch for {} data\\n\".format(self.phase))\n self.data_iter = iter(self.dataloader)\n example = next(self.data_iter)\n\n seg_points = example['seg_points']\n seg_labels = example['seg_labels']\n labels = example['labels']\n reg_targets =example['reg_targets']\n\n # \"\"\"shuffle car seg points\"\"\" #moved to preprocess\n # for point object segmentation\n top[0].data[...] = seg_points\n top[1].data[...] = seg_labels\n top[2].data[...] = labels\n top[3].data[...] 
= reg_targets\n #print(\"[debug] train img idx : \", example[\"metadata\"])\n\n def backward(self, top, propagate_down, bottom):\n pass\n def load_dataloader(self, input_cfg, eval_input_cfg, model_cfg, args):\n try: segmentation = args[\"segmentation\"]\n except: segmentation = True\n try: bcl_keep_voxels = args[\"bcl_keep_voxels\"]\n except: bcl_keep_voxels = 6000\n try: seg_keep_points = args[\"seg_keep_points\"]\n except: seg_keep_points = 8000\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=self.voxel_generator,\n target_assigner=self.target_assigner,\n segmentation=segmentation,\n bcl_keep_voxels=bcl_keep_voxels,\n seg_keep_points=seg_keep_points,\n multi_gpu=False,\n generate_anchors_cachae=args['anchors_cachae']) #True FOR Pillar, False For BCL\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=not False)\n return dataloader\n\nclass InputKittiDataV6(caffe.Layer):\n def setup(self, bottom, top):\n params = dict(batch_size=1)\n params.update(eval(self.param_str))\n\n model_dir = params['model_dir']\n config_path = params['config_path']\n self.phase = params['subset']\n self.input_cfg, self.eval_input_cfg, self.model_cfg, train_cfg = load_config(model_dir, config_path)\n self.voxel_generator, self.target_assigner = build_network(self.model_cfg)\n self.dataloader = self.load_dataloader(self.input_cfg, self.eval_input_cfg,\n self.model_cfg, args=params)\n # for point segmentation detection\n for example in self.dataloader:\n seg_points = example['seg_points']\n seg_labels =example['seg_labels']\n gt_box = example['gt_boxes']\n break\n self.data_iter = iter(self.dataloader)\n\n # for point object segmentation\n top[0].reshape(*seg_points.shape)\n top[1].reshape(*seg_labels.shape)\n top[2].reshape(*gt_box.shape)\n\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n try:\n example = next(self.data_iter)\n except Exception as e:\n print(\"\\n[info] start a new epoch for {} data\\n\".format(self.phase))\n self.data_iter = iter(self.dataloader)\n example = next(self.data_iter)\n\n seg_points = example['seg_points']\n seg_labels = example['seg_labels']\n gt_box = example['gt_boxes']\n # \"\"\"shuffle car seg points\"\"\" #moved to preprocess\n # for point object segmentation\n top[0].data[...] = seg_points\n top[1].data[...] = seg_labels\n top[2].reshape(*gt_box.shape)\n top[2].data[...] 
= gt_box\n #print(\"[debug] train img idx : \", example[\"metadata\"])\n\n def backward(self, top, propagate_down, bottom):\n pass\n def load_dataloader(self, input_cfg, eval_input_cfg, model_cfg, args):\n try: segmentation = args[\"segmentation\"]\n except: segmentation = True\n try: bcl_keep_voxels = args[\"bcl_keep_voxels\"]\n except: bcl_keep_voxels = 6000\n try: seg_keep_points = args[\"seg_keep_points\"]\n except: seg_keep_points = 8000\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=self.voxel_generator,\n target_assigner=self.target_assigner,\n segmentation=segmentation,\n bcl_keep_voxels=bcl_keep_voxels,\n seg_keep_points=seg_keep_points,\n multi_gpu=False,\n generate_anchors_cachae=args['anchors_cachae']) #True FOR Pillar, False For BCL\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=not False)\n return dataloader\n\nclass InputKittiDataV7(caffe.Layer):\n def setup(self, bottom, top):\n params = dict(batch_size=1)\n params.update(eval(self.param_str))\n\n model_dir = params['model_dir']\n config_path = params['config_path']\n self.phase = params['subset']\n self.input_cfg, self.eval_input_cfg, self.model_cfg, train_cfg = load_config(model_dir, config_path)\n self.voxel_generator, self.target_assigner = build_network(self.model_cfg)\n self.dataloader = self.load_dataloader(self.input_cfg, self.eval_input_cfg,\n self.model_cfg, args=params)\n # for point segmentation detection\n for example in self.dataloader:\n seg_points = example['seg_points']\n seg_labels = example['seg_labels']\n coords = example['coords']\n p2voxel_idx = example['p2voxel_idx']\n cls_labels = example['cls_labels']\n reg_targets = example['reg_targets']\n break\n self.data_iter = iter(self.dataloader)\n\n # for point object segmentation\n top[0].reshape(*seg_points.shape)\n top[1].reshape(*seg_labels.shape)\n top[2].reshape(*coords.shape)\n top[3].reshape(*p2voxel_idx.shape)\n top[4].reshape(*cls_labels.shape)\n top[5].reshape(*reg_targets.shape)\n\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n try:\n example = next(self.data_iter)\n except Exception as e:\n print(\"\\n[info] start a new epoch for {} data\\n\".format(self.phase))\n self.data_iter = iter(self.dataloader)\n example = next(self.data_iter)\n\n seg_points = example['seg_points']\n seg_labels = example['seg_labels']\n coords = example['coords']\n p2voxel_idx = example['p2voxel_idx']\n cls_labels = example['cls_labels']\n reg_targets = example['reg_targets']\n\n # \"\"\"shuffle car seg points\"\"\" #moved to preprocess\n # for point object segmentation\n top[0].data[...] = seg_points\n top[1].data[...] = seg_labels\n top[2].data[...] = coords\n top[3].data[...] = p2voxel_idx\n top[4].data[...] = cls_labels\n top[5].data[...] 
= reg_targets\n\n #print(\"[debug] train img idx : \", example[\"metadata\"])\n\n def backward(self, top, propagate_down, bottom):\n pass\n def load_dataloader(self, input_cfg, eval_input_cfg, model_cfg, args):\n try: segmentation = args[\"segmentation\"]\n except: segmentation = True\n try: bcl_keep_voxels = args[\"bcl_keep_voxels\"]\n except: bcl_keep_voxels = 6000\n try: seg_keep_points = args[\"seg_keep_points\"]\n except: seg_keep_points = 8000\n try: points_per_voxel = args[\"points_per_voxel\"]\n except: points_per_voxel = 200\n dataset = input_reader_builder.build(\n input_cfg,\n model_cfg,\n training=True,\n voxel_generator=self.voxel_generator,\n target_assigner=self.target_assigner,\n segmentation=segmentation,\n bcl_keep_voxels=bcl_keep_voxels,\n seg_keep_points=seg_keep_points,\n multi_gpu=False,\n generate_anchors_cachae=args['anchors_cachae'],\n points_per_voxel=points_per_voxel) #True FOR Pillar, False For BCL\n\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=input_cfg.batch_size,\n shuffle=True,\n num_workers=input_cfg.preprocess.num_workers,\n pin_memory=False,\n collate_fn=merge_second_batch,\n worker_init_fn=_worker_init_fn,\n drop_last=not False)\n return dataloader\n\nclass Scatter(caffe.Layer):\n def setup(self, bottom, top):\n param = eval(self.param_str)\n output_shape = param['output_shape']\n self.ny = output_shape[0]\n self.nx = output_shape[1]\n self.nchannels = output_shape[2]\n self.batch_size = 1\n\n voxel_features = bottom[0].data\n voxel_features = np.squeeze(voxel_features) #(1, 64, 1, Voxel) -> (64,Voxel)\n coords = bottom[1].data # reverse_index is True, output coordinates will be zyx format\n batch_canvas, _ = self.ScatterNet(voxel_features, coords, self.nchannels, self.nx, self.ny)\n\n top[0].reshape(*batch_canvas.shape)\n\n def reshape(self, bottom, top):\n pass\n\n def forward(self, bottom, top):\n voxel_features = bottom[0].data #(1,64,-1,1)\n voxel_features = np.squeeze(voxel_features) #(1, 64, -1, 1) -> (64,-1)\n coords = bottom[1].data\n batch_canvas, self.indices = self.ScatterNet(voxel_features, coords, self.nchannels, self.nx, self.ny)\n\n top[0].data[...] = batch_canvas\n\n def backward(self, top, propagate_down, bottom):\n diff = top[0].diff.reshape(self.batch_size, self.nchannels, self.nx * self.ny)[:,:,self.indices]\n bottom[0].diff[...] 
= np.expand_dims(diff, axis=2)\n\n def ScatterNet(self, voxel_features, coords, nchannels, feature_map_x, feature_map_y):\n canvas = np.zeros(shape=(nchannels, feature_map_x * feature_map_y)) #(nchannels,-1)\n # Only include non-empty pillars\n indices = coords[:, 2] * feature_map_x + coords[:, 3]\n indices = indices.astype(int)\n canvas[:, indices] = voxel_features\n canvas = canvas.reshape(self.batch_size, nchannels, feature_map_y, feature_map_x)\n return canvas, indices\n\n def Voxel3DStack2D(self, voxel_features, coors):\n # coors = np.delete(coors, obj=1, axis=1) #delete z column\n coors = coors[:,2:]\n voxel_group = npi.group_by(coors) #features mean\n coors_idx, voxel_features = voxel_group.mean(voxel_features) #features max\n return voxel_features, coors_idx, voxel_group\n\nclass Point2FeatMap(caffe.Layer):\n def setup(self, bottom, top):\n param = eval(self.param_str)\n # (1,4,100,100,80)\n self.feat_map_size = param['feat_map_size']\n self.point_cloud_range = np.array(param['point_cloud_range'])\n try: self.use_depth = param['use_depth']\n except: self.use_depth = False\n try: self.use_score = param['use_score']\n except: self.use_score = False\n try: self.use_points = param['use_points']\n except: self.use_points = False\n self.thresh = param['thresh']\n self.num_feat = self.feat_map_size[1]\n self.num_points = self.feat_map_size[2]\n self.feat_h = self.feat_map_size[3]\n self.feat_w = self.feat_map_size[4]\n self.feat_map_size = np.array(self.feat_map_size)\n top[0].reshape(1, self.num_feat*self.num_points, self.feat_h, self.feat_w)\n # top[0].reshape(1, self.num_feat, self.num_points, self.feat_h*self.feat_w) #leo added to (1,c,n,h*w)\n # if self.num_feat != 4 and self.num_feat != 5:\n # print(\"[Error] Feature number other than 4 and 5 is not yet implemented\")\n # raise NotImplementedError\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n points = bottom[0].data[...].squeeze()\n point_xy = points[:,:2]\n score = bottom[1].data[...].squeeze()\n if not self.use_depth:\n points = points[:,:3]\n if self.use_score:\n points = np.concatenate((points, score.reshape(-1,1)), axis = -1)\n if len(bottom) > 2:\n extra_feat = bottom[2].data[...].squeeze().transpose()\n self.extra_feat_shape = extra_feat.shape\n points = np.concatenate((points, extra_feat), axis = -1)\n if not self.use_points:\n points = points[:,3:]\n self.p2feat_idx = np.zeros((points.shape[0], 3), dtype=np.int_)\n points = points[score>self.thresh,:]\n point_xy = point_xy[score>self.thresh,:]\n p2feat_idx = self.p2feat_idx[score>self.thresh,:]\n # Calculate grid size of feature map\n # voxel size of [w, h]\n voxel_size = (self.point_cloud_range[3:5]-self.point_cloud_range[:2])/np.array([self.feat_w, self.feat_h])\n # create a feature map of cooresponding shape\n feat_map = np.zeros((1, self.num_feat, self.num_points, self.feat_h, self.feat_w), dtype=np.float32)\n points_in_feat_map = np.zeros((self.feat_h, self.feat_w), dtype=np.int_)\n #point to voxel indices (num, h, w)\n offset = np.array(self.point_cloud_range[:2])\n # Indices (w, h)\n indices = np.floor((point_xy-offset)/voxel_size).astype(np.int_)\n # remove points and indices that are out put range\n feat_map, p2feat_idx=self.to_feat_map(points, feat_map, indices, points_in_feat_map,\n p2feat_idx, self.num_points)\n self.p2feat_idx[score>self.thresh,:] = p2feat_idx\n feat_map = feat_map.reshape(1, -1, self.feat_h, self.feat_w)\n # feat_map = feat_map.reshape(1, self.num_feat, self.num_points, self.feat_h*self.feat_w) #leo added to 
(1,c,n,h*w)\n top[0].data[...] = feat_map\n def backward(self, top, propagate_down, bottom):\n diff = top[0].diff.reshape(1,self.num_feat,self.num_points,self.feat_h,\n self.feat_w)\n backward = np.zeros((1,1,1,self.p2feat_idx.shape[0]))\n mask = (self.p2feat_idx > 0).any(-1)\n indices = self.p2feat_idx[mask]\n diff = diff[:,:,indices[:,0],indices[:,1],indices[:,2]].squeeze().transpose()\n if len(bottom) > 2:\n backward_extra = np.zeros((1,self.extra_feat_shape[1],1,self.extra_feat_shape[0]))\n # OPTIMIZE: get rid of two expand_dims\n extra_feat_backward = diff[:,-self.extra_feat_shape[1]:].transpose()\n extra_feat_backward = np.expand_dims(extra_feat_backward,0)\n extra_feat_backward = np.expand_dims(extra_feat_backward,2)\n backward_extra[..., mask] = extra_feat_backward\n bottom[2].diff[...] = backward_extra\n if self.use_score:\n backward[..., mask] = diff[:,(-self.extra_feat_shape[1]-1)]\n bottom[1].diff[...] = backward\n else:\n if self.use_score:\n backward[..., mask] = diff[:,-1]\n bottom[1].diff[...] = backward\n @staticmethod\n @njit#(nopython=True)#, parallel=True)\n def to_feat_map(points, feat_map, indices, points_in_feat_map, p2feat_idx, num_p_feat = 10):\n # Indices is (width, height)\n for idx in prange(len(indices)):\n feat_index = indices[idx]\n num = points_in_feat_map[feat_index[1],feat_index[0]]\n if num < num_p_feat:\n feat_map[:,:,num,feat_index[1],feat_index[0]] = points[idx]\n points_in_feat_map[feat_index[1],feat_index[0]] += 1\n p2feat_idx[idx,0] = num\n p2feat_idx[idx,1] = feat_index[1]\n p2feat_idx[idx,2] = feat_index[0]\n return feat_map, p2feat_idx\n\n#return (B,C,N,H,W)\nclass Point2FeatMapV3(caffe.Layer):\n def setup(self, bottom, top):\n param = eval(self.param_str)\n # (1,4,100,100,80)\n self.feat_map_size = param['feat_map_size']\n self.point_cloud_range = np.array(param['point_cloud_range'])\n try: self.use_depth = param['use_depth']\n except: self.use_depth = False\n try: self.use_score = param['use_score']\n except: self.use_score = False\n try: self.use_points = param['use_points']\n except: self.use_points = False\n self.thresh = param['thresh']\n self.num_feat = self.feat_map_size[1]\n self.num_points = self.feat_map_size[2]\n self.feat_h = self.feat_map_size[3]\n self.feat_w = self.feat_map_size[4]\n self.feat_map_size = np.array(self.feat_map_size)\n # top[0].reshape(1, self.num_feat*self.num_points, self.feat_h, self.feat_w)\n top[0].reshape(1, self.num_feat, self.num_points, self.feat_h* self.feat_w) #leo added to (1,c,n,h*w)\n # if self.num_feat != 4 and self.num_feat != 5:\n # print(\"[Error] Feature number other than 4 and 5 is not yet implemented\")\n # raise NotImplementedError\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n points = bottom[0].data[...].squeeze()\n point_xy = points[:,:2]\n #score = bottom[1].data[...].squeeze()\n if not self.use_depth:\n points = points[:,:3]\n if self.use_score:\n points = np.concatenate((points, score.reshape(-1,1)), axis = -1)\n if len(bottom) > 1:\n extra_feat = bottom[1].data[...].squeeze().transpose()\n self.extra_feat_shape = extra_feat.shape\n points = np.concatenate((points, extra_feat), axis = -1)\n if not self.use_points:\n points = points[:,3:]\n self.p2feat_idx = np.zeros((points.shape[0], 3), dtype=np.int_)\n #points = points[score>self.thresh,:]\n #point_xy = point_xy[score>self.thresh,:]\n # p2feat_idx = self.p2feat_idx#[score>self.thresh,:]\n # Calculate grid size of feature map\n # voxel size of [w, h]\n voxel_size = 
(self.point_cloud_range[3:5]-self.point_cloud_range[:2])/np.array([self.feat_w, self.feat_h])\n # create a feature map of cooresponding shape\n feat_map = np.zeros((1, self.num_feat, self.num_points, self.feat_h, self.feat_w), dtype=np.float32)\n points_in_feat_map = np.zeros((self.feat_h, self.feat_w), dtype=np.int_)\n #point to voxel indices (num, h, w)\n offset = np.array(self.point_cloud_range[:2])\n # Indices (w, h)\n indices = np.floor((point_xy-offset)/voxel_size).astype(np.int_)\n # remove points and indices that are out put range\n feat_map, p2feat_idx=self.to_feat_map(points, feat_map, indices, points_in_feat_map,\n self.p2feat_idx, self.num_points)\n # self.p2feat_idx[score>self.thresh,:] = p2feat_idx\n self.p2feat_idx = p2feat_idx\n # feat_map = feat_map.reshape(1, -1, self.feat_h, self.feat_w)\n feat_map = feat_map.reshape(1, self.num_feat, self.num_points, self.feat_h* self.feat_w) #leo added to (1,c,n,h*w)\n top[0].data[...] = feat_map\n def backward(self, top, propagate_down, bottom):\n diff = top[0].diff.reshape(1,self.num_feat,self.num_points,self.feat_h,\n self.feat_w)\n #backward = np.zeros((1,1,1,self.p2feat_idx.shape[0]))\n mask = (self.p2feat_idx > 0).any(-1)\n indices = self.p2feat_idx[mask]\n diff = diff[:,:,indices[:,0],indices[:,1],indices[:,2]].squeeze().transpose()\n if len(bottom) > 1:\n # backward_extra = np.zeros((1,self.extra_feat_shape[1],1,self.extra_feat_shape[0])) #old\n # OPTIMIZE: get rid of two expand_dims\n extra_feat_backward = diff[:,-self.extra_feat_shape[1]:].transpose()\n extra_feat_backward = np.expand_dims(extra_feat_backward,0)\n extra_feat_backward = np.expand_dims(extra_feat_backward,2)\n # backward_extra[..., mask] = extra_feat_backward #old\n # bottom[1].diff[...] = backward_extra #old\n\n #####################Test new backward##############################\n bottom[1].diff[...] 
= 0\n bottom[1].diff[..., mask] = extra_feat_backward\n #####################Test new backward##############################\n\n if self.use_score:\n pass\n else:\n if self.use_score:\n pass\n @staticmethod\n @njit#(nopython=True)#, parallel=True)\n def to_feat_map(points, feat_map, indices, points_in_feat_map, p2feat_idx, num_p_feat = 10):\n # Indices is (width, height)\n for idx in prange(len(indices)):\n feat_index = indices[idx]\n num = points_in_feat_map[feat_index[1],feat_index[0]]\n if num < num_p_feat:\n feat_map[:,:,num,feat_index[1],feat_index[0]] = points[idx]\n points_in_feat_map[feat_index[1],feat_index[0]] += 1\n p2feat_idx[idx,0] = num\n p2feat_idx[idx,1] = feat_index[1]\n p2feat_idx[idx,2] = feat_index[0]\n return feat_map, p2feat_idx\n\nclass Point2FeatMapV2(caffe.Layer):\n def setup(self, bottom, top):\n param = eval(self.param_str)\n # (1,4,100,100,80)\n self.feat_map_size = param['feat_map_size']\n self.point_cloud_range = np.array(param['point_cloud_range'])\n try: self.use_depth = param['use_depth']\n except: self.use_depth = False\n try: self.use_score = param['use_score']\n except: self.use_score = False\n try: self.use_points = param['use_points']\n except: self.use_points = False\n self.thresh = param['thresh']\n self.num_feat = self.feat_map_size[1]\n self.num_points = self.feat_map_size[2]\n self.feat_h = self.feat_map_size[3]\n self.feat_w = self.feat_map_size[4]\n self.feat_map_size = np.array(self.feat_map_size)\n top[0].reshape(1, self.num_feat*self.num_points, self.feat_h, self.feat_w)\n # top[0].reshape(1, self.num_feat, self.num_points, self.feat_h*self.feat_w) #leo added to (1,c,n,h*w)\n # if self.num_feat != 4 and self.num_feat != 5:\n # print(\"[Error] Feature number other than 4 and 5 is not yet implemented\")\n # raise NotImplementedError\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n points = bottom[0].data[...].squeeze()\n point_xy = points[:,:2]\n #score = bottom[1].data[...].squeeze()\n if not self.use_depth:\n points = points[:,:3]\n if self.use_score:\n points = np.concatenate((points, score.reshape(-1,1)), axis = -1)\n if len(bottom) > 1:\n extra_feat = bottom[1].data[...].squeeze().transpose()\n self.extra_feat_shape = extra_feat.shape\n points = np.concatenate((points, extra_feat), axis = -1)\n if not self.use_points:\n points = points[:,3:]\n self.p2feat_idx = np.zeros((points.shape[0], 3), dtype=np.int_)\n #points = points[score>self.thresh,:]\n #point_xy = point_xy[score>self.thresh,:]\n # p2feat_idx = self.p2feat_idx#[score>self.thresh,:]\n # Calculate grid size of feature map\n # voxel size of [w, h]\n voxel_size = (self.point_cloud_range[3:5]-self.point_cloud_range[:2])/np.array([self.feat_w, self.feat_h])\n # create a feature map of cooresponding shape\n feat_map = np.zeros((1, self.num_feat, self.num_points, self.feat_h, self.feat_w), dtype=np.float32)\n points_in_feat_map = np.zeros((self.feat_h, self.feat_w), dtype=np.int_)\n #point to voxel indices (num, h, w)\n offset = np.array(self.point_cloud_range[:2])\n # Indices (w, h)\n indices = np.floor((point_xy-offset)/voxel_size).astype(np.int_)\n # remove points and indices that are out put range\n feat_map, p2feat_idx=self.to_feat_map(points, feat_map, indices, points_in_feat_map,\n self.p2feat_idx, self.num_points)\n # self.p2feat_idx[score>self.thresh,:] = p2feat_idx\n self.p2feat_idx = p2feat_idx\n feat_map = feat_map.reshape(1, -1, self.feat_h, self.feat_w)\n # feat_map = feat_map.reshape(1, self.num_feat, self.num_points, self.feat_h*self.feat_w) 
#leo added to (1,c,n,h*w)\n top[0].data[...] = feat_map\n def backward(self, top, propagate_down, bottom):\n diff = top[0].diff.reshape(1,self.num_feat,self.num_points,self.feat_h,\n self.feat_w)\n #backward = np.zeros((1,1,1,self.p2feat_idx.shape[0]))\n mask = (self.p2feat_idx > 0).any(-1)\n indices = self.p2feat_idx[mask]\n diff = diff[:,:,indices[:,0],indices[:,1],indices[:,2]].squeeze().transpose()\n if len(bottom) > 1:\n # backward_extra = np.zeros((1,self.extra_feat_shape[1],1,self.extra_feat_shape[0])) #old\n # OPTIMIZE: get rid of two expand_dims\n extra_feat_backward = diff[:,-self.extra_feat_shape[1]:].transpose()\n extra_feat_backward = np.expand_dims(extra_feat_backward,0)\n extra_feat_backward = np.expand_dims(extra_feat_backward,2)\n # backward_extra[..., mask] = extra_feat_backward #old\n # bottom[1].diff[...] = backward_extra #old\n\n #####################Test new backward##############################\n bottom[1].diff[...] = 0\n bottom[1].diff[..., mask] = extra_feat_backward\n #####################Test new backward##############################\n\n if self.use_score:\n pass\n else:\n if self.use_score:\n pass\n @staticmethod\n @njit#(nopython=True)#, parallel=True)\n def to_feat_map(points, feat_map, indices, points_in_feat_map, p2feat_idx, num_p_feat = 10):\n # Indices is (width, height)\n for idx in prange(len(indices)):\n feat_index = indices[idx]\n num = points_in_feat_map[feat_index[1],feat_index[0]]\n if num < num_p_feat:\n feat_map[:,:,num,feat_index[1],feat_index[0]] = points[idx]\n points_in_feat_map[feat_index[1],feat_index[0]] += 1\n p2feat_idx[idx,0] = num\n p2feat_idx[idx,1] = feat_index[1]\n p2feat_idx[idx,2] = feat_index[0]\n return feat_map, p2feat_idx\n\nclass Point2FeatMapV4(caffe.Layer):\n def setup(self, bottom, top):\n param = eval(self.param_str)\n # (1,4,100,100,80)\n self.feat_map_size = param['feat_map_size']\n self.point_cloud_range = np.array(param['point_cloud_range'])\n try: self.use_depth = param['use_depth']\n except: self.use_depth = False\n try: self.use_score = param['use_score']\n except: self.use_score = False\n try: self.use_points = param['use_points']\n except: self.use_points = False\n self.thresh = param['thresh']\n self.num_feat = self.feat_map_size[1]\n self.num_points = self.feat_map_size[2]\n self.feat_h = self.feat_map_size[3]\n self.feat_w = self.feat_map_size[4]\n self.feat_map_size = np.array(self.feat_map_size)\n self.batch_size = bottom[1].data.shape[0]\n top[0].reshape(self.batch_size, self.num_feat*self.num_points, self.feat_h, self.feat_w)\n # top[0].reshape(1, self.num_feat, self.num_points, self.feat_h*self.feat_w) #leo added to (1,c,n,h*w)\n # if self.num_feat != 4 and self.num_feat != 5:\n # print(\"[Error] Feature number other than 4 and 5 is not yet implemented\")\n # raise NotImplementedError\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n points = bottom[0].data[...]\n point_xy = points[:,:,:2]\n #score = bottom[1].data[...].squeeze()\n if not self.use_depth:\n points = points[:,:,:3]\n if self.use_score:\n points = np.concatenate((points, score.reshape(-1,1)), axis = -1)\n if len(bottom) > 1:\n\n extra_feat = bottom[1].data[...].squeeze(2).transpose(0,2,1)\n self.extra_feat_shape = extra_feat.shape\n points = np.concatenate((points, extra_feat), axis = -1)\n if not self.use_points:\n points = points[:,:,3:]\n self.p2feat_idx = np.zeros((self.batch_size,points.shape[1], 3), dtype=np.int_)\n #points = points[score>self.thresh,:]\n #point_xy = point_xy[score>self.thresh,:]\n # 
p2feat_idx = self.p2feat_idx#[score>self.thresh,:]\n # Calculate grid size of feature map\n # voxel size of [w, h]\n voxel_size = (self.point_cloud_range[3:5]-self.point_cloud_range[:2])/np.array([self.feat_w, self.feat_h])\n # create a feature map of cooresponding shape\n feat_map = np.zeros((self.batch_size, self.num_feat, self.num_points, self.feat_h, self.feat_w), dtype=np.float32)\n points_in_feat_map = np.zeros((self.batch_size, self.feat_h, self.feat_w), dtype=np.int_)\n #point to voxel indices (num, h, w)\n offset = np.array(self.point_cloud_range[:2])\n # Indices (w, h)\n indices = np.floor((point_xy-offset)/voxel_size).astype(np.int_)\n # remove points and indices that are out put range\n feat_map, p2feat_idx=self.to_feat_map(points, feat_map, indices, points_in_feat_map,\n self.p2feat_idx, self.num_points)\n # self.p2feat_idx[score>self.thresh,:] = p2feat_idx\n self.p2feat_idx = p2feat_idx\n feat_map = feat_map.reshape(self.batch_size, -1, self.feat_h, self.feat_w)\n # feat_map = feat_map.reshape(1, self.num_feat, self.num_points, self.feat_h*self.feat_w) #leo added to (1,c,n,h*w)\n top[0].data[...] = feat_map\n def backward(self, top, propagate_down, bottom):\n diff = top[0].diff.reshape(self.batch_size,self.num_feat,self.num_points,self.feat_h,\n self.feat_w)\n bottom[1].diff[...] = 0\n for batch in range(self.batch_size):\n #backward = np.zeros((1,1,1,self.p2feat_idx.shape[0]))\n mask = (self.p2feat_idx[batch,...] > 0).any(-1)\n indices = self.p2feat_idx[batch, mask]\n diff_ = diff[batch,:,indices[:,0],indices[:,1],indices[:,2]].squeeze().transpose()\n if len(bottom) > 1:\n # backward_extra = np.zeros((1,self.extra_feat_shape[1],1,self.extra_feat_shape[0])) #old\n # OPTIMIZE: get rid of two expand_dims\n extra_feat_backward = diff_[:,-self.extra_feat_shape[1]:].transpose()\n # extra_feat_backward = np.expand_dims(extra_feat_backward,0)\n # print(\"extra_feat_shape\", extra_feat_backward.shape)\n extra_feat_backward = np.expand_dims(extra_feat_backward,-1)\n # backward_extra[..., mask] = extra_feat_backward #old\n # bottom[1].diff[...] 
= backward_extra #old\n\n #####################Test new backward##############################\n bottom[1].diff[batch,:,:,mask] = extra_feat_backward\n #####################Test new backward##############################\n\n if self.use_score:\n continue\n else:\n if self.use_score:\n continue\n #@njit#(nopython=True)#, parallel=True)\n @staticmethod\n @njit\n def to_feat_map(points, feat_map, indices, points_in_feat_map, p2feat_idx, num_p_feat = 10):\n # Indices is (width, height)\n for batch in prange(indices.shape[0]):\n for idx in prange(indices.shape[1]):\n feat_index = indices[batch,idx]\n num = points_in_feat_map[batch,feat_index[1],feat_index[0]]\n if num < num_p_feat:\n feat_map[batch,:,num,feat_index[1],feat_index[0]] = points[batch,idx]\n points_in_feat_map[batch,feat_index[1],feat_index[0]] += 1\n p2feat_idx[batch,idx,0] = num\n p2feat_idx[batch,idx,1] = feat_index[1]\n p2feat_idx[batch,idx,2] = feat_index[0]\n return feat_map, p2feat_idx\n\nclass Point2Voxel3D(caffe.Layer):\n def setup(self, bottom, top):\n param = eval(self.param_str)\n self.extra_feat_shape = bottom[0].data.shape\n self.p2voxel_idx_shape = bottom[1].data.shape\n self.max_voxels = param['max_voxels']\n self.points_per_voxel = param['points_per_voxel']\n top[0].reshape(1, self.points_per_voxel*self.extra_feat_shape[1], 1, self.max_voxels)\n\n def reshape(self, bottom, top):\n pass\n\n def forward(self, bottom, top):\n extra_feat = bottom[0].data[...]\n p2voxel_idx = bottom[1].data[...].astype(np.int_)\n voxels = np.zeros((1, self.extra_feat_shape[1], self.points_per_voxel, self.max_voxels))\n num = p2voxel_idx[:,:,0].squeeze()\n voxel_idx = p2voxel_idx[:,:,1].squeeze()\n point_idx = p2voxel_idx[:,:,2].squeeze()\n voxels[:,:,num,voxel_idx] = extra_feat[...,point_idx].squeeze()\n voxels = np.expand_dims(voxels.reshape(1,-1,self.max_voxels), 2)\n top[0].reshape(1, self.points_per_voxel*self.extra_feat_shape[1], 1, self.max_voxels)\n top[0].data[...] = voxels\n\n def backward(self, top, propagate_down, bottom):\n diff = top[0].diff.reshape(1, self.extra_feat_shape[1], self.points_per_voxel, self.max_voxels)\n p2voxel_idx = bottom[1].data[...].astype(np.int_)\n num = p2voxel_idx[:,:,0].squeeze()\n voxel_idx = p2voxel_idx[:,:,1].squeeze()\n point_idx = p2voxel_idx[:,:,2].squeeze()\n diff = diff[:, :, num, voxel_idx]\n backward = np.zeros(bottom[0].data.shape)\n backward[..., point_idx] = np.expand_dims(diff, 2)\n bottom[0].diff[...] = backward\n\nclass SegWeight(caffe.Layer):\n def setup(self, bottom, top):\n labels = bottom[0].data\n seg_weights = self.prepare_loss_weights(labels)\n top[0].reshape(*seg_weights.shape)\n\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n labels = bottom[0].data\n seg_weights = self.prepare_loss_weights(labels)\n top[0].data[...] 
= seg_weights\n def prepare_loss_weights(self,\n labels,\n pos_cls_weight=1.0,\n neg_cls_weight=1.0,\n dtype=\"float32\"):\n\n positives = labels > 0\n negatives = labels == 0\n negative_cls_weights = negatives.astype(dtype) * neg_cls_weight\n posetive_cls_weights = positives.astype(dtype) * pos_cls_weight\n seg_weights = negative_cls_weights + posetive_cls_weights\n reg_weights = positives.astype(dtype)\n\n pos_normalizer = np.sum(positives, 1, keepdims=True).astype(dtype)\n seg_weights /= np.clip(pos_normalizer, a_min=1.0, a_max=None) #(1, 107136)\n\n return seg_weights\n def backward(self, top, propagate_down, bottom):\n pass\n\nclass PrepareLossWeight(caffe.Layer):\n def setup(self, bottom, top):\n labels = bottom[0].data\n cls_weights, reg_weights, cared = self.prepare_loss_weights(labels)\n\n top[0].reshape(*cared.shape)\n top[1].reshape(*reg_weights.shape) #reg_outside_weights\n top[2].reshape(*cls_weights.shape)\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n labels = bottom[0].data\n cls_weights, reg_weights, cared = self.prepare_loss_weights(labels)\n top[0].data[...] = cared\n top[1].data[...] = reg_weights #reg_outside_weights\n top[2].data[...] = cls_weights\n def prepare_loss_weights(self,\n labels,\n pos_cls_weight=1.0, # TODO: pass params here\n neg_cls_weight=1.0,\n loss_norm_type=LossNormType.NormByNumPositives,\n dtype=\"float32\"):\n \"\"\"get cls_weights and reg_weights from labels.\n \"\"\"\n cared = labels >= 0\n # print(\"label \", np.unique(labels, return_counts=True))\n # cared: [N, num_anchors]\n positives = labels > 0\n negatives = labels == 0\n negative_cls_weights = negatives.astype(dtype) * neg_cls_weight\n posetive_cls_weights = positives.astype(dtype) * pos_cls_weight #(1, 107136)\n cls_weights = negative_cls_weights + posetive_cls_weights\n reg_weights = positives.astype(dtype)\n\n if loss_norm_type == LossNormType.NormByNumExamples:\n num_examples = cared.astype(dtype).sum(1, keepdims=True)\n num_examples = np.clip(num_examples, a_min=1.0, a_max=None)\n cls_weights /= num_examples\n bbox_normalizer = np.sum(positives, 1, keepdims=True).astype(dtype)\n reg_weights /= np.clip(bbox_normalizer, a_min=1.0, a_max=None)\n\n elif loss_norm_type == LossNormType.NormByNumPositives: # for focal loss\n pos_normalizer = np.sum(positives, 1, keepdims=True).astype(dtype)\n reg_weights /= np.clip(pos_normalizer, a_min=1.0, a_max=None) #(1, 107136)\n cls_weights /= np.clip(pos_normalizer, a_min=1.0, a_max=None) #(1, 107136)\n\n\n elif loss_norm_type == LossNormType.NormByNumPosNeg:\n pos_neg = np.stack([positives, negatives], a_min=-1).astype(dtype)\n normalizer = np.sum(pos_neg, 1, keepdims=True) # [N, 1, 2]\n cls_normalizer = np.sum((pos_neg * normalizer),-1) # [N, M]\n cls_normalizer = np.clip(cls_normalizer, a_min=1.0, a_max=None)\n # cls_normalizer will be pos_or_neg_weight/num_pos_or_neg\n normalizer = np.clip(normalizer, a_min=1.0, a_max=None)\n reg_weights /= normalizer[:, 0:1, 0]\n cls_weights /= cls_normalizer\n\n else:\n raise ValueError(\n \"unknown loss norm type. 
available: {list(LossNormType)}\")\n\n        return cls_weights, reg_weights, cared\n    def backward(self, top, propagate_down, bottom):\n        pass\n\n#For Point-Wise model\nclass PrepareLossWeightV2(caffe.Layer):\n    def setup(self, bottom, top):\n        labels = bottom[0].data\n        cls_weights, reg_weights = self.prepare_loss_weights(labels)\n\n        top[0].reshape(*reg_weights.shape) #reg_outside_weights\n        top[1].reshape(*cls_weights.shape)\n    def reshape(self, bottom, top):\n        pass\n    def forward(self, bottom, top):\n        labels = bottom[0].data\n        cls_weights, reg_weights = self.prepare_loss_weights(labels)\n        top[0].data[...] = reg_weights #reg_outside_weights\n        top[1].data[...] = cls_weights\n    def prepare_loss_weights(self,\n                            labels,\n                            pos_cls_weight=1.0,\n                            neg_cls_weight=1.0,\n                            loss_norm_type=LossNormType.NormByNumPositives,\n                            dtype=\"float32\"):\n\n        # print(\"label \", np.unique(labels, return_counts=True))\n        positives = labels > 0\n        negatives = labels == 0\n        negative_cls_weights = negatives.astype(dtype) * neg_cls_weight\n        positive_cls_weights = positives.astype(dtype) * pos_cls_weight #(1, 107136)\n        cls_weights = negative_cls_weights + positive_cls_weights\n        reg_weights = positives.astype(dtype)\n\n        if loss_norm_type == LossNormType.NormByNumExamples:\n            num_examples = (labels >= 0).astype(dtype).sum(1, keepdims=True)\n            num_examples = np.clip(num_examples, a_min=1.0, a_max=None)\n            cls_weights /= num_examples\n            bbox_normalizer = np.sum(positives, 1, keepdims=True).astype(dtype)\n            reg_weights /= np.clip(bbox_normalizer, a_min=1.0, a_max=None)\n\n        elif loss_norm_type == LossNormType.NormByNumPositives: # for focal loss\n            pos_normalizer = np.sum(positives, 1, keepdims=True).astype(dtype)\n            reg_weights /= np.clip(pos_normalizer, a_min=1.0, a_max=None) #(1, 107136)\n            cls_weights /= np.clip(pos_normalizer, a_min=1.0, a_max=None) #(1, 107136)\n\n\n        elif loss_norm_type == LossNormType.NormByNumPosNeg:\n            pos_neg = np.stack([positives, negatives], axis=-1).astype(dtype)\n            normalizer = np.sum(pos_neg, 1, keepdims=True) # [N, 1, 2]\n            cls_normalizer = np.sum((pos_neg * normalizer),-1) # [N, M]\n            cls_normalizer = np.clip(cls_normalizer, a_min=1.0, a_max=None)\n            # cls_normalizer will be pos_or_neg_weight/num_pos_or_neg\n            normalizer = np.clip(normalizer, a_min=1.0, a_max=None)\n            reg_weights /= normalizer[:, 0:1, 0]\n            cls_weights /= cls_normalizer\n\n        else:\n            raise ValueError(\n                f\"unknown loss norm type. available: {list(LossNormType)}\")\n        return cls_weights, reg_weights\n    def backward(self, top, propagate_down, bottom):\n        pass\n\nclass LabelEncode(caffe.Layer):\n    def setup(self, bottom, top):\n\n        labels = bottom[0].data\n        cared = bottom[1].data\n\n        cls_targets = labels * cared # (1, 107136)\n        cls_targets = cls_targets.astype(int)\n\n        self.num_class = 1\n        one_hot_targets = np.eye(self.num_class+1)[cls_targets] #One_hot label -- make sure one hot class is <num_class+1>\n        one_hot_targets = one_hot_targets[..., 1:]\n\n        top[0].reshape(*one_hot_targets.shape) #reshape to caffe pattern\n    def reshape(self, bottom, top):\n        pass\n    def forward(self, bottom, top):\n\n        labels = bottom[0].data # (1, 107136)\n        cared = bottom[1].data\n\n        cls_targets = labels * cared\n        cls_targets = cls_targets.astype(int)\n\n        one_hot_targets = np.eye(self.num_class+1)[cls_targets] #One_hot label -- make sure one hot class is <num_class+1>\n        one_hot_targets = one_hot_targets[..., 1:]\n\n        top[0].data[...] 
= one_hot_targets\n def backward(self, top, propagate_down, bottom):\n pass\n\n#For Point-Wise model\nclass LabelEncodeV2(caffe.Layer):\n def setup(self, bottom, top):\n\n labels = bottom[0].data\n labels = labels.astype(int)\n labels = np.expand_dims(labels,-1)\n top[0].reshape(*labels.shape) #reshape to caffe pattern\n\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n\n labels = bottom[0].data # (1, 107136)\n labels = labels.astype(int)\n labels = np.expand_dims(labels,-1)\n top[0].data[...] = labels\n\n def backward(self, top, propagate_down, bottom):\n pass\n\nclass WeightFocalLoss(caffe.Layer):\n def setup(self, bottom, top):\n params = eval(self.param_str)\n self.gamma = int(params['focusing_parameter'])\n self.alpha = params['alpha']\n self.batch_size = bottom[0].data.shape[0]\n\n def reshape(self, bottom, top):\n # check input dimensions match\n # if bottom[0].num != bottom[1].num:\n # raise Exception(\"Infered scores and labels must have the same dimension.\")\n top[0].reshape(1)\n def forward(self, bottom, top):\n self._p = bottom[0].data\n self.label = bottom[1].data\n self.cls_weights = bottom[2].data\n self.cls_weights = np.expand_dims(self.cls_weights,-1)\n\n log1p = np.log1p(np.exp(-np.abs(self._p))) #logits\n\n self._p_t = 1 / (1 + np.exp(-self._p)) # Compute sigmoid activations\n\n self.first = (1-self.label) * (1-self.alpha) + self.label * self.alpha\n\n self.second = (1-self.label) * ((self._p_t) ** self.gamma) + self.label * ((1 - self._p_t) ** self.gamma)\n\n self.sigmoid_cross_entropy = (1-self.label) * (log1p + np.clip(self._p, a_min=0, a_max=None)) + \\\n self.label * (log1p - np.clip(self._p, a_min=None, a_max=0))\n\n logprobs = ((1-self.label) * self.first * self.second * self.sigmoid_cross_entropy) + \\\n (self.label * self.first * self.second * self.sigmoid_cross_entropy)\n\n top[0].data[...] = np.sum(logprobs*self.cls_weights) / self.batch_size\n\n def backward(self, top, propagate_down, bottom):\n\n dev_log1p = np.sign(self._p) * (1 / (np.exp(np.abs(self._p))+1)) # might fix divided by 0 x/|x| bug\n\n self.dev_sigmoid_cross_entropy = (1-self.label) * (dev_log1p - np.where(self._p<=0, 0, 1)) + \\\n self.label * (dev_log1p + np.where(self._p>=0, 0, 1))\n\n delta = (1-self.label) * (self.first * self.second * (self.gamma * (1-self._p_t) * self.sigmoid_cross_entropy - self.dev_sigmoid_cross_entropy)) + \\\n self.label * (-self.first * self.second * (self.gamma * self._p_t * self.sigmoid_cross_entropy + self.dev_sigmoid_cross_entropy))\n\n bottom[0].diff[...] 
= delta * self.cls_weights / self.batch_size\n\nclass WeightedSmoothL1Loss(caffe.Layer):\n def setup(self, bottom, top):\n self.sigma = 3\n self.encode_rad_error_by_sin = True\n self.batch_size = bottom[0].data.shape[0]\n def reshape(self, bottom, top):\n # check input dimensions match\n # if bottom[0].num != bottom[1].num:\n # raise Exception(\"Infered scores and labels must have the same dimension.\")\n top[0].reshape(1)\n def forward(self, bottom, top):\n box_preds = bottom[0].data\n reg_targets = bottom[1].data\n self.reg_weights = bottom[2].data\n self.reg_weights = np.expand_dims(self.reg_weights,-1)\n\n self.diff = box_preds - reg_targets\n\n #use sin_difference rad to sin\n if self.encode_rad_error_by_sin:\n diff_rot = self.diff[...,-1:].copy() #copy rotation without add sin\n self.sin_diff = np.sin(diff_rot)\n self.cos_diff = np.cos(diff_rot)\n self.diff[...,-1] = np.sin(self.diff[...,-1]) #use sin_difference\n\n self.abs_diff = np.abs(self.diff)\n #change from less than to less or equal\n self.cond = self.abs_diff <= (1/(self.sigma**2))\n loss = np.where(self.cond, 0.5 * self.sigma**2 * self.abs_diff**2,\n self.abs_diff - 0.5/self.sigma**2)\n\n reg_loss = loss * self.reg_weights\n\n top[0].data[...] = np.sum(reg_loss) / self.batch_size # * 2\n def backward(self, top, propagate_down, bottom):\n\n if self.encode_rad_error_by_sin:\n\n delta = np.where(self.cond[...,:-1], (self.sigma**2) * self.diff[...,:-1], np.sign(self.diff[...,:-1]))\n\n delta_rotation = np.where(self.cond[...,-1:], (self.sigma**2) * self.sin_diff * self.cos_diff,\n np.sign(self.sin_diff) * self.cos_diff) #if sign(0) is gonna be 0!\n\n delta = np.concatenate([delta, delta_rotation], axis=-1)\n\n else:\n delta = np.where(self.cond, (self.sigma**2) * self.diff, np.sign(self.diff))\n bottom[0].diff[...] = delta * self.reg_weights / self.batch_size# * 2\n\nclass FocalLoss(caffe.Layer):\n def setup(self, bottom, top):\n params = eval(self.param_str)\n self.gamma = int(params['focusing_parameter'])\n self.alpha = params['alpha']\n def reshape(self, bottom, top):\n # check input dimensions match\n # if bottom[0].num != bottom[1].num:\n # raise Exception(\"Infered scores and labels must have the same dimension.\")\n top[0].reshape(1)\n def forward(self, bottom, top):\n self._p = bottom[0].data\n self.label = bottom[1].data\n\n log1p = np.log1p(np.exp(-np.abs(self._p))) #logits\n\n self._p_t = 1 / (1 + np.exp(-self._p)) # Compute sigmoid activations\n\n self.first = (1-self.label) * (1-self.alpha) + self.label * self.alpha\n\n self.second = (1-self.label) * (self._p_t ** self.gamma) + self.label * ((1 - self._p_t) ** self.gamma)\n\n self.sigmoid_cross_entropy = (1-self.label) * (log1p + np.clip(self._p, a_min=0, a_max=None)) + \\\n self.label * (log1p - np.clip(self._p, a_min=None, a_max=0))\n\n logprobs = ((1-self.label) * self.first * self.second * self.sigmoid_cross_entropy) + \\\n (self.label * self.first * self.second * self.sigmoid_cross_entropy)\n\n top[0].data[...] 
= np.mean(logprobs)\n\n def backward(self, top, propagate_down, bottom):\n\n dev_log1p = np.sign(self._p) * (1 / (np.exp(np.abs(self._p))+1)) # might fix divided by 0 x/|x| bug\n\n self.dev_sigmoid_cross_entropy = (1-self.label) * (dev_log1p - np.where(self._p<=0, 0, 1)) + \\\n self.label * (dev_log1p + np.where(self._p>=0, 0, 1))\n\n delta = (1-self.label) * (self.first * self.second * (self.gamma * (1-self._p_t) * self.sigmoid_cross_entropy - self.dev_sigmoid_cross_entropy)) + \\\n self.label * (-self.first * self.second * (self.gamma * self._p_t * self.sigmoid_cross_entropy + self.dev_sigmoid_cross_entropy))\n\n bottom[0].diff[...] = delta\n\nclass DiceLoss(caffe.Layer):\n def setup(self, bottom, top):\n params = eval(self.param_str)\n self.belta = params['belta'] #0.5\n self.alpha = params['alpha'] #0.5\n self.eps = 1e-5\n\n def reshape(self, bottom, top):\n top[0].reshape(1)\n def forward(self, bottom, top):\n self._p = bottom[0].data\n self.label = bottom[1].data\n\n self.tp = self._p * self.label\n\n self.fn = (1- self._p ) * self.label\n\n self.fp = self._p * (1 - self.label)\n\n self.union = self.tp + self.alpha * self.fn + self.belta * self.fp\n\n logprobs = (np.sum(self.tp) + self.eps) / (np.sum(self.union) + self.eps)\n\n top[0].data[...] = 1 - logprobs\n\n def backward(self, top, propagate_down, bottom):\n\n delta = self.alpha * np.square(self.label) / (np.square(self.union) + self.eps)\n\n bottom[0].diff[...] = delta\n\n#for v-net paper\nclass DiceLossV2(caffe.Layer):\n def setup(self, bottom, top):\n self.eps = 1e-5\n self.smooth = 1\n\n def reshape(self, bottom, top):\n top[0].reshape(1)\n def forward(self, bottom, top):\n self._p = bottom[0].data\n self.label = bottom[1].data\n\n self.inter = np.sum(self._p * self.label)\n self.union = np.sum(self._p + self.label)\n\n logprobs = (2 * self.inter + self.smooth) / (self.union + self.smooth)\n\n top[0].data[...] = logprobs\n\n def backward(self, top, propagate_down, bottom):\n\n delta = (self.label * (self.union) - 2 * self._p * (self.inter)) / (np.square(self.union) + self.eps)\n\n bottom[0].diff[...] = 2 * delta\n\nclass DiceLossV3(caffe.Layer):\n def setup(self, bottom, top):\n # params = eval(self.param_str)\n # self.belta = params['belta'] #0.5\n # self.alpha = params['alpha'] #0.5\n self.eps = 1e-5\n self.smooth = 1\n\n def reshape(self, bottom, top):\n top[0].reshape(1)\n def forward(self, bottom, top):\n self._p = bottom[0].data\n self.label = bottom[1].data\n\n self.tp = self._p * self.label\n self.union = self._p + self.label\n\n logprobs = (2 * np.sum(self.tp) + self.smooth) / (np.sum(self.union) + self.smooth)\n\n top[0].data[...] = logprobs\n\n def backward(self, top, propagate_down, bottom):\n\n delta = 2 * np.square(self.label) / (np.square(self.union) + self.eps)\n\n bottom[0].diff[...] = delta\n\nclass IoUSegLoss(caffe.Layer):\n def setup(self, bottom, top):\n # params = eval(self.param_str)\n # self.belta = params['belta'] #0.5\n # self.alpha = params['alpha'] #0.5\n self.eps = 1e-5\n\n def reshape(self, bottom, top):\n top[0].reshape(1)\n def forward(self, bottom, top):\n self._p = bottom[0].data\n self.label = bottom[1].data\n\n self.inter = self._p * self.label\n self.union = self._p + self.label - self.inter\n self.iou = self.inter/self.union\n\n logprobs = (np.sum(self.inter) + self.eps) / (np.sum(self.union) + self.eps)\n\n top[0].data[...] 
= 1 - logprobs\n\n    def backward(self, top, propagate_down, bottom):\n\n        delta = np.where(self.label>0, -1/(self.union + self.eps), self.inter/(np.square(self.union)+ self.eps))\n\n        bottom[0].diff[...] = delta\n\nclass DiceFocalLoss(caffe.Layer):\n    def setup(self, bottom, top):\n        params = eval(self.param_str)\n        self.gamma = int(params['focusing_parameter']) #2\n        self.alpha = params['alpha'] #0.25\n        self.dice_belta = params['dice_belta'] #0.5\n        self.dice_alpha = params['dice_alpha'] #0.5\n        self.lamda = params['lamda'] #trade off between focal and dice loss # 0.1, 0.5 , 1\n        self.eps = 1e-5 #numerical stability, as in the other Dice losses\n\n\n    def reshape(self, bottom, top):\n        # check input dimensions match\n        # if bottom[0].num != bottom[1].num:\n        #     raise Exception(\"Infered scores and labels must have the same dimension.\")\n        top[0].reshape(1)\n    def forward(self, bottom, top):\n        self._p = bottom[0].data\n        self.label = bottom[1].data\n        self.c = len(np.unique(self.label)) #no background\n\n        ####################################Focal loss##########################\n        self._p_t = 1 / (1 + np.exp(-self._p)) # Compute sigmoid activations\n\n        self.first = (1-self.label) * (1-self.alpha) + self.label * self.alpha\n\n        self.second = (1-self.label) * ((self._p_t) ** self.gamma) + self.label * ((1 - self._p_t) ** self.gamma)\n\n        log1p = np.log1p(np.exp(-np.abs(self._p)))\n\n        self.sigmoid_cross_entropy = (1-self.label) * (log1p + np.clip(self._p, a_min=0, a_max=None)) + \\\n            self.label * (log1p - np.clip(self._p, a_min=None, a_max=0))\n\n        focal = ((1-self.label) * self.first * self.second * self.sigmoid_cross_entropy) + \\\n            (self.label * self.first * self.second * self.sigmoid_cross_entropy)\n\n        focal = np.mean(focal)\n\n        ########################################Dice############################\n\n        self.tp = np.sum(self._p * self.label)\n\n        self.fn = np.sum((1- self._p ) * self.label)\n\n        self.fp = np.sum(self._p * (1 - self.label))\n\n        self.union = self.tp + self.dice_alpha * self.fn + self.dice_belta * self.fp\n\n        dice = self.tp / (self.union + self.eps )\n\n        top[0].data[...] = self.c - dice - self.lamda * focal #average fl\n\n    def backward(self, top, propagate_down, bottom):\n\n        dev_log1p = np.sign(self._p) * (1 / (np.exp(np.abs(self._p))+1)) # might fix divided by 0 x/|x| bug\n\n        self.dev_sigmoid_cross_entropy = (1-self.label) * (dev_log1p - np.where(self._p<=0, 0, 1)) + \\\n            self.label * (dev_log1p + np.where(self._p>=0, 0, 1))\n\n        focal_delta = (1-self.label) * (self.first * self.second * (self.gamma * (1-self._p_t) * self.sigmoid_cross_entropy - self.dev_sigmoid_cross_entropy)) + \\\n            self.label * (-self.first * self.second * (self.gamma * self._p_t * self.sigmoid_cross_entropy + self.dev_sigmoid_cross_entropy))\n\n        ########################################Dice############################\n        dev_tp = np.sum(self.label)\n\n        dev_fn = np.sum(-self.label)\n\n        dev_fp = np.sum(1-self.label)\n\n        dice_delta = (self.tp * (dev_tp + self.dice_alpha * dev_fn + self.dice_belta * dev_fp) - dev_tp * self.union) / ((self.union)**2 + self.eps)\n\n        delta = -(dice_delta + self.lamda * focal_delta)\n\n        bottom[0].diff[...] 
= delta\n\nclass IoULoss(caffe.Layer):\n def setup(self, bottom, top):\n # params = eval(self.param_str)\n self.eps = 1e-5\n\n def reshape(self, bottom, top):\n top[0].reshape(1)\n def forward(self, bottom, top):\n\n pred = bottom[0].data\n gt_box = bottom[1].data\n self.points_label = bottom[2].data\n self.reg_weights = bottom[3].data\n self.reg_weights = np.expand_dims(self.reg_weights,-1)\n points = bottom[4].data[...,:3]\n\n pred = pred * self.points_label #if label==0 do not count iou\n\n self.pred_up = pred[..., 5:6]\n self.pred_down = pred[..., 2:3]\n self.pred_fwd = pred[..., 3:4]\n self.pred_bwd = pred[..., 0:1]\n self.pred_right = pred[..., 4:5]\n self.pred_left = pred[..., 1:2]\n\n self.gt_up = gt_box[..., 5:6]\n self.gt_down = gt_box[..., 2:3]\n self.gt_fwd = gt_box[..., 3:4]\n self.gt_bwd = gt_box[..., 0:1]\n self.gt_right = gt_box[..., 4:5]\n self.gt_left = gt_box[..., 1:2]\n\n pred_min_points = points - pred[..., :3]\n pred_max_points = points + pred[..., 3:-1]\n\n gt_min_points = points - gt_box[..., :3]\n gt_max_points = points + gt_box[..., 3:-1]\n\n pred_area = np.abs((self.pred_up + self.pred_down) * (self.pred_fwd + self.pred_bwd) * (self.pred_right + self.pred_left))\n # pred_area = np.prod(pred_max_points - pred_min_points, axis = -1)\n\n gt_area = (self.gt_up + self.gt_down) * (self.gt_fwd + self.gt_bwd) * (self.gt_right + self.gt_left)\n\n # self.inter_h = np.minimum(self.pred_up, self.gt_up) + np.minimum(self.pred_down, self.gt_down)\n # self.inter_w = np.minimum(self.pred_fwd, self.gt_fwd) + np.minimum(self.pred_bwd, self.gt_bwd)\n # self.inter_l = np.minimum(self.pred_right, self.gt_right) + np.minimum(self.pred_left, self.gt_left)\n\n h_pred_max = np.maximum(pred_max_points[..., 2:], pred_min_points[..., 2:])\n h_pred_min = np.minimum(pred_max_points[..., 2:], pred_min_points[..., 2:])\n\n w_pred_max = np.maximum(pred_max_points[..., 0:1], pred_min_points[..., 0:1])\n w_pred_min = np.minimum(pred_max_points[..., 0:1], pred_min_points[..., 0:1])\n\n l_pred_max = np.maximum(pred_max_points[..., 1:2], pred_min_points[..., 1:2])\n l_pred_min = np.minimum(pred_max_points[..., 1:2], pred_min_points[..., 1:2])\n\n self.inter_h = np.minimum(h_pred_max, gt_max_points[..., 2:]) - np.maximum(h_pred_min, gt_min_points[..., 2:])\n self.inter_w = np.minimum(w_pred_max, gt_max_points[..., 0:1]) - np.maximum(w_pred_min, gt_min_points[..., 0:1])\n self.inter_l = np.minimum(l_pred_max, gt_max_points[..., 1:2]) - np.maximum(l_pred_min, gt_min_points[..., 1:2])\n\n self.inter_h = np.clip(self.inter_h, a_min=0, a_max=None)\n self.inter_w = np.clip(self.inter_w, a_min=0, a_max=None)\n self.inter_l = np.clip(self.inter_l, a_min=0, a_max=None)\n\n # self.inter_h = np.minimum(pred_max_points[..., 2:], gt_max_points[..., 2:]) - np.maximum(pred_min_points[..., 2:], gt_min_points[..., 2:])\n # self.inter_w = np.minimum(pred_max_points[..., 0:1], gt_max_points[..., 0:1]) - np.maximum(pred_min_points[..., 0:1], gt_min_points[..., 0:1])\n # self.inter_l = np.minimum(pred_max_points[..., 1:2], gt_max_points[..., 1:2]) - np.maximum(pred_min_points[..., 1:2], gt_min_points[..., 1:2])\n\n # self.inter = np.clip(self.inter_h, a_min=0, a_max=None) * np.clip(self.inter_w, a_min=0, a_max=None) * np.clip(self.inter_l, a_min=0, a_max=None)\n self.inter = self.inter_h * self.inter_w * self.inter_l\n self.union = pred_area + gt_area - self.inter\n\n iou = (self.inter + self.eps) / (self.union + self.eps) #* self.points_label #if label==0 do not count iou\n # print(\"iou\", np.unique(iou<=0, 
return_counts=True))\n # print(\"iou less than 0\", iou[iou<=0])\n # print(\"self.inter <= 0\", self.inter[iou<=0])\n # print(\"self.union less than 0\", self.union[iou<=0])\n # print(\"pred_area less than 0\", pred_area[iou<=0])\n # print(\"gt_area less than 0\", gt_area[iou<=0])\n logprobs = -np.log(iou)\n\n top[0].data[...] = np.sum(logprobs * self.reg_weights)\n\n def backward(self, top, propagate_down, bottom):\n\n dev_h = (self.pred_left * self.pred_fwd) + (self.pred_left * self.pred_bwd) + (self.pred_right * self.pred_fwd) + (self.pred_right * self.pred_bwd)\n dev_w = (self.pred_left * self.pred_up) + (self.pred_left * self.pred_down) + (self.pred_right * self.pred_up) + (self.pred_right * self.pred_down)\n dev_l = (self.pred_up * self.pred_fwd) + (self.pred_up * self.pred_bwd) + (self.pred_down * self.pred_fwd) + (self.pred_down * self.pred_bwd)\n\n dev_iou_h = self.inter_w * self.inter_l\n dev_iou_w = self.inter_h * self.inter_l\n dev_iou_l = self.inter_w * self.inter_h\n\n # dev_iou_up = np.where(self.pred_up < self.gt_up, dev_iou_h, 0)\n # dev_iou_down = np.where(self.pred_down < self.gt_down, dev_iou_h, 0)\n # dev_iou_fwd = np.where(self.pred_fwd < self.gt_fwd, dev_iou_w, 0)\n # dev_iou_bwd = np.where(self.pred_bwd < self.gt_bwd, dev_iou_w, 0)\n # dev_iou_right = np.where(self.pred_right < self.gt_right, dev_iou_l, 0)\n # dev_iou_left = np.where(self.pred_left < self.gt_left, dev_iou_l, 0)\n\n cond_h = (self.pred_up < self.gt_up) + (self.pred_down < self.gt_down) # or condition\n cond_w = (self.pred_fwd < self.gt_fwd) + (self.pred_bwd < self.gt_bwd)\n cond_l = (self.pred_right < self.gt_right) + (self.pred_left < self.gt_left)\n\n dev_iou_h = np.where(cond_h, dev_iou_h, 0)\n dev_iou_w = np.where(cond_w, dev_iou_w, 0)\n dev_iou_l = np.where(cond_l, dev_iou_l, 0)\n\n\n second_term = (self.union + self.inter+ self.eps) / (self.union * self.inter + self.eps)\n first_term = 1/(self.union + self.eps)\n\n # delta_up = first_term * dev_h - second_term * dev_iou_up\n # delta_down = first_term * dev_h - second_term * dev_iou_down\n # delta_fwd = first_term * dev_w - second_term * dev_iou_fwd\n # delta_bwd = first_term * dev_w - second_term * dev_iou_bwd\n # delta_right = first_term * dev_l - second_term * dev_iou_right\n # delta_left = first_term * dev_l - second_term * dev_iou_left\n\n delta_h = first_term * dev_h - second_term * dev_iou_h\n delta_w = first_term * dev_w - second_term * dev_iou_w\n delta_l = first_term * dev_l - second_term * dev_iou_l\n\n # delta = delta_up + delta_down + delta_fwd + delta_bwd + delta_right + delta_left\n\n delta = 2*delta_h + 2*delta_w + 2*delta_l\n\n bottom[0].diff[...] 
= delta * self.reg_weights\n\n    # print(\"IoULoss backward\", np.mean(delta * self.reg_weights))\n\nclass IoULossV2(caffe.Layer):\n    def setup(self, bottom, top):\n        self.eps = 1e-5\n        self.sigma = 3\n    def reshape(self, bottom, top):\n        top[0].reshape(1)\n\n    def forward(self, bottom, top):\n        pred = bottom[0].data\n        gt_box = bottom[1].data\n        self.points_label = bottom[2].data\n        self.reg_weights = bottom[3].data\n        self.reg_weights = np.expand_dims(self.reg_weights,-1)\n        # points = bottom[4].data[...,:3]\n\n        pred = pred * self.points_label #if label==0 do not count iou\n        # pred = np.where(pred<0, 0, pred) #ReLU\n\n        self.pred_up = pred[..., 5:6]\n        self.pred_down = pred[..., 2:3]\n        self.pred_fwd = pred[..., 3:4]\n        self.pred_bwd = pred[..., 0:1]\n        self.pred_right = pred[..., 4:5]\n        self.pred_left = pred[..., 1:2]\n        self.pred_rot = pred[..., 6:]\n\n        self.gt_up = gt_box[..., 5:6]\n        self.gt_down = gt_box[..., 2:3]\n        self.gt_fwd = gt_box[..., 3:4]\n        self.gt_bwd = gt_box[..., 0:1]\n        self.gt_right = gt_box[..., 4:5]\n        self.gt_left = gt_box[..., 1:2]\n        self.gt_rot = gt_box[..., 6:]\n\n        self.diff = self.pred_rot - self.gt_rot\n        self.abs_diff = np.abs(self.diff)\n        self.cond = self.abs_diff <= (1/(self.sigma**2))\n        rot_loss = np.where(self.cond, 0.5 * self.sigma**2 * self.abs_diff**2,\n                        self.abs_diff - 0.5/self.sigma**2)\n\n\n        pred_area = (self.pred_up + self.pred_down) * (self.pred_fwd + self.pred_bwd) * (self.pred_right + self.pred_left)\n        gt_area = (self.gt_up + self.gt_down) * (self.gt_fwd + self.gt_bwd) * (self.gt_right + self.gt_left)\n\n        self.inter_h = np.minimum(self.pred_up, self.gt_up) + np.minimum(self.pred_down, self.gt_down)\n        self.inter_w = np.minimum(self.pred_fwd, self.gt_fwd) + np.minimum(self.pred_bwd, self.gt_bwd)\n        self.inter_l = np.minimum(self.pred_right, self.gt_right) + np.minimum(self.pred_left, self.gt_left)\n\n        self.inter = self.inter_h * self.inter_w * self.inter_l\n        self.union = pred_area + gt_area - self.inter\n\n        iou = (self.inter + self.eps) / (self.union + self.eps) #* self.points_label #if label==0 do not count iou\n\n        logprobs = -np.log(iou) + rot_loss\n\n        top[0].data[...] 
= np.sum(logprobs * self.reg_weights)\n\n    def backward(self, top, propagate_down, bottom):\n\n        dev_h = (self.pred_left * self.pred_fwd) + (self.pred_left * self.pred_bwd) + (self.pred_right * self.pred_fwd) + (self.pred_right * self.pred_bwd)\n        dev_w = (self.pred_left * self.pred_up) + (self.pred_left * self.pred_down) + (self.pred_right * self.pred_up) + (self.pred_right * self.pred_down)\n        dev_l = (self.pred_up * self.pred_fwd) + (self.pred_up * self.pred_bwd) + (self.pred_down * self.pred_fwd) + (self.pred_down * self.pred_bwd)\n\n        cond_h = (self.pred_up < self.gt_up) + (self.pred_down < self.gt_down) # or condition\n        cond_w = (self.pred_fwd < self.gt_fwd) + (self.pred_bwd < self.gt_bwd)\n        cond_l = (self.pred_right < self.gt_right) + (self.pred_left < self.gt_left)\n\n        dev_iou_h = np.where(cond_h, self.inter_w * self.inter_l, 0)\n        dev_iou_w = np.where(cond_w, self.inter_h * self.inter_l, 0)\n        dev_iou_l = np.where(cond_l, self.inter_w * self.inter_h, 0)\n\n        second_term = (self.union + self.inter) / (self.union * self.inter + self.eps)\n        first_term = 1/(self.union + self.eps)\n\n        delta_h = first_term * dev_h - second_term * dev_iou_h\n        delta_w = first_term * dev_w - second_term * dev_iou_w\n        delta_l = first_term * dev_l - second_term * dev_iou_l\n\n        # start_time = timeit.default_timer()\n\n        rot_delta = np.where(self.cond, (self.sigma**2) * self.diff, np.sign(self.diff))\n        delta = np.concatenate((delta_w, delta_l, delta_h), axis=-1)\n        delta = np.repeat(delta, 2, axis=-1)\n        delta = np.concatenate((delta, rot_delta), axis=-1)\n        #\n        # end_time = timeit.default_timer()\n        # print('np.repeat forwards ran for {}s'.format((end_time-start_time)/60))\n\n        bottom[0].diff[...] = delta * self.reg_weights\n\nclass IoULossV3(caffe.Layer):\n    def setup(self, bottom, top):\n        self.eps = 1e-5\n        self.smooth = 1\n    def reshape(self, bottom, top):\n        top[0].reshape(1)\n\n    def forward(self, bottom, top):\n        pred = bottom[0].data\n        gt_box = bottom[1].data\n        self.points_label = bottom[2].data\n        self.reg_weights = bottom[3].data\n        self.reg_weights = np.expand_dims(self.reg_weights,-1)\n        points = bottom[4].data[...,:3]\n\n        pred = pred * self.points_label #if label==0 do not count iou\n        # print(\"label\", np.unique(self.points_label, return_index=True))\n        # pred = np.where(pred<=0, 0, pred) #ReLU\n        # print(\"pred\", np.unique(self.points_label>0, return_index=True))\n\n        self.pred_up = pred[..., 5:6]\n        self.pred_down = pred[..., 2:3]\n        self.pred_fwd = pred[..., 3:4]\n        self.pred_bwd = pred[..., 0:1]\n        self.pred_right = pred[..., 4:5]\n        self.pred_left = pred[..., 1:2]\n\n        self.gt_up = gt_box[..., 5:6]\n        self.gt_down = gt_box[..., 2:3]\n        self.gt_fwd = gt_box[..., 3:4]\n        self.gt_bwd = gt_box[..., 0:1]\n        self.gt_right = gt_box[..., 4:5]\n        self.gt_left = gt_box[..., 1:2]\n\n        pred_area = (self.pred_fwd + self.pred_bwd) * (self.pred_right + self.pred_left)\n        # print(\"pred_area\", pred_area[pred_area>4])\n        gt_area = (self.gt_fwd + self.gt_bwd) * (self.gt_right + self.gt_left)\n        # print(\"gt_area\", gt_area[gt_area>0.8])\n\n        # self.inter_h = np.minimum(self.pred_up, self.gt_up) + np.minimum(self.pred_down, self.gt_down)\n        self.inter_w = np.minimum(self.pred_fwd, self.gt_fwd) + np.minimum(self.pred_bwd, self.gt_bwd)\n        self.inter_l = np.minimum(self.pred_right, self.gt_right) + np.minimum(self.pred_left, self.gt_left)\n\n        self.inter = self.inter_w * self.inter_l\n\n        # print(\"self.inter > 0.4\", self.inter[self.inter>0.4])\n\n        self.union = pred_area + gt_area - self.inter\n\n        iou = (self.inter + self.eps) / (self.union + self.eps) #* 
self.points_label #if label==0 do not count iou\n\n        logprobs = -np.log(iou)\n\n        top[0].data[...] = np.sum(logprobs * self.reg_weights)\n\n    def backward(self, top, propagate_down, bottom):\n\n        # dev_h = (self.pred_left * self.pred_fwd) + (self.pred_left * self.pred_bwd) + (self.pred_right * self.pred_fwd) + (self.pred_right * self.pred_bwd)\n        dev_w = self.pred_left + self.pred_right\n        dev_l = self.pred_fwd + self.pred_bwd\n\n        # dev_iou_h = self.inter_w * self.inter_l\n        # dev_iou_w = self.inter_l\n        # dev_iou_l = self.inter_w\n\n        # cond_h = (self.pred_up < self.gt_up) + (self.pred_down < self.gt_down) # or condition\n        cond_w = (self.pred_fwd < self.gt_fwd) + (self.pred_bwd < self.gt_bwd)\n        cond_l = (self.pred_right < self.gt_right) + (self.pred_left < self.gt_left)\n\n        # dev_iou_h = np.where(cond_h, dev_iou_h, 0)\n        dev_iou_w = np.where(cond_w, self.inter_l, 0)\n        dev_iou_l = np.where(cond_l, self.inter_w, 0)\n\n\n        second_term = (self.union + self.inter) / (self.union * self.inter + self.eps)\n        first_term = 1/(self.union + self.eps)\n\n        delta = np.zeros(bottom[0].data.shape) #same shape as the prediction blob so the channel slices below apply\n        # delta_h = first_term * dev_h - second_term * dev_iou_h\n        delta_w = first_term * dev_w - second_term * dev_iou_w # df, db\n        delta_l = first_term * dev_l - second_term * dev_iou_l # dr, dl\n\n        delta[..., 0:1] = delta_w #b\n        delta[..., 1:2] = delta_l #l\n        delta[..., 3:4] = delta_w #f\n        delta[..., 4:5] = delta_l #r\n        # delta = np.concatenate((),axis=-1)\n\n        # delta = delta_w + delta_l\n\n        bottom[0].diff[...] = delta * self.reg_weights\n\nclass CaLu(caffe.Layer):\n    def setup(self, bottom, top):\n        input_tensor = bottom[0].data\n        top[0].reshape(*input_tensor.shape)\n    def reshape(self, bottom, top):\n        pass\n    def forward(self, bottom, top):\n        self.input_tensor = bottom[0].data\n\n        # make positives\n        self.t_mask = self.input_tensor < 0\n        self.tensor = np.where(self.t_mask, 0, self.input_tensor)\n\n        #activate\n        self.tensor = 1 - 1/(1+self.tensor)\n\n        top[0].data[...] = self.tensor\n\n    def backward(self, top, propagate_down, bottom):\n        diff = np.where(self.t_mask, 0, 1/np.square((1+self.input_tensor)))\n        bottom[0].diff[...] = diff\n\nclass CaLuV2(caffe.Layer):\n    def setup(self, bottom, top):\n        input_tensor = bottom[0].data\n        top[0].reshape(*input_tensor.shape)\n    def reshape(self, bottom, top):\n        pass\n    def forward(self, bottom, top):\n        self.input_tensor = bottom[0].data\n\n        #activate\n        self.tensor = 1 - 1/(1+self.input_tensor)\n\n        top[0].data[...] = self.tensor\n\n    def backward(self, top, propagate_down, bottom):\n        diff = 1/np.square((1+self.input_tensor))\n        bottom[0].diff[...] = diff\n\nclass BCLReshape(caffe.Layer):\n    def setup(self, bottom, top):\n        top_prev = bottom[0].data\n        top_prev, top_lattice = self.reshape_func(top_prev)\n        top[0].reshape(*top_prev.shape)\n        top[1].reshape(*top_lattice.shape)\n    def reshape(self, bottom, top):\n        pass\n    def forward(self, bottom, top):\n        top_prev = bottom[0].data\n        top_prev, top_lattice = self.reshape_func(top_prev)\n        top[0].reshape(*top_prev.shape) #top_prev\n        top[0].data[...] = top_prev\n        top[1].reshape(*top_lattice.shape) #top_lattice\n        top[1].data[...] 
= top_lattice\n def backward(self, top, propagate_down, bottom):\n pass\n def reshape_func(self, top_prev):\n top_prev = top_prev.transpose(0,2,1) #(1,N,C) -> (1,C,N)\n top_prev = np.expand_dims(top_prev,2) #(1,C,N) -> (1,C,,1,N)\n top_lattice = top_prev[:, :3, ...]\n return top_prev, top_lattice\n\nclass BCLReshapeV2(caffe.Layer):\n def setup(self, bottom, top):\n top_prev = bottom[0].data\n coords = bottom[1].data\n top_prev, top_lattice = self.reshape_func(top_prev, coords)\n top[0].reshape(*top_prev.shape)\n top[1].reshape(*top_lattice.shape)\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n top_prev = bottom[0].data\n coords = bottom[1].data\n top_prev, top_lattice = self.reshape_func(top_prev, coords)\n top[0].reshape(*top_prev.shape) #top_prev\n top[0].data[...] = top_prev\n top[1].reshape(*top_lattice.shape) #top_lattice\n top[1].data[...] = top_lattice\n def backward(self, top, propagate_down, bottom):\n pass\n def reshape_func(self, top_prev, coords):\n top_prev = top_prev.transpose(1,2,0) #(N,1,4) -> (1,4,N)\n top_prev = np.expand_dims(top_prev,2) #(1,4,N) -> (1,4,,1,N)\n coords = coords[:,1:][:,::-1].transpose() #coors in reverse order bzyx (V, C) -> (C,V)\n coords = np.expand_dims(coords,0) #(C,V)-> (1,C,V)\n coords = np.expand_dims(coords,2) #(1,C,V)-> (1,C,1,V)\n return top_prev, coords\n\nclass BCLReshapeV4(caffe.Layer):\n def setup(self, bottom, top):\n top_prev = bottom[0].data\n coords = bottom[1].data\n top_prev, top_lattice = self.reshape_func(top_prev, coords)\n top[0].reshape(*top_prev.shape)\n top[1].reshape(*top_lattice.shape)\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n top_prev = bottom[0].data\n coords = bottom[1].data\n top_prev, top_lattice = self.reshape_func(top_prev, coords)\n top[0].reshape(*top_prev.shape) #top_prev\n top[0].data[...] = top_prev\n top[1].reshape(*top_lattice.shape) #top_lattice\n top[1].data[...] = top_lattice\n def backward(self, top, propagate_down, bottom):\n pass\n def reshape_func(self, top_prev, coords):\n top_prev = top_prev.transpose(2,1,0) #(V,100,C) -> (C,100,V)\n top_prev = np.expand_dims(top_prev,0) #(C,100,V)-> (1,C,100,V)\n coords = coords[:,2:][:,::-1].transpose() #coors in reverse order bzyx, pillar no need z (V,C)\n coords = np.expand_dims(coords,0) #(C,V)-> (1,C,V)\n coords = np.expand_dims(coords,2) #(1,C,V)-> (1,C,1,V)\n coords = np.repeat(coords, top_prev.shape[-2], 2) #repeat 100\n return top_prev, coords\n\nclass BCLReshapeV5(caffe.Layer):\n def setup(self, bottom, top):\n top_prev = bottom[0].data\n coords = bottom[1].data\n top_prev, top_lattice = self.reshape_func(top_prev, coords)\n top[0].reshape(*top_prev.shape)\n top[1].reshape(*top_lattice.shape)\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n top_prev = bottom[0].data\n coords = bottom[1].data\n top_prev, top_lattice = self.reshape_func(top_prev, coords)\n top[0].reshape(*top_prev.shape) #top_prev\n top[0].data[...] = top_prev\n top[1].reshape(*top_lattice.shape) #top_lattice\n top[1].data[...] 
= top_lattice\n def backward(self, top, propagate_down, bottom):\n pass\n def reshape_func(self, top_prev, coords):\n top_prev = top_prev.transpose(2,1,0) #(V,N,C) -> (C,N,V)\n top_prev = np.expand_dims(top_prev,0) #(C,N,V)-> (1,C,N,V)\n coords = coords[:,2:][:,::-1].transpose() #coors in reverse order bzyx, pillar no need z (V,C)\n coords = np.expand_dims(coords,0) #(C,V)-> (1,C,V)\n coords = np.expand_dims(coords,2) #(1,C,V)-> (1,C,1,V)\n return top_prev, coords\n\nclass GlobalPooling(caffe.Layer):\n def setup(self, bottom, top):\n pass\n def reshape(self, bottom, top):\n n, c, p, h, w = bottom[0].data.shape\n top[0].reshape(*(n, c, h, w))\n def forward(self, bottom, top):\n n, c, p, h, w = bottom[0].data.shape\n self.max_loc = bottom[0].data.argmax(axis=2)\n top[0].data[...] = bottom[0].data.max(axis=2)\n def backward(self, top, propagate_down, bottom):\n n, c, h, w = top[0].diff.shape\n nn, cc, hh, ww = np.ix_(np.arange(n), np.arange(c), np.arange(h),np.arange(w))\n bottom[0].diff[...] = 0\n bottom[0].diff[nn, cc, self.max_loc, hh, ww] = top[0].diff\n\nclass LogLayer(caffe.Layer):\n def setup(self, bottom, top):\n in1 = bottom[0].data\n print(\"debug print\", in1)\n print(\"debug print\", in1.shape)\n top[0].reshape(*in1.shape)\n def reshape(self, bottom, top):\n pass\n def forward(self, bottom, top):\n in1 = bottom[0].data\n print(\"forward debug print\", in1)\n print(\"forward debug print\", in1.shape)\n top[0].reshape(*in1.shape)\n top[0].data[...] = in1\n pass\n def backward(self, top, propagate_down, bottom):\n pass\n\nclass ProbRenorm(caffe.Layer):\n def setup(self, bottom, top):\n pass\n\n def reshape(self, bottom, top):\n top[0].reshape(*bottom[0].data.shape)\n\n def forward(self, bottom, top):\n clipped = bottom[0].data * bottom[1].data\n self.sc = 1.0 / (np.sum(clipped, axis=1, keepdims=True) + 1e-10)\n top[0].data[...] = clipped * self.sc\n\n def backward(self, top, propagate_down, bottom):\n bottom[0].diff[...] = top[0].diff * bottom[1].data * self.sc\n\nclass PickAndScale(caffe.Layer):\n def setup(self, bottom, top):\n self.nch_out = len(self.param_str.split('_'))\n self.dims = []\n for f in self.param_str.split('_'):\n if f.find('*') >= 0:\n self.dims.append((int(f[:f.find('*')]), float(f[f.find('*') + 1:])))\n elif f.find('/') >= 0:\n self.dims.append((int(f[:f.find('/')]), 1.0 / float(f[f.find('/') + 1:])))\n\n else:\n self.dims.append((int(f), 1.0))\n\n def reshape(self, bottom, top):\n top[0].reshape(bottom[0].data.shape[0], self.nch_out, bottom[0].data.shape[2], bottom[0].data.shape[3])\n\n def forward(self, bottom, top):\n for i, (j, s) in enumerate(self.dims):\n top[0].data[:, i, :, :] = bottom[0].data[:, j, :, :] * s\n def backward(self, top, propagate_down, bottom):\n pass # TODO NOT_YET_IMPLEMENTED\n"
] | [
[
"numpy.expand_dims",
"numpy.minimum",
"numpy.squeeze",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"numpy.mean",
"numpy.exp",
"numpy.where",
"numpy.square",
"numpy.clip",
"numpy.unique",
"numpy.arange",
"numpy.eye",
"numpy.stack",
"numpy.sin",
"numpy.repeat",
"numpy.zeros",
"numpy.log",
"numpy.floor",
"numpy.array",
"numpy.sum",
"numpy.random.get_state",
"numpy.maximum",
"numpy.abs",
"numpy.random.seed",
"numpy.cos",
"numpy.sign"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
carpedkm/vedatad | [
"55f8dced57f698ee9fc0da9bcf471d171e718d0c"
] | [
"vedacore/image/photometric.py"
] | [
"import cv2\nimport numpy as np\n\n\ndef imnormalize(img, mean, std, to_rgb=True):\n \"\"\"Normalize an image with mean and std.\n\n Args:\n img (ndarray): Image to be normalized.\n mean (ndarray): The mean to be used for normalize.\n std (ndarray): The std to be used for normalize.\n to_rgb (bool): Whether to convert to rgb.\n\n Returns:\n ndarray: The normalized image.\n \"\"\"\n img = img.copy().astype(np.float32)\n return imnormalize_(img, mean, std, to_rgb)\n\n\ndef imnormalize_(img, mean, std, to_rgb=True):\n \"\"\"Inplace normalize an image with mean and std.\n\n Args:\n img (ndarray): Image to be normalized.\n mean (ndarray): The mean to be used for normalize.\n std (ndarray): The std to be used for normalize.\n to_rgb (bool): Whether to convert to rgb.\n\n Returns:\n ndarray: The normalized image.\n \"\"\"\n # cv2 inplace normalization does not accept uint8\n assert img.dtype != np.uint8\n mean = np.float64(mean.reshape(1, -1))\n stdinv = 1 / np.float64(std.reshape(1, -1))\n if to_rgb:\n img = img[..., ::-1] # inplace\n img -= mean # inplace\n img *= stdinv # inplace\n return img\n\n\ndef imdenormalize(img, mean, std, to_bgr=True):\n assert img.dtype != np.uint8\n mean = mean.reshape(1, -1).astype(np.float64)\n std = std.reshape(1, -1).astype(np.float64)\n img = cv2.multiply(img, std) # make a copy\n cv2.add(img, mean, img) # inplace\n if to_bgr:\n cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace\n return img\n\n\ndef iminvert(img):\n \"\"\"Invert (negate) an image.\n\n Args:\n img (ndarray): Image to be inverted.\n\n Returns:\n ndarray: The inverted image.\n \"\"\"\n return np.full_like(img, 255) - img\n\n\ndef solarize(img, thr=128):\n \"\"\"Solarize an image (invert all pixel values above a threshold)\n\n Args:\n img (ndarray): Image to be solarized.\n thr (int): Threshold for solarizing (0 - 255).\n\n Returns:\n ndarray: The solarized image.\n \"\"\"\n img = np.where(img < thr, img, 255 - img)\n return img\n\n\ndef posterize(img, bits):\n \"\"\"Posterize an image (reduce the number of bits for each color channel)\n\n Args:\n img (ndarray): Image to be posterized.\n bits (int): Number of bits (1 to 8) to use for posterizing.\n\n Returns:\n ndarray: The posterized image.\n \"\"\"\n shift = 8 - bits\n img = np.left_shift(np.right_shift(img, shift), shift)\n return img\n"
] | [
[
"numpy.full_like",
"numpy.right_shift",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
PranavBharadwaj-1328/Image_Filtering_methods | [
"a608c81a47f85adb38604f8f8d9503f5bf6555f7"
] | [
"tophat.py"
] | [
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef openimg():\n F = input()\n img = cv2.imread(F,0)\n kernel = np.ones((5,5),np.uint8)\n opening = cv2.morphologyEx(img,cv2.MORPH_TOPHAT,kernel)\n plt.subplot(121),plt.imshow(img)\n plt.title('Original'),plt.xticks([]),plt.yticks([])\n plt.subplot(122),plt.imshow(opening)\n plt.title('Tophat image'),plt.xticks([]),plt.yticks([])\n plt.show()\nopenimg()\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"numpy.ones",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
imanpalsingh/projection-pursuit | [
"307ad765d447e81dce909dfa9778db1610704315"
] | [
"skpp/tests/test_skpp.py"
] | [
"# run with python(3) -m pytest\n\nimport numpy\nimport pytest\nimport time\n\nfrom sklearn.utils import estimator_checks\nfrom sklearn.utils.testing import assert_equal\nfrom sklearn.utils.testing import assert_less\nfrom sklearn.utils.testing import assert_almost_equal\nfrom sklearn.utils.testing import assert_array_less\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.utils.testing import assert_array_almost_equal\nfrom sklearn.utils.testing import assert_raise_message\nfrom sklearn.utils.testing import assert_raises\nfrom sklearn.utils.testing import assert_warns_message\n# https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/testing.py\n\nfrom ..skpp import ProjectionPursuitRegressor, ProjectionPursuitClassifier\n\ndef test_regressor_passes_sklearn_checks():\n\t#estimator_checks.MULTI_OUTPUT.append('ProjectionPursuitRegressor')\n\testimator_checks.check_estimator(ProjectionPursuitRegressor)\n\ndef test_classifier_passes_sklearn_checks():\n\testimator_checks.check_estimator(ProjectionPursuitClassifier)\n\ndef test_construction_errors():\n\tassert_raises(ValueError, ProjectionPursuitRegressor, r=0)\n\tassert_raises(NotImplementedError, ProjectionPursuitRegressor, fit_type='jabberwocky')\n\tassert_raises(ValueError, ProjectionPursuitRegressor, degree='master')\n\tassert_raises(ValueError, ProjectionPursuitRegressor, opt_level='near')\n\tassert_raises(ValueError, ProjectionPursuitRegressor, example_weights='light')\n\tassert_raises(ValueError, ProjectionPursuitRegressor, example_weights=numpy.array([-1]))\n\tassert_raises(ValueError, ProjectionPursuitRegressor, out_dim_weights='heavy')\n\tassert_raises(ValueError, ProjectionPursuitRegressor, out_dim_weights=numpy.array([-1]))\n\tassert_raises(ValueError, ProjectionPursuitRegressor, eps_stage=-0.1)\n\tassert_raises(ValueError, ProjectionPursuitRegressor, stage_maxiter=0)\n\tassert_raises(ValueError, ProjectionPursuitClassifier, pairwise_loss_matrix=numpy.array([-1]))\n\tassert_raises(ValueError, ProjectionPursuitClassifier, pairwise_loss_matrix=numpy.array([1]))\n\tassert_raises(ValueError, ProjectionPursuitClassifier, pairwise_loss_matrix='whereami?')\n\ndef test_fit_errors():\n\tppc = ProjectionPursuitClassifier(example_weights=numpy.array([1, 2]))\n\tppr = ProjectionPursuitRegressor(example_weights=numpy.array([1,2]),\n\t\tout_dim_weights=numpy.array([3]))\n\tX = numpy.random.randn(5, 2)\n\tY = numpy.array([0, 0, 1, 1, 1])\n\tassert_raises(ValueError, ppc.fit, X, Y)\n\tassert_raises(ValueError, ppr.fit, X, Y)\n\tX = numpy.random.randn(2, 2)\n\tY = numpy.eye(2)\n\tassert_raises(ValueError, ppc.fit, X, Y)\n\tassert_raises(ValueError, ppr.fit, X, Y)\n\ndef test_example_weightings_applied():\n\t# Construct a 1D example constrained to deg=2. 
No polynomial of such low\n\t# order can go through all the points, so weights determine which should be\n\t# fit more closely.\n\tX = numpy.array([[-1],[-0.9],[0],[0.9],[1]])# on a number line\n\tY = numpy.array([0, 1, 1, 1, 0])# the targets for these points\n\n\tL = numpy.array([[0, 10], [1, 0]])\n\n\t# If given the following example weightings, the rounded predictions at the\n\t# points queried should end up looking like the corresponding targets.\n\texample_weights = numpy.array([[1, 1, 1, 1, 1], [1, 100, 100, 100, 1],\n\t\t[100, 1, 1, 1, 100], [10, 1, 1, 10, 1]])\n\ttargets = numpy.array([[0, 0, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1],\n\t\t[0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1]])\n\n\tfor i in range(example_weights.shape[0]):\n\t\tppr = ProjectionPursuitRegressor(degree=2,\n\t\t\texample_weights=example_weights[i,:])\n\t\tppr.fit(X, Y)\n\n\t\tppc = ProjectionPursuitClassifier(degree=2,\n\t\t\texample_weights=example_weights[i,:], pairwise_loss_matrix=L)\n\t\tppc.fit(X, Y)\n\n\t\tpredictions = numpy.round(ppr.predict(numpy.array([[-1], [-0.95], [-0.9],\n\t\t\t[0], [0.9], [0.95], [1]])))\n\n\t\tassert_array_equal(predictions, targets[i,:])\n\ndef test_ppr_learns():\n\t# Generate some dummy data, X random, Y an additive-model-like construction\n\tn = 1000\n\td = 4\n\tp = 10\n\n\tX = numpy.random.rand(n, p) - 0.5\n\tY = numpy.zeros((n, d))\n\tfor j in range(5):\n\t\talpha = numpy.random.randn(p) # projection vector\n\t\tprojection = numpy.dot(X, alpha)\n\t\t# Generate random polynomials with coefficients in [-100, 100]\n\t\tf = numpy.poly1d(numpy.random.randint(-100, 100,\n\t\t\tsize=numpy.random.randint(3+1)))\n\t\tbeta = numpy.random.randn(d) # expansion vector\n\t\tY += numpy.outer(f(projection), beta)\n\n\t# Divide the data\n\ttemp = numpy.arange(n)\n\tnumpy.random.shuffle(temp)\n\ttraining = temp[0:int(n*0.8)]\n\ttesting = temp[int(n*0.8):]\n\n\tmse_per_element = numpy.sum(Y**2)/Y.size\n\tprint('Average magnitude of squared Y per element', mse_per_element)\n\n\testimators = [ProjectionPursuitRegressor(r=20, fit_type='polyfit', degree=3,\n\t\topt_level='high'), ProjectionPursuitRegressor(out_dim_weights='uniform',\n\t\tfit_type='spline', opt_level='medium')]\n\taccuracies = [mse_per_element/1000000, mse_per_element/100]\n\n\tfor i in range(len(estimators)):\n\t\t\n\t\tprint('training')\n\t\tbefore = time.time()\n\t\testimators[i].fit(X[training, :], Y[training, :])\n\t\tafter = time.time()\n\t\tprint('finished in', after-before, 'seconds')\n\n\t\tYhat = estimators[i].predict(X[training, :])\n\t\ttrain_error = numpy.sum((Y[training, :] - Yhat)**2)/Y[training, :].size\n\t\tprint('Average magnitude of squared error in training data per element',\n\t\t\ttrain_error)\n\n\t\tYhat = estimators[i].predict(X[testing, :])\n\t\ttest_error = numpy.sum((Y[testing, :] - Yhat)**2)/Y[testing, :].size\n\t\tprint('Average magnitude of squared error in testing data per element',\n\t\t\ttest_error)\n\n\t\tassert_less(train_error, accuracies[i])\n\t\tassert_less(test_error, accuracies[i])\n"
] | [
[
"numpy.dot",
"numpy.arange",
"numpy.eye",
"sklearn.utils.testing.assert_raises",
"numpy.random.shuffle",
"sklearn.utils.testing.assert_less",
"numpy.random.randn",
"sklearn.utils.estimator_checks.check_estimator",
"numpy.random.rand",
"sklearn.utils.testing.assert_array_equal",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
c-d-leonard/N5K | [
"99d844621f2436aaf56fc98484e309043d4b7bd1"
] | [
"timer.py"
] | [
"import sys\nsys.path.append(\"fftlogx/\")\nimport numpy as np\nimport time\nimport n5k\n\n\ndef time_run(cls, config, niter):\n c = cls(config)\n c.setup()\n ts = np.zeros(niter+1)\n for i in range(niter+1):\n t0 = time.time()\n c.run()\n tf = time.time()\n ts[i] = tf-t0\n print('t=', ts[i])\n tmean = np.mean(ts[1:])\n terr = np.std(ts[1:])/np.sqrt(niter)\n c.write_output()\n c.teardown()\n print('%s: t=(%f+-%f)s'%(cls.name,tmean,terr))\n return ts\n\n\nconf = sys.argv[1]\nname = sys.argv[2]\nniter = int(sys.argv[3])\nfname_out = sys.argv[4]\n\ncalc = n5k.n5k_calculator_from_name(name)\n\ntimes = time_run(calc, conf, niter)\n\nif fname_out != 'none':\n np.savez(fname_out, times=times)\n"
] | [
[
"numpy.savez",
"numpy.sqrt",
"numpy.std",
"numpy.mean",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
AGrigis/pynet | [
"d0e6a3e6e954ae0e59fddfe85fe12ce0ef1e6fe4"
] | [
"pynet/plotting/image.py"
] | [
"# -*- coding: utf-8 -*-\n##########################################################################\n# NSAp - Copyright (C) CEA, 2019\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n# Import\nimport numpy as np\nfrom pyqtgraph.Qt import QtGui\nimport pyqtgraph\n\n\ndef plot_data(data, extradata=None, scroll_axis=2):\n \"\"\" Plot an image associated data.\n Currently support on 1D, 2D or 3D data.\n\n Parameters\n ----------\n data: array\n the data to be displayed.\n extradata: list of array\n if specified concatenate this array with the input data.\n scroll_axis: int (optional, default 2)\n the scroll axis for 3D data.\n \"\"\"\n # Check input parameters\n if data.ndim not in range(1, 4):\n raise ValueError(\"Unsupported data dimension.\")\n\n # Concatenate\n if extradata is not None:\n concat_axis = 0 if scroll_axis != 0 else 1\n extradata = [\n rescale_intensity(\n arr=_data,\n in_range=(_data.min(), _data.max()),\n out_range=(data.min(), data.max()))\n for _data in extradata]\n data = np.concatenate([data] + extradata, axis=concat_axis)\n\n # Create application\n app = pyqtgraph.mkQApp()\n\n # Create the widget\n if data.ndim == 3:\n indices = [i for i in range(3) if i != scroll_axis]\n indices = [scroll_axis] + indices\n widget = pyqtgraph.image(np.transpose(data, indices))\n elif data.ndim == 2:\n widget = pyqtgraph.image(data)\n else:\n widget = pyqtgraph.plot(data)\n\n # Run application\n app.exec_()\n\n\ndef rescale_intensity(arr, in_range, out_range):\n \"\"\" Return arr after stretching or shrinking its intensity levels.\n\n Parameters\n ----------\n arr: array\n input array.\n in_range, out_range: 2-tuple\n min and max intensity values of input and output arr.\n\n Returns\n -------\n out: array\n array after rescaling its intensity.\n \"\"\"\n imin, imax = in_range\n omin, omax = out_range\n out = np.clip(arr, imin, imax)\n out = (out - imin) / float(imax - imin)\n return out * (omax - omin) + omin\n"
] | [
[
"numpy.concatenate",
"numpy.transpose",
"numpy.clip"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TUDelft-CITG/OpenTNSim | [
"7d3566c9027fe6874b9196e03aafd70e4f5919f5"
] | [
"opentnsim/corelock2.py"
] | [
"\"\"\"Main module.\"\"\"\n\n# package(s) related to time, space and id\nimport json\nimport logging\nimport uuid\n\n# you need these dependencies (you can get these from anaconda)\n# package(s) related to the simulation\nimport simpy\nimport random\nimport networkx as nx\nimport numpy as np\n\n# spatial libraries\nimport pyproj\nimport shapely.geometry\n\n# additional packages\nimport datetime, time\n\nlogger = logging.getLogger(__name__)\n\n\nclass SimpyObject:\n \"\"\"General object which can be extended by any class requiring a simpy environment\n env: a simpy Environment\n \"\"\"\n\n def __init__(self, env, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.env = env\n\n\nclass HasResource(SimpyObject):\n \"\"\"Something that has a resource limitation, a resource request must be granted before the object can be used.\n nr_resources: nr of requests that can be handled simultaneously\"\"\"\n\n def __init__(self, nr_resources=1, priority=False, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.resource = (\n simpy.PriorityResource(self.env, capacity=nr_resources)\n if priority\n else simpy.Resource(self.env, capacity=nr_resources)\n )\n\n\nclass Identifiable:\n \"\"\"Mixin class: Something that has a name and id\n name: a name\n id: a unique id generated with uuid\"\"\"\n\n def __init__(self, name, id=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.name = name\n # generate some id, in this case based on m\n self.id = id if id else str(uuid.uuid1())\n\n\nclass Locatable:\n \"\"\"Mixin class: Something with a geometry (geojson format)\n geometry: can be a point as well as a polygon\"\"\"\n\n def __init__(self, geometry, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.geometry = geometry\n self.node = None\n\n\nclass Neighbours:\n \"\"\"Can be added to a locatable object (list)\n travel_to: list of locatables to which can be travelled\"\"\"\n\n def ___init(self, travel_to, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.neighbours = travel_to\n\n\nclass HasContainer(SimpyObject):\n \"\"\"Mixin class: Something with a storage capacity\n capacity: amount the container can hold\n level: amount the container holds initially\n container: a simpy object that can hold stuff\n total_requested: a counter that helps to prevent over requesting\"\"\"\n\n def __init__(self, capacity, level=0, total_requested=0, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.container = simpy.Container(self.env, capacity, init=level)\n self.total_requested = total_requested\n\n @property\n def is_loaded(self):\n return True if self.container.level > 0 else False\n\n @property\n def filling_degree(self):\n return self.container.level / self.container.capacity\n\n\nclass Log(SimpyObject):\n \"\"\"Mixin class: Something that has logging capability\n log: log message [format: 'start activity' or 'stop activity']\n t: timestamp\n value: a value can be logged as well\n geometry: value from locatable (lat, lon)\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.log = {\"Message\": [], \"Timestamp\": [], \"Value\": [], \"Geometry\": []}\n\n def log_entry(self, log, t, value, geometry_log):\n \"\"\"Log\"\"\"\n self.log[\"Message\"].append(log)\n self.log[\"Timestamp\"].append(datetime.datetime.fromtimestamp(t))\n 
self.log[\"Value\"].append(value)\n self.log[\"Geometry\"].append(geometry_log)\n\n def get_log_as_json(self):\n json = []\n for msg, t, value, geometry_log in zip(\n self.log[\"Message\"],\n self.log[\"Timestamp\"],\n self.log[\"Value\"],\n self.log[\"Geometry\"],\n ):\n json.append(\n dict(message=msg, time=t, value=value, geometry_log=geometry_log)\n )\n return json\n\n\nclass VesselProperties:\n \"\"\"Mixin class: Something that has vessel properties\n vessel_type: can contain info on vessel type (avv class, cemt_class or other)\n width: vessel width\n length: vessel length\n height_empty: vessel height unloaded\n height_full: vessel height loaded\n draught_empty: draught unloaded\n draught_full: draught loaded\n Add information on possible restrictions to the vessels, i.e. height, width, etc.\n \"\"\"\n\n def __init__(\n self,\n vessel_type,\n width,\n length,\n height_empty,\n height_full,\n draught_empty,\n draught_full,\n *args,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n\n \"\"\"Initialization\"\"\"\n self.vessel_type = vessel_type\n\n self.width = width\n self.length = length\n\n self.height_empty = height_empty\n self.height_full = height_full\n\n self.draught_empty = draught_empty\n self.draught_full = draught_full\n\n @property\n def current_height(self):\n \"\"\" Calculate current height based on filling degree \"\"\"\n\n return (\n self.filling_degree * (self.height_full - self.height_empty)\n + self.height_empty\n )\n\n @property\n def current_draught(self):\n \"\"\" Calculate current draught based on filling degree \"\"\"\n\n return (\n self.filling_degree * (self.draught_full - self.draught_empty)\n + self.draught_empty\n )\n\n def get_route(\n self,\n origin,\n destination,\n graph=None,\n minWidth=None,\n minHeight=None,\n minDepth=None,\n randomSeed=4,\n ):\n \"\"\" Calculate a path based on vessel restrictions \"\"\"\n\n graph = graph if graph else self.env.FG\n minWidth = minWidth if minWidth else 1.1 * self.width\n minHeight = minWidth if minHeight else 1.1 * self.current_height\n minDepth = minWidth if minDepth else 1.1 * self.current_draught\n\n # Check if information on restrictions is added to the edges\n random.seed(randomSeed)\n edge = random.choice(list(graph.edges(data=True)))\n edge_attrs = list(edge[2].keys())\n\n # IMPROVE THIS TO CHECK ALL EDGES AND COMBINATIONS OF RESTRICTIONS\n\n if all(item in edge_attrs for item in [\"Width\", \"Height\", \"Depth\"]):\n edges = []\n nodes = []\n\n for edge in graph.edges(data=True):\n if (\n edge[2][\"Width\"] >= minWidth\n and edge[2][\"Height\"] >= minHeight\n and edge[2][\"Depth\"] >= minDepth\n ):\n edges.append(edge)\n\n nodes.append(graph.nodes[edge[0]])\n nodes.append(graph.nodes[edge[1]])\n\n subGraph = graph.__class__()\n\n for node in nodes:\n subGraph.add_node(\n node[\"name\"],\n name=node[\"name\"],\n geometry=node[\"geometry\"],\n position=(node[\"geometry\"].x, node[\"geometry\"].y),\n )\n\n for edge in edges:\n subGraph.add_edge(edge[0], edge[1], attr_dict=edge[2])\n\n try:\n return nx.dijkstra_path(subGraph, origin, destination)\n except:\n raise ValueError(\n \"No path was found with the given boundary conditions.\"\n )\n\n # If not, return shortest path\n else:\n return nx.dijkstra_path(graph, origin, destination)\n\n\nclass HasEnergy:\n \"\"\"Mixin class: Something that has energy usage.\n installed_power: installed engine power [kW]\n resistance: Rtot unloaded [N]\n resistance_empty: Rtot loaded [N]\n emissionfactor: emission factor [-]\n \"\"\"\n\n def __init__(self, installed_power, 
resistance, resistance_empty, emissionfactor, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n \"\"\"Initialization\"\"\"\n self.installed_power = installed_power\n self.resistance = resistance\n self.resistance_empty = resistance_empty\n self.emissionfactor = emissionfactor\n\n\nclass Routeable:\n \"\"\"Mixin class: Something with a route (networkx format)\n route: a networkx path\"\"\"\n\n def __init__(self, route, complete_path=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.route = route\n self.complete_path = complete_path\n\nclass IsLockWaitingArea(HasResource, Identifiable, Log):\n \"\"\"Mixin class: Something has lock object properties\n properties in meters\n operation in seconds\n \"\"\"\n\n def __init__(\n self,\n node,\n *args,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n \n waiting_area_resources = 100\n self.waiting_area = {\n node: simpy.Resource(self.env, capacity=waiting_area_resources),\n }\n \n departure_resources = 2\n self.departure = {\n node: simpy.PriorityResource(self.env, capacity=departure_resources),\n }\n \nclass IsLockLineUpArea(HasResource, Identifiable, Log):\n \"\"\"Mixin class: Something has lock object properties\n properties in meters\n operation in seconds\n \"\"\"\n\n def __init__(\n self,\n node,\n *args,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n \n self.lock_queue_length = 0\n \n # Lay-Out\n self.line_up_area = {\n node: simpy.Resource(self.env, capacity=1),\n }\n \n departure_resources = 1\n self.departure = {\n node: simpy.Resource(self.env, capacity=departure_resources),\n }\n\nclass IsLock(HasResource, Identifiable, Log):\n \"\"\"Mixin class: Something has lock object properties\n properties in meters\n operation in seconds\n \"\"\"\n\n def __init__(\n self,\n node_1,\n node_2,\n node_3,\n lock_length,\n lock_width,\n lock_depth,\n doors_open,\n doors_close,\n operating_time,\n waiting_area=True,\n *args,\n **kwargs\n ):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n \n # Properties\n self.lock_length = lock_length\n self.lock_width = lock_width\n self.lock_depth = lock_depth\n\n # Operating\n self.doors_open = doors_open\n self.doors_close = doors_close\n self.operating_time = operating_time\n\n # Water level\n assert node_1 != node_3\n\n self.node_1 = node_1\n self.node_3 = node_3\n self.water_level = random.choice([node_1, node_3])\n\n def convert_chamber(self, environment, new_level):\n \"\"\" Convert the water level \"\"\"\n # Close the doors\n self.log_entry(\"Lock doors closing start\", environment.now, self.water_level, 0)\n yield environment.timeout(self.doors_close)\n self.log_entry(\"Lock doors closing stop\", environment.now, self.water_level, 0)\n\n # Convert the chamber\n self.log_entry(\n \"Lock chamber converting start\", environment.now, self.water_level, 0\n )\n \n # Water level will shift\n self.change_water_level(new_level)\n \n yield environment.timeout(self.operating_time)\n self.log_entry(\n \"Lock chamber converting stop\", environment.now, self.water_level, 0\n )\n\n # Open the doors\n self.log_entry(\"Lock doors opening start\", environment.now, self.water_level, 0)\n yield environment.timeout(self.doors_open)\n self.log_entry(\"Lock doors opening stop\", environment.now, self.water_level, 0)\n\n def change_water_level(self, side):\n \"\"\" Change water level and priorities in queue \"\"\"\n\n self.water_level = side\n\n for request in self.resource.queue:\n 
request.priority = -1 if request.priority == 0 else 0\n\n if request.priority == -1:\n self.resource.queue.insert(\n 0, self.resource.queue.pop(self.resource.queue.index(request))\n )\n else:\n self.resource.queue.insert(\n -1, self.resource.queue.pop(self.resource.queue.index(request))\n )\n\n\nclass Movable(Locatable, Routeable, Log):\n \"\"\"Mixin class: Something can move\n Used for object that can move with a fixed speed\n geometry: point used to track its current location\n v: speed\"\"\"\n\n def __init__(self, v=1, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.v = v\n self.wgs84 = pyproj.Geod(ellps=\"WGS84\")\n\n def move(self):\n \"\"\"determine distance between origin and destination, and\n yield the time it takes to travel it\n Assumption is that self.path is in the right order - vessel moves from route[0] to route[-1].\n \"\"\"\n self.distance = 0\n speed = self.v\n # Check if vessel is at correct location - if not, move to location\n if (\n self.geometry\n != nx.get_node_attributes(self.env.FG, \"geometry\")[self.route[0]]\n ):\n orig = self.geometry\n dest = nx.get_node_attributes(self.env.FG, \"geometry\")[self.route[0]]\n\n print(\"Origin\", orig)\n print(\"Destination\", dest)\n\n self.distance += self.wgs84.inv(\n shapely.geometry.asShape(orig).x,\n shapely.geometry.asShape(orig).y,\n shapely.geometry.asShape(dest).x,\n shapely.geometry.asShape(dest).y,\n )[2]\n\n yield self.env.timeout(self.distance / self.current_speed)\n self.log_entry(\"Sailing to start\", self.env.now, self.distance, dest)\n \n \n # Move over the path and log every step\n for node in enumerate(self.route):\n self.node = node[1]\n\n if node[0] + 2 <= len(self.route):\n origin = self.route[node[0]]\n destination = self.route[node[0] + 1]\n \n \n if \"Waiting area\" in self.env.FG.nodes[destination].keys():\n locks = self.env.FG.nodes[destination][\"Waiting area\"]\n for lock in locks:\n lock \n loc = self.route.index(destination)\n for r in self.route[loc:]:\n if 'Line-up area' in self.env.FG.nodes[r].keys(): \n wait_for_waiting_area = self.env.now\n access_waiting_area = lock.waiting_area[destination].request()\n yield access_waiting_area \n \n if wait_for_waiting_area != self.env.now:\n waiting = self.env.now - wait_for_waiting_area\n self.log_entry(\"Waiting to enter waiting area start\", wait_for_waiting_area, 0, nx.get_node_attributes(self.env.FG, \"geometry\")[origin],)\n self.log_entry(\"Waiting to enter waiting area stop\", self.env.now, waiting, nx.get_node_attributes(self.env.FG, \"geometry\")[origin],)\n \n if \"Waiting area\" in self.env.FG.nodes[origin].keys(): \n locks = self.env.FG.nodes[origin][\"Waiting area\"]\n for lock in locks:\n if 'departure_area' in locals():\n lock.departure[origin].release(departure_area)\n else:\n loc = self.route.index(origin)\n for r in self.route[loc:]:\n if 'Line-up area' in self.env.FG.nodes[r].keys():\n locks2 = self.env.FG.nodes[r][\"Line-up area\"]\n break\n \n for r2 in self.route[loc:]:\n if 'Lock' in self.env.FG.nodes[r2].keys():\n locks3 = self.env.FG.nodes[r2][\"Lock\"]\n break\n \n self.lock_name = []\n for lock3 in locks3:\n if lock3.water_level == self.route[self.route.index(r2)-1]:\n for lock2 in locks2:\n if lock2.name == lock3.name:\n if lock2.lock_queue_length == 0:\n self.lock_name = lock3.name\n break\n \n lock_queue_length = [];\n if self.lock_name == []:\n for lock2 in locks2:\n lock_queue_length.append(lock2.lock_queue_length)\n \n self.lock_name = 
locks2[lock_queue_length.index(min(lock_queue_length))].name\n \n for lock2 in locks2:\n if lock2.name == self.lock_name:\n lock2.lock_queue_length += 1\n \n for lock2 in locks2:\n if lock2.name == self.lock_name: \n self.v = 0.5*speed\n break\n \n wait_for_lineup_area = self.env.now\n lock.waiting_area[origin].release(access_waiting_area)\n access_lineup_area = lock2.line_up_area[r].request()\n yield access_lineup_area\n access_departure_waiting_area = lock.departure[origin].request()\n yield access_departure_waiting_area\n \n if wait_for_lineup_area != self.env.now:\n self.v = 0.25*speed\n waiting = self.env.now - wait_for_lineup_area\n self.log_entry(\"Waiting in waiting area start\", wait_for_lineup_area, 0, nx.get_node_attributes(self.env.FG, \"geometry\")[origin])\n self.log_entry(\"Waiting in waiting area stop\", self.env.now, waiting, nx.get_node_attributes(self.env.FG, \"geometry\")[origin]) \n \n lock.departure[origin].release(access_departure_waiting_area)\n \n if \"Line-up area\" in self.env.FG.nodes[origin].keys(): \n locks = self.env.FG.nodes[origin][\"Line-up area\"]\n for lock in locks:\n if lock.name == self.lock_name:\n if 'departure_lock' in locals():\n lock.departure[origin].release(departure_lock)\n else:\n loc = self.route.index(origin)\n for r in self.route[loc:]:\n if 'Lock' in self.env.FG.nodes[r].keys():\n locks = self.env.FG.nodes[r][\"Lock\"]\n for lock2 in locks:\n if lock2.name == self.lock_name:\n self.v = 0.25*speed\n wait_for_lock_entry = self.env.now\n if lock2.resource.users != []:\n yield self.env.timeout(lock2.doors_close) \n yield self.env.timeout(lock2.operating_time) \n \n access_lock = lock2.resource.request(priority=-1 if self.route[self.route.index(r)-1] == lock2.water_level else 0)\n yield access_lock\n access_departure_lineup_area = lock.departure[origin].request()\n yield access_departure_lineup_area\n \n if self.route[self.route.index(r)-1] != lock2.water_level:\n yield from lock2.convert_chamber(self.env, self.route[self.route.index(r)-1])\n \n for r2 in self.route[(loc+1):]:\n if 'Line-up area' in self.env.FG.nodes[r2].keys():\n locks = self.env.FG.nodes[r2][\"Line-up area\"]\n for lock3 in locks:\n if lock3.name == self.lock_name:\n departure_lock = lock3.departure[r2].request()\n yield departure_lock\n \n for r3 in self.route[(loc+1):]:\n if 'Waiting area' in self.env.FG.nodes[r3].keys():\n locks = self.env.FG.nodes[r3][\"Waiting area\"]\n for lock4 in locks:\n departure_area = lock4.departure[r3].request(priority=-1)\n yield departure_area\n \n if wait_for_lock_entry != self.env.now:\n waiting = self.env.now - wait_for_lock_entry\n self.log_entry(\"Waiting in line-up area start\", wait_for_lock_entry, 0, nx.get_node_attributes(self.env.FG, \"geometry\")[origin])\n self.log_entry(\"Waiting in line-up area stop\", self.env.now, waiting, nx.get_node_attributes(self.env.FG, \"geometry\")[origin]) \n \n lock.line_up_area[origin].release(access_lineup_area)\n lock.departure[origin].release(access_departure_lineup_area) \n\n for r4 in self.route[:(loc-1)]:\n if 'Line-up area' in self.env.FG.nodes[r4].keys():\n locks = self.env.FG.nodes[r4][\"Line-up area\"]\n for lock4 in locks:\n if lock4.name == self.lock_name:\n lock4.lock_queue_length -= 1 \n \n if \"Lock\" in self.env.FG.nodes[origin].keys():\n locks = self.env.FG.nodes[origin][\"Lock\"] \n for lock in locks:\n if lock.name == self.lock_name:\n self.log_entry(\"Passing lock start\", self.env.now, 0, nx.get_node_attributes(self.env.FG, \"geometry\")[origin])\n yield from 
lock.convert_chamber(self.env, destination)\n \n lock.resource.release(access_lock)\n passage_time = lock.doors_close + lock.operating_time + lock.doors_open\n self.log_entry(\"Passing lock stop\", self.env.now, passage_time, nx.get_node_attributes(self.env.FG, \"geometry\")[origin],)\n yield from self.pass_edge(origin, destination)\n self.v = speed\n \n else:\n # print('I am going to go to the next node {}'.format(destination)) \n yield from self.pass_edge(origin, destination)\n\n if node[0] + 2 == len(self.route):\n break\n\n # self.geometry = nx.get_node_attributes(self.env.FG, \"geometry\")[destination]\n\n logger.debug(\" distance: \" + \"%4.2f\" % self.distance + \" m\")\n logger.debug(\" sailing: \" + \"%4.2f\" % self.current_speed + \" m/s\")\n logger.debug(\n \" duration: \"\n + \"%4.2f\" % ((self.distance / self.current_speed) / 3600)\n + \" hrs\"\n )\n\n def pass_edge(self, origin, destination):\n edge = self.env.FG.edges[origin, destination]\n orig = nx.get_node_attributes(self.env.FG, \"geometry\")[origin]\n dest = nx.get_node_attributes(self.env.FG, \"geometry\")[destination]\n\n if 'geometry' in edge:\n edge_route = np.array(edge['geometry'])\n\n # check if edge is in the sailing direction, otherwise flip it\n distance_from_start = self.wgs84.inv(\n orig.x,\n orig.y,\n edge_route[0][0],\n edge_route[0][1],\n )[2]\n distance_from_stop = self.wgs84.inv(\n orig.x,\n orig.y,\n edge_route[-1][0],\n edge_route[-1][1],\n )[2]\n if distance_from_start>distance_from_stop:\n # when the distance from the starting point is greater than from the end point\n edge_route = np.flipud(np.array(edge['geometry']))\n\n for index, pt in enumerate(edge_route[:-1]):\n sub_orig = shapely.geometry.Point(edge_route[index][0], edge_route[index][1])\n sub_dest = shapely.geometry.Point(edge_route[index+1][0], edge_route[index+1][1])\n\n distance = self.wgs84.inv(\n shapely.geometry.asShape(sub_orig).x,\n shapely.geometry.asShape(sub_orig).y,\n shapely.geometry.asShape(sub_dest).x,\n shapely.geometry.asShape(sub_dest).y,\n )[2]\n self.distance += distance\n self.log_entry(\"Sailing from node {} to node {} sub edge {} start\".format(origin, destination, index), self.env.now, 0, sub_orig,)\n yield self.env.timeout(distance / self.current_speed)\n self.log_entry(\"Sailing from node {} to node {} sub edge {} stop\".format(origin, destination, index), self.env.now, 0, sub_dest,)\n self.geometry = dest\n # print(' My new origin is {}'.format(destination))\n else:\n distance = self.wgs84.inv(\n shapely.geometry.asShape(orig).x,\n shapely.geometry.asShape(orig).y,\n shapely.geometry.asShape(dest).x,\n shapely.geometry.asShape(dest).y,\n )[2]\n\n self.distance += distance\n arrival = self.env.now\n\n # Act based on resources\n if \"Resources\" in edge.keys():\n with self.env.FG.edges[origin, destination][\"Resources\"].request() as request:\n yield request\n\n if arrival != self.env.now:\n self.log_entry(\"Waiting to pass edge {} - {} start\".format(origin, destination), arrival, 0, orig,)\n self.log_entry(\"Waiting to pass edge {} - {} stop\".format(origin, destination), self.env.now, 0, orig,)\n\n self.log_entry(\"Sailing from node {} to node {} start\".format(origin, destination), self.env.now, 0, orig,)\n yield self.env.timeout(distance / self.current_speed)\n self.log_entry(\"Sailing from node {} to node {} stop\".format(origin, destination), self.env.now, 0, dest,)\n\n else:\n self.log_entry(\"Sailing from node {} to node {} start\".format(origin, destination), self.env.now, 0, orig,)\n yield 
self.env.timeout(distance / self.current_speed)\n self.log_entry(\"Sailing from node {} to node {} stop\".format(origin, destination), self.env.now, 0, dest,)\n \n @property\n def current_speed(self):\n return self.v\n\n\nclass ContainerDependentMovable(Movable, HasContainer):\n \"\"\"ContainerDependentMovable class\n Used for objects that move with a speed dependent on the container level\n compute_v: a function, given the fraction the container is filled (in [0,1]), returns the current speed\"\"\"\n\n def __init__(self, compute_v, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \"\"\"Initialization\"\"\"\n self.compute_v = compute_v\n self.wgs84 = pyproj.Geod(ellps=\"WGS84\")\n\n @property\n def current_speed(self):\n return self.compute_v(self.container.level / self.container.capacity)"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
radroid/project-one-sentiment-analysis | [
"cff3ebfe7a2d3ab6bc4fa6b93669aee995d5b43b"
] | [
"train/train.py"
] | [
"import argparse\nimport json\nimport os\nimport pickle\nimport sys\nimport sagemaker_containers\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.utils.data\n\nfrom model import LSTMClassifier\n\ndef model_fn(model_dir):\n \"\"\"Load the PyTorch model from the `model_dir` directory.\"\"\"\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the stored model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved word_dict.\n word_dict_path = os.path.join(model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'rb') as f:\n model.word_dict = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model\n\ndef _get_train_data_loader(batch_size, training_dir):\n print(\"Get train data loader.\")\n\n train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()\n train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()\n\n train_ds = torch.utils.data.TensorDataset(train_X, train_y)\n\n return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)\n\n\ndef train(model, train_loader, epochs, optimizer, loss_fn, device):\n \"\"\"\n This is the training method that is called by the PyTorch training script. The parameters\n passed are as follows:\n model - The PyTorch model that we wish to train.\n train_loader - The PyTorch DataLoader that should be used during training.\n epochs - The total number of epochs to train for.\n optimizer - The optimizer to use during training.\n loss_fn - The loss function used for training.\n device - Where the model and data should be loaded (gpu or cpu).\n \"\"\"\n for epoch in range(1, epochs + 1):\n model.train()\n total_loss = 0 \n for batch in train_loader: \n batch_X, batch_y = batch\n \n batch_X = batch_X.to(device)\n batch_y = batch_y.to(device)\n \n # TODO: Complete this train method to train the model provided.\n predictions = model(batch_X)\n loss = loss_fn(predictions, batch_y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n total_loss += loss.data.item()\n print(\"Epoch: {}, BCELoss: {}\".format(epoch, total_loss / len(train_loader)))\n\n\nif __name__ == '__main__':\n # All of the model parameters and training parameters are sent as arguments when the script\n # is executed. 
Here we set up an argument parser to easily access the parameters.\n\n parser = argparse.ArgumentParser()\n\n # Training Parameters\n parser.add_argument('--batch-size', type=int, default=512, metavar='N',\n help='input batch size for training (default: 512)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n\n # Model Parameters\n parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',\n help='size of the word embeddings (default: 32)')\n parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',\n help='size of the hidden dimension (default: 100)')\n parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',\n help='size of the vocabulary (default: 5000)')\n\n # SageMaker Parameters\n parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))\n parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])\n parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])\n\n args = parser.parse_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device {}.\".format(device))\n\n torch.manual_seed(args.seed)\n\n # Load the training data.\n train_loader = _get_train_data_loader(args.batch_size, args.data_dir)\n\n # Build the model.\n model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)\n\n with open(os.path.join(args.data_dir, \"word_dict.pkl\"), \"rb\") as f:\n model.word_dict = pickle.load(f)\n\n print(\"Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.\".format(\n args.embedding_dim, args.hidden_dim, args.vocab_size\n ))\n\n # Train the model.\n optimizer = optim.Adam(model.parameters())\n loss_fn = torch.nn.BCELoss()\n\n train(model, train_loader, args.epochs, optimizer, loss_fn, device)\n\n # Save the parameters used to construct the model\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'embedding_dim': args.embedding_dim,\n 'hidden_dim': args.hidden_dim,\n 'vocab_size': args.vocab_size,\n }\n torch.save(model_info, f)\n\n\t# Save the word_dict\n word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')\n with open(word_dict_path, 'wb') as f:\n pickle.dump(model.word_dict, f)\n\n\t# Save the model parameters\n model_path = os.path.join(args.model_dir, 'model.pth')\n with open(model_path, 'wb') as f:\n torch.save(model.cpu().state_dict(), f)\n"
] | [
[
"torch.load",
"torch.manual_seed",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.nn.BCELoss",
"torch.cuda.is_available",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhanwenchen/Scene-Graph-Benchmark.pytorch | [
"c86475bcbdaefcc1656a2890194355c2b32aa694"
] | [
"maskrcnn_benchmark/modeling/roi_heads/relation_head/model_transformer.py"
] | [
"\"\"\"\nBased on the implementation of https://github.com/jadore801120/attention-is-all-you-need-pytorch\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom maskrcnn_benchmark.modeling.utils import cat\nfrom .utils_motifs import obj_edge_vectors, to_onehot, nms_overlaps, encode_box_info\n\n\nclass ScaledDotProductAttention(nn.Module):\n ''' Scaled Dot-Product Attention '''\n\n def __init__(self, temperature, attn_dropout=0.1):\n super().__init__()\n self.temperature = temperature\n self.dropout = nn.Dropout(attn_dropout)\n self.softmax = nn.Softmax(dim=2)\n\n def forward(self, q, k, v, mask=None):\n \"\"\"\n Args:\n q (bsz, len_q, dim_q)\n k (bsz, len_k, dim_k)\n v (bsz, len_v, dim_v)\n Note: len_k==len_v, and dim_q==dim_k\n Returns:\n output (bsz, len_q, dim_v)\n attn (bsz, len_q, len_k)\n \"\"\"\n attn = torch.bmm(q, k.transpose(1, 2))\n attn = attn / self.temperature\n\n if mask is not None:\n attn = attn.masked_fill(mask, -np.inf)\n\n attn = self.softmax(attn)\n attn = self.dropout(attn)\n output = torch.bmm(attn, v)\n\n return output, attn\n\n\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super().__init__()\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k)\n self.w_ks = nn.Linear(d_model, n_head * d_k)\n self.w_vs = nn.Linear(d_model, n_head * d_v)\n nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))\n nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))\n\n self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.fc = nn.Linear(n_head * d_v, d_model)\n nn.init.xavier_normal_(self.fc.weight)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, q, k, v, mask=None):\n \"\"\"\n Args:\n q (bsz, len_q, dim_q)\n k (bsz, len_k, dim_k)\n v (bsz, len_v, dim_v)\n Note: len_k==len_v, and dim_q==dim_k\n Returns:\n output (bsz, len_q, d_model)\n attn (bsz, len_q, len_k)\n \"\"\"\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n\n sz_b, len_q, _ = q.size()\n sz_b, len_k, _ = k.size()\n sz_b, len_v, _ = v.size() # len_k==len_v\n\n residual = q\n\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk\n k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk\n v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv\n if mask is not None:\n mask = mask.repeat(n_head, 1, 1) # (n*b) x .. 
x ..\n output, attn = self.attention(q, k, v, mask=mask)\n\n output = output.view(n_head, sz_b, len_q, d_v)\n output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)\n\n output = self.dropout(self.fc(output))\n output = self.layer_norm(output + residual)\n\n return output, attn\n\n\nclass PositionwiseFeedForward(nn.Module):\n ''' A two-feed-forward-layer module '''\n\n def __init__(self, d_in, d_hid, dropout=0.1):\n super().__init__()\n self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise\n self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise\n self.layer_norm = nn.LayerNorm(d_in)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n \"\"\"\n Merge adjacent information. Equal to linear layer if kernel size is 1\n Args:\n x (bsz, len, dim)\n Returns:\n output (bsz, len, dim)\n \"\"\"\n residual = x\n output = x.transpose(1, 2)\n output = self.w_2(F.relu(self.w_1(output)))\n output = output.transpose(1, 2)\n output = self.dropout(output)\n output = self.layer_norm(output + residual)\n return output\n\n\nclass EncoderLayer(nn.Module):\n ''' Compose with two layers '''\n\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(EncoderLayer, self).__init__()\n self.slf_attn = MultiHeadAttention(\n n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(\n enc_input, enc_input, enc_input, mask=slf_attn_mask)\n enc_output *= non_pad_mask.float()\n\n enc_output = self.pos_ffn(enc_output)\n enc_output *= non_pad_mask.float()\n\n return enc_output, enc_slf_attn\n\n\nclass TransformerEncoder(nn.Module):\n \"\"\"\n A encoder model with self attention mechanism.\n \"\"\"\n\n def __init__(self, n_layers, n_head, d_k, d_v, d_model, d_inner, dropout=0.1):\n super().__init__()\n self.layer_stack = nn.ModuleList([\n EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n for _ in range(n_layers)])\n\n def forward(self, input_feats, num_objs):\n \"\"\"\n Args:\n input_feats [Tensor] (#total_box, d_model) : bounding box features of a batch\n num_objs [list of int] (bsz, ) : number of bounding box of each image\n Returns:\n enc_output [Tensor] (#total_box, d_model)\n \"\"\"\n original_input_feats = input_feats\n input_feats = input_feats.split(num_objs, dim=0)\n input_feats = nn.utils.rnn.pad_sequence(input_feats, batch_first=True)\n\n # -- Prepare masks\n bsz = len(num_objs)\n device = input_feats.device\n pad_len = max(num_objs)\n num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len)\n slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(\n 1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len)\n non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(\n num_objs_).unsqueeze(-1) # (bsz, pad_len, 1)\n\n # -- Forward\n enc_output = input_feats\n for enc_layer in self.layer_stack:\n enc_output, enc_slf_attn = enc_layer(\n enc_output,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask)\n\n enc_output = enc_output[non_pad_mask.squeeze(-1)]\n return enc_output\n\n\nclass TransformerContext(nn.Module):\n def __init__(self, config, obj_classes, rel_classes, in_channels):\n super().__init__()\n self.cfg = config\n # setting parameters\n if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX:\n self.mode = 'predcls' if 
self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL else 'sgcls'\n else:\n self.mode = 'sgdet'\n self.obj_classes = obj_classes\n self.rel_classes = rel_classes\n self.num_obj_cls = len(obj_classes)\n self.num_rel_cls = len(rel_classes)\n self.in_channels = in_channels\n self.obj_dim = in_channels\n self.embed_dim = self.cfg.MODEL.ROI_RELATION_HEAD.EMBED_DIM\n self.hidden_dim = self.cfg.MODEL.ROI_RELATION_HEAD.CONTEXT_HIDDEN_DIM\n self.nms_thresh = self.cfg.TEST.RELATION.LATER_NMS_PREDICTION_THRES\n\n self.dropout_rate = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.DROPOUT_RATE\n self.obj_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.OBJ_LAYER\n self.edge_layer = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.REL_LAYER\n self.num_head = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.NUM_HEAD\n self.inner_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.INNER_DIM\n self.k_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.KEY_DIM\n self.v_dim = self.cfg.MODEL.ROI_RELATION_HEAD.TRANSFORMER.VAL_DIM\n\n # the following word embedding layer should be initalize by glove.6B before using\n embed_vecs = obj_edge_vectors(self.obj_classes, wv_dir=self.cfg.GLOVE_DIR, wv_dim=self.embed_dim)\n\n self.obj_embed1 = nn.Embedding(self.num_obj_cls, self.embed_dim)\n self.obj_embed2 = nn.Embedding(self.num_obj_cls, self.embed_dim)\n with torch.no_grad():\n self.obj_embed1.weight.copy_(embed_vecs, non_blocking=True)\n self.obj_embed2.weight.copy_(embed_vecs, non_blocking=True)\n\n # position embedding\n self.bbox_embed = nn.Sequential(*[\n nn.Linear(9, 32), nn.ReLU(inplace=True), nn.Dropout(0.1),\n nn.Linear(32, 128), nn.ReLU(inplace=True), nn.Dropout(0.1),\n ])\n self.lin_obj = nn.Linear(self.in_channels + self.embed_dim + 128, self.hidden_dim)\n self.lin_edge = nn.Linear(self.embed_dim + self.hidden_dim + self.in_channels, self.hidden_dim)\n self.out_obj = nn.Linear(self.hidden_dim, self.num_obj_cls)\n self.context_obj = TransformerEncoder(self.obj_layer, self.num_head, self.k_dim,\n self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate)\n self.context_edge = TransformerEncoder(self.edge_layer, self.num_head, self.k_dim,\n self.v_dim, self.hidden_dim, self.inner_dim, self.dropout_rate)\n\n def forward(self, roi_features, proposals, logger=None):\n # labels will be used in DecoderRNN during training\n use_gt_label = self.training or self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL\n obj_labels = cat([proposal.get_field(\"labels\") for proposal in proposals], dim=0) if use_gt_label else None\n if obj_labels is not None:\n obj_labels = obj_labels.long()\n # label/logits embedding will be used as input\n if self.cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL:\n obj_embed = self.obj_embed1(obj_labels)\n else:\n obj_logits = cat([proposal.get_field(\"predict_logits\") for proposal in proposals], dim=0).detach()\n obj_embed = F.softmax(obj_logits, dim=1) @ self.obj_embed1.weight\n\n # bbox embedding will be used as input\n assert proposals[0].mode == 'xyxy'\n pos_embed = self.bbox_embed(encode_box_info(proposals))\n\n # encode objects with transformer\n obj_pre_rep = cat((roi_features, obj_embed, pos_embed), -1)\n num_objs = [len(p) for p in proposals]\n obj_pre_rep = self.lin_obj(obj_pre_rep)\n obj_feats = self.context_obj(obj_pre_rep, num_objs)\n\n # predict obj_dists and obj_preds\n if self.mode == 'predcls':\n obj_preds = obj_labels\n obj_dists = to_onehot(obj_preds, self.num_obj_cls)\n edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_labels)), dim=-1)\n else:\n obj_dists = 
self.out_obj(obj_feats)\n use_decoder_nms = self.mode == 'sgdet' and not self.training\n if use_decoder_nms:\n boxes_per_cls = [proposal.get_field('boxes_per_cls') for proposal in proposals]\n obj_preds = self.nms_per_cls(obj_dists, boxes_per_cls, num_objs)\n else:\n obj_preds = obj_dists[:, 1:].max(1)[1] + 1\n edge_pre_rep = cat((roi_features, obj_feats, self.obj_embed2(obj_preds)), dim=-1)\n\n # edge context\n edge_pre_rep = self.lin_edge(edge_pre_rep)\n edge_ctx = self.context_edge(edge_pre_rep, num_objs)\n\n return obj_dists, obj_preds, edge_ctx\n\n def nms_per_cls(self, obj_dists, boxes_per_cls, num_objs):\n obj_dists = obj_dists.split(num_objs, dim=0)\n obj_preds = []\n for i in range(len(num_objs)):\n is_overlap = nms_overlaps(boxes_per_cls[i]).cpu().numpy() >= self.nms_thresh # (#box, #box, #class)\n\n out_dists_sampled = F.softmax(obj_dists[i], -1).cpu().numpy()\n out_dists_sampled[:, 0] = -1\n\n out_label = obj_dists[i].new(num_objs[i]).fill_(0)\n\n # use a throwaway loop variable: reusing i here would shadow the image index of the outer loop\n for _ in range(num_objs[i]):\n box_ind, cls_ind = np.unravel_index(out_dists_sampled.argmax(), out_dists_sampled.shape)\n out_label[int(box_ind)] = int(cls_ind)\n out_dists_sampled[is_overlap[box_ind, :, cls_ind], cls_ind] = 0.0\n out_dists_sampled[box_ind] = -1.0 # This way we won't re-sample\n\n obj_preds.append(out_label.long())\n obj_preds = torch.cat(obj_preds, dim=0)\n return obj_preds\n"
] | [
[
"torch.nn.Softmax",
"torch.nn.Dropout",
"torch.nn.functional.softmax",
"torch.LongTensor",
"numpy.sqrt",
"torch.cat",
"numpy.power",
"torch.nn.utils.rnn.pad_sequence",
"torch.nn.init.xavier_normal_",
"torch.arange",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.nn.Linear",
"torch.no_grad",
"torch.bmm",
"torch.nn.Conv1d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lcwong0928/pyna | [
"44210812268cb3dbbaaee8caa58e48e4c47372e9"
] | [
"python/vcf/optimizer.py"
] | [
"import pandas as pd\nimport re\nimport json\nfrom pandas import ExcelWriter\n\nstandard = {'PIK3R1', 'PTEN', 'PIK3CG', 'TP53',\n 'PTPN11', 'PIK3CA', 'RB1', 'PDGFRA', 'MET',\n 'ATRX', 'CDK4', 'EGFR', 'IDH1', 'NF1',\n 'CDKN2A', 'MDM4', 'MDM4', 'MDM2', 'CDK6', 'LTBP4'}\n\n\ndef score(df):\n bait = set()\n for genes in df[\"GENE\"]:\n for gene in str(genes).split(\";\"):\n bait.add(gene)\n\n intersection = standard.intersection(bait)\n return intersection\n\n\ndef blood_freq(df, save=False):\n filtered = []\n for index, row in df.iterrows():\n ad = row['ALT COUNT_b']\n dp = row['DP_b']\n\n if pd.isnull(df.loc[index, 'ALT COUNT_b']):\n filtered.append(index)\n elif (ad == 0) or (ad == 1 and dp > 25) or (ad == 2 and dp > 50):\n filtered.append(index)\n\n df = df.loc[filtered]\n print(len(df), len(score(df)), score(df), standard - score(df))\n\n if save:\n dataframe_to_excel(\"blood_freq\", df)\n return df\n\n\n\n\ndef protein_coding(df, save=False):\n filtered = []\n for index, row in df.iterrows():\n\n if not pd.isnull(df.loc[index, 'BIOTYPE']):\n if \"protein_coding\" in row['BIOTYPE'].split(\";\"):\n filtered.append(index)\n\n df = df.loc[filtered]\n print(len(df), len(score(df)), score(df), standard - score(df))\n if save:\n dataframe_to_excel(\"biotype\", df)\n return df\n\n\ndef impact(df, save=False):\n df = df[df['IMPACT'].apply(lambda x: False if len(x.split(\";\")) == 1 and x.split(\";\")[0] == 'LOW' else True)]\n print(len(df), len(score(df)), score(df), standard - score(df))\n\n if save:\n dataframe_to_excel(\"impact\", df)\n return df\n\n\ndef annotation(df, save=False):\n parameters = pd.read_excel(open('/Users/lcwong/Desktop/PyNA/python/vcf/annotations/annotations.xlsx', 'rb'),\n sheet_name='Sheet1')\n\n important = set(parameters['Important'].tolist())\n high = set(parameters['High Only'].tolist())\n is_high = lambda line: \"HIGH\" in line['IMPACT'].split(\";\")\n\n filtered = []\n for index, row in df.iterrows():\n ann = set(re.split('[&;]+', row['ANNOTATION']))\n if len(ann.intersection(important)) != 0:\n filtered.append(index)\n elif len(ann.intersection(high)) != 0 and is_high(row):\n filtered.append(index)\n\n df = df.loc[filtered]\n\n print(len(df), len(score(df)), score(df), standard - score(df))\n if save:\n dataframe_to_excel(\"annotation\", df)\n return df\n\n\ndef tumor_freq(df, t, dp, save=False):\n df = df[(df['VAF_t'] >= t) & (df['DP_t'] >= dp)]\n\n print(len(df), len(score(df)), score(df), standard - score(df))\n\n if save:\n dataframe_to_excel(\"tumor_freq\", df)\n return df\n\n\n\ndef rna_freq(df, r, dp, save=False):\n df = df[(df['VAF_r'] >= r) & (df['DP_r'] >= dp)]\n\n print(len(df), len(score(df)), score(df), standard - score(df))\n\n if save:\n dataframe_to_excel(\"rna_freq\", df)\n return df\n\n# def rna_freq(df, r, dp, save=False):\n# df = df[(df['VAF_r'] <= .1) & (df['DP_r'] >= 10) | (df['VAF_r'] >= .9) & (df['DP_r'] >= 10)]\n#\n# print(len(df), len(score(df)), score(df), standard - score(df))\n#\n# if save:\n# dataframe_to_excel(\"rna_freq\", df)\n# return df\n\n\ndef length(df, l, save=False):\n df = df[df['LENGTH'].apply(lambda x: any(int(i) < l for i in x.split(\";\")))]\n\n print(len(df), len(score(df)), score(df), standard - score(df))\n if save:\n dataframe_to_excel(\"length\", df)\n return df\n\n\n\n\n\ndef dataframe_to_excel(filename, df):\n \"\"\"\n Writes dataframe values into CSV files\n\n :param filename:\n :param df: dataframe\n :return: None\n \"\"\"\n output = \"output/backup/\" + filename + \".xlsx\"\n writer = ExcelWriter(output)\n 
df.to_excel(writer, 'Sheet1')\n writer.save()\n\n\ndef tune(t, r, l):\n with open(\"vcf/pipeline_1/gatk.json\") as f:\n df = pd.DataFrame.from_dict(json.load(f))\n print(len(df), len(score(df)), score(df), standard - score(df))\n\n df = impact(df, save=False)\n df = annotation(df, save=False)\n\n # NOTE: tumor_freq and rna_freq also require a read-depth threshold (dp);\n # dp=0 keeps every variant, i.e. it disables the depth filter.\n df = tumor_freq(df, t, dp=0, save=False)\n df = rna_freq(df, r, dp=0, save=False)\n df = length(df, l, save=False)\n\n dataframe_to_excel(\"trial_\" + \"_\".join([str(t), str(r), str(l)]), df)\n print(df)\n\n\nif __name__ == '__main__':\n # df = pd.read_excel(open('/Users/lcwong/Desktop/PyNA/python/output/pipeline_1/Unfiltered.xlsx', 'rb'), sheet_name='Sheet1')\n # dataframe_to_excel(\"/Users/lcwong/Desktop/PyNA/python/output/pipeline_1/rna_freq.xlsx\", blood_freq(df, save=False))\n tune(1, 1, 1)\n"
] | [
[
"pandas.isnull",
"pandas.ExcelWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
lawrenceyan/mango-explorer | [
"ea16f2a27c51e9e5e0f79d491828ad250f970452"
] | [
"mango/account.py"
] | [
"# # ⚠ Warning\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT\n# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN\n# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n# [🥭 Mango Markets](https://mango.markets/) support is available at:\n# [Docs](https://docs.mango.markets/)\n# [Discord](https://discord.gg/67jySBhxrg)\n# [Twitter](https://twitter.com/mangomarkets)\n# [Github](https://github.com/blockworks-foundation)\n# [Email](mailto:[email protected])\n\nimport pandas\nimport typing\n\nfrom decimal import Decimal\nfrom solana.publickey import PublicKey\nfrom solana.rpc.types import MemcmpOpts\n\nfrom .accountinfo import AccountInfo\nfrom .addressableaccount import AddressableAccount\nfrom .cache import Cache, PerpMarketCache, RootBankCache, MarketCache\nfrom .context import Context\nfrom .encoding import encode_key\nfrom .group import Group, GroupSlot, GroupSlotPerpMarket\nfrom .instrumentvalue import InstrumentValue\nfrom .layouts import layouts\nfrom .metadata import Metadata\nfrom .openorders import OpenOrders\nfrom .orders import Side\nfrom .perpaccount import PerpAccount\nfrom .perpopenorders import PerpOpenOrders\nfrom .placedorder import PlacedOrder\nfrom .token import Instrument, Token\nfrom .tokenbank import TokenBank\nfrom .version import Version\n\n\n# # 🥭 AccountSlot class\n#\n# `AccountSlot` gathers slot items together instead of separate arrays.\n#\nclass AccountSlot:\n def __init__(self, index: int, base_instrument: Instrument, base_token_bank: typing.Optional[TokenBank], quote_token_bank: TokenBank, raw_deposit: Decimal, deposit: InstrumentValue, raw_borrow: Decimal, borrow: InstrumentValue, spot_open_orders: typing.Optional[PublicKey], perp_account: typing.Optional[PerpAccount]) -> None:\n self.index: int = index\n self.base_instrument: Instrument = base_instrument\n self.base_token_bank: typing.Optional[TokenBank] = base_token_bank\n self.quote_token_bank: TokenBank = quote_token_bank\n self.raw_deposit: Decimal = raw_deposit\n self.deposit: InstrumentValue = deposit\n self.raw_borrow: Decimal = raw_borrow\n self.borrow: InstrumentValue = borrow\n self.spot_open_orders: typing.Optional[PublicKey] = spot_open_orders\n self.perp_account: typing.Optional[PerpAccount] = perp_account\n\n @property\n def net_value(self) -> InstrumentValue:\n return self.deposit - self.borrow\n\n @property\n def raw_net_value(self) -> Decimal:\n return self.raw_deposit - self.raw_borrow\n\n def __str__(self) -> str:\n perp_account: str = \"None\"\n if self.perp_account is not None:\n perp_account = f\"{self.perp_account}\".replace(\"\\n\", \"\\n \")\n return f\"\"\"« AccountSlot [{self.index}] {self.base_instrument.symbol}\n Net Value: {self.net_value}\n Deposited: {self.deposit} (raw value: {self.raw_deposit})\n Borrowed: {self.borrow} (raw value {self.raw_borrow})\n Spot OpenOrders: {self.spot_open_orders or \"None\"}\n Perp Account:\n {perp_account}\n»\"\"\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n\n\n# # 🥭 Account class\n#\n# `Account` holds information about the account for a particular user/wallet for a particualr `Group`.\n#\nclass Account(AddressableAccount):\n @staticmethod\n def __sum_neg(dataframe: pandas.DataFrame, name: str) -> Decimal:\n 
return typing.cast(Decimal, dataframe.loc[dataframe[name] < 0, name].sum())\n\n @staticmethod\n def __sum_pos(dataframe: pandas.DataFrame, name: str) -> Decimal:\n return typing.cast(Decimal, dataframe.loc[dataframe[name] > 0, name].sum())\n\n def __init__(self, account_info: AccountInfo, version: Version,\n meta_data: Metadata, group_name: str, group_address: PublicKey, owner: PublicKey,\n info: str, shared_quote: AccountSlot,\n in_margin_basket: typing.Sequence[bool],\n slot_indices: typing.Sequence[bool],\n base_slots: typing.Sequence[AccountSlot],\n msrm_amount: Decimal, being_liquidated: bool, is_bankrupt: bool,\n advanced_orders: PublicKey, not_upgradable: bool, delegate: PublicKey) -> None:\n super().__init__(account_info)\n self.version: Version = version\n\n self.meta_data: Metadata = meta_data\n self.group_name: str = group_name\n self.group_address: PublicKey = group_address\n self.owner: PublicKey = owner\n self.info: str = info\n self.shared_quote: AccountSlot = shared_quote\n self.in_margin_basket: typing.Sequence[bool] = in_margin_basket\n self.slot_indices: typing.Sequence[bool] = slot_indices\n self.base_slots: typing.Sequence[AccountSlot] = base_slots\n self.msrm_amount: Decimal = msrm_amount\n self.being_liquidated: bool = being_liquidated\n self.is_bankrupt: bool = is_bankrupt\n self.advanced_orders: PublicKey = advanced_orders\n self.not_upgradable: bool = not_upgradable\n self.delegate: PublicKey = delegate\n\n @property\n def shared_quote_token(self) -> Token:\n token_bank = self.shared_quote.base_token_bank\n if token_bank is None:\n raise Exception(f\"Shared quote does not have a token: {self.shared_quote}\")\n return Token.ensure(token_bank.token)\n\n @property\n def slots(self) -> typing.Sequence[AccountSlot]:\n return [*[slot for slot in self.base_slots], self.shared_quote]\n\n @property\n def slots_by_index(self) -> typing.Sequence[typing.Optional[AccountSlot]]:\n mapped_items: typing.List[typing.Optional[AccountSlot]] = []\n slot_counter = 0\n for available in self.slot_indices:\n if available:\n mapped_items += [self.base_slots[slot_counter]]\n slot_counter += 1\n else:\n mapped_items += [None]\n mapped_items += [self.shared_quote]\n\n return mapped_items\n\n @property\n def deposits(self) -> typing.Sequence[InstrumentValue]:\n return [slot.deposit for slot in self.slots]\n\n @property\n def deposits_by_index(self) -> typing.Sequence[typing.Optional[InstrumentValue]]:\n return [slot.deposit if slot is not None else None for slot in self.slots_by_index]\n\n @property\n def borrows(self) -> typing.Sequence[InstrumentValue]:\n return [slot.borrow for slot in self.slots]\n\n @property\n def borrows_by_index(self) -> typing.Sequence[typing.Optional[InstrumentValue]]:\n return [slot.borrow if slot is not None else None for slot in self.slots_by_index]\n\n @property\n def net_values(self) -> typing.Sequence[InstrumentValue]:\n return [slot.net_value for slot in self.slots]\n\n @property\n def net_values_by_index(self) -> typing.Sequence[typing.Optional[InstrumentValue]]:\n return [slot.net_value if slot is not None else None for slot in self.slots_by_index]\n\n @property\n def spot_open_orders(self) -> typing.Sequence[PublicKey]:\n return [slot.spot_open_orders for slot in self.base_slots if slot.spot_open_orders is not None]\n\n @property\n def spot_open_orders_by_index(self) -> typing.Sequence[typing.Optional[PublicKey]]:\n return [slot.spot_open_orders if slot is not None else None for slot in self.slots_by_index]\n\n @property\n def perp_accounts(self) -> 
typing.Sequence[PerpAccount]:\n return [slot.perp_account for slot in self.base_slots if slot.perp_account is not None]\n\n @property\n def perp_accounts_by_index(self) -> typing.Sequence[typing.Optional[PerpAccount]]:\n return [slot.perp_account if slot is not None else None for slot in self.slots_by_index]\n\n @staticmethod\n def from_layout(layout: typing.Any, account_info: AccountInfo, version: Version, group: Group, cache: Cache) -> \"Account\":\n meta_data = Metadata.from_layout(layout.meta_data)\n owner: PublicKey = layout.owner\n info: str = layout.info\n mngo_token = group.liquidity_incentive_token\n in_margin_basket: typing.Sequence[bool] = list([bool(in_basket) for in_basket in layout.in_margin_basket])\n active_in_basket: typing.List[bool] = []\n slots: typing.List[AccountSlot] = []\n placed_orders_all_markets: typing.List[typing.List[PlacedOrder]] = [[]\n for _ in range(len(group.slot_indices) - 1)]\n for index, order_market in enumerate(layout.order_market):\n if order_market != 0xFF:\n side = Side.from_value(layout.order_side[index])\n id = layout.order_ids[index]\n client_id = layout.client_order_ids[index]\n placed_order = PlacedOrder(id, client_id, side)\n placed_orders_all_markets[int(order_market)] += [placed_order]\n\n quote_token_bank: TokenBank = group.shared_quote\n quote_token: Token = group.shared_quote_token\n\n for index in range(len(group.slots_by_index)):\n group_slot = group.slots_by_index[index]\n if group_slot is not None:\n instrument = group_slot.base_instrument\n token_bank = group_slot.base_token_bank\n raw_deposit: Decimal = Decimal(0)\n intrinsic_deposit: Decimal = Decimal(0)\n raw_borrow: Decimal = Decimal(0)\n intrinsic_borrow: Decimal = Decimal(0)\n if token_bank is not None:\n raw_deposit = layout.deposits[index]\n root_bank_cache: typing.Optional[RootBankCache] = token_bank.root_bank_cache_from_cache(\n cache, index)\n if root_bank_cache is None:\n raise Exception(f\"No root bank cache found for token {token_bank} at index {index}\")\n intrinsic_deposit = root_bank_cache.deposit_index * raw_deposit\n raw_borrow = layout.borrows[index]\n intrinsic_borrow = root_bank_cache.borrow_index * raw_borrow\n\n deposit = InstrumentValue(instrument, instrument.shift_to_decimals(intrinsic_deposit))\n borrow = InstrumentValue(instrument, instrument.shift_to_decimals(intrinsic_borrow))\n\n perp_open_orders = PerpOpenOrders(placed_orders_all_markets[index])\n\n perp_account = PerpAccount.from_layout(\n layout.perp_accounts[index],\n instrument,\n quote_token,\n perp_open_orders,\n group_slot.perp_lot_size_converter,\n mngo_token)\n spot_open_orders = layout.spot_open_orders[index]\n account_slot: AccountSlot = AccountSlot(index, instrument, token_bank, quote_token_bank,\n raw_deposit, deposit, raw_borrow, borrow,\n spot_open_orders, perp_account)\n\n slots += [account_slot]\n active_in_basket += [True]\n else:\n active_in_basket += [False]\n\n quote_index: int = len(layout.deposits) - 1\n raw_quote_deposit: Decimal = layout.deposits[quote_index]\n quote_root_bank_cache: typing.Optional[RootBankCache] = quote_token_bank.root_bank_cache_from_cache(\n cache, quote_index)\n if quote_root_bank_cache is None:\n raise Exception(f\"No root bank cache found for quote token {quote_token_bank} at index {index}\")\n intrinsic_quote_deposit = quote_root_bank_cache.deposit_index * raw_quote_deposit\n quote_deposit = InstrumentValue(quote_token, quote_token.shift_to_decimals(intrinsic_quote_deposit))\n raw_quote_borrow: Decimal = layout.borrows[quote_index]\n 
intrinsic_quote_borrow = quote_root_bank_cache.borrow_index * raw_quote_borrow\n quote_borrow = InstrumentValue(quote_token, quote_token.shift_to_decimals(intrinsic_quote_borrow))\n quote: AccountSlot = AccountSlot(len(layout.deposits) - 1, quote_token_bank.token, quote_token_bank,\n quote_token_bank, raw_quote_deposit, quote_deposit, raw_quote_borrow,\n quote_borrow, None, None)\n\n msrm_amount: Decimal = layout.msrm_amount\n being_liquidated: bool = bool(layout.being_liquidated)\n is_bankrupt: bool = bool(layout.is_bankrupt)\n advanced_orders: PublicKey = layout.advanced_orders\n not_upgradable: bool = bool(layout.not_upgradable)\n delegate: PublicKey = layout.delegate\n\n return Account(account_info, version, meta_data, group.name, group.address, owner, info, quote,\n in_margin_basket, active_in_basket, slots, msrm_amount, being_liquidated, is_bankrupt,\n advanced_orders, not_upgradable, delegate)\n\n @staticmethod\n def parse(account_info: AccountInfo, group: Group, cache: Cache) -> \"Account\":\n data = account_info.data\n if len(data) != layouts.MANGO_ACCOUNT.sizeof():\n raise Exception(\n f\"Account data length ({len(data)}) does not match expected size ({layouts.MANGO_ACCOUNT.sizeof()})\")\n\n layout = layouts.MANGO_ACCOUNT.parse(data)\n return Account.from_layout(layout, account_info, Version.V3, group, cache)\n\n @staticmethod\n def load(context: Context, address: PublicKey, group: Group) -> \"Account\":\n account_info = AccountInfo.load(context, address)\n if account_info is None:\n raise Exception(f\"Account account not found at address '{address}'\")\n cache: Cache = group.fetch_cache(context)\n return Account.parse(account_info, group, cache)\n\n @staticmethod\n def load_all(context: Context, group: Group) -> typing.Sequence[\"Account\"]:\n # mango_group is just after the METADATA, which is the first entry.\n group_offset = layouts.METADATA.sizeof()\n # owner is just after mango_group in the layout, and it's a PublicKey which is 32 bytes.\n filters = [\n MemcmpOpts(\n offset=group_offset,\n bytes=encode_key(group.address)\n )\n ]\n\n results = context.client.get_program_accounts(\n context.mango_program_address, memcmp_opts=filters, data_size=layouts.MANGO_ACCOUNT.sizeof())\n cache: Cache = group.fetch_cache(context)\n accounts: typing.List[Account] = []\n for account_data in results:\n address = PublicKey(account_data[\"pubkey\"])\n account_info = AccountInfo._from_response_values(account_data[\"account\"], address)\n account = Account.parse(account_info, group, cache)\n accounts += [account]\n return accounts\n\n @staticmethod\n def load_all_for_owner(context: Context, owner: PublicKey, group: Group) -> typing.Sequence[\"Account\"]:\n # mango_group is just after the METADATA, which is the first entry.\n group_offset = layouts.METADATA.sizeof()\n # owner is just after mango_group in the layout, and it's a PublicKey which is 32 bytes.\n owner_offset = group_offset + 32\n filters = [\n MemcmpOpts(\n offset=group_offset,\n bytes=encode_key(group.address)\n ),\n MemcmpOpts(\n offset=owner_offset,\n bytes=encode_key(owner)\n )\n ]\n\n results = context.client.get_program_accounts(\n context.mango_program_address, memcmp_opts=filters, data_size=layouts.MANGO_ACCOUNT.sizeof())\n cache: Cache = group.fetch_cache(context)\n accounts: typing.List[Account] = []\n for account_data in results:\n address = PublicKey(account_data[\"pubkey\"])\n account_info = AccountInfo._from_response_values(account_data[\"account\"], address)\n account = Account.parse(account_info, group, cache)\n 
accounts += [account]\n return accounts\n\n @staticmethod\n def load_all_for_delegate(context: Context, delegate: PublicKey, group: Group) -> typing.Sequence[\"Account\"]:\n # mango_group is just after the METADATA, which is the first entry.\n group_offset = layouts.METADATA.sizeof()\n # delegate is a PublicKey which is 32 bytes that ends 5 bytes before the end of the layout\n delegate_offset = layouts.MANGO_ACCOUNT.sizeof() - 37\n filters = [\n MemcmpOpts(\n offset=group_offset,\n bytes=encode_key(group.address)\n ),\n MemcmpOpts(\n offset=delegate_offset,\n bytes=encode_key(delegate)\n )\n ]\n\n results = context.client.get_program_accounts(\n context.mango_program_address, memcmp_opts=filters, data_size=layouts.MANGO_ACCOUNT.sizeof())\n cache: Cache = group.fetch_cache(context)\n accounts: typing.List[Account] = []\n for account_data in results:\n address = PublicKey(account_data[\"pubkey\"])\n account_info = AccountInfo._from_response_values(account_data[\"account\"], address)\n account = Account.parse(account_info, group, cache)\n accounts += [account]\n return accounts\n\n @staticmethod\n def load_for_owner_by_address(context: Context, owner: PublicKey, group: Group, account_address: typing.Optional[PublicKey]) -> \"Account\":\n if account_address is not None:\n return Account.load(context, account_address, group)\n\n accounts: typing.Sequence[Account] = Account.load_all_for_owner(context, owner, group)\n if len(accounts) > 1:\n raise Exception(f\"More than 1 Mango account for owner '{owner}' and which to choose not specified.\")\n\n return accounts[0]\n\n def slot_by_instrument_or_none(self, instrument: Instrument) -> typing.Optional[AccountSlot]:\n for slot in self.slots:\n if slot.base_instrument == instrument:\n return slot\n\n return None\n\n def slot_by_instrument(self, instrument: Instrument) -> AccountSlot:\n slot: typing.Optional[AccountSlot] = self.slot_by_instrument_or_none(instrument)\n if slot is not None:\n return slot\n\n raise Exception(f\"Could not find token {instrument} in account {self.address}\")\n\n def load_all_spot_open_orders(self, context: Context) -> typing.Dict[str, OpenOrders]:\n spot_open_orders_account_infos = AccountInfo.load_multiple(context, self.spot_open_orders)\n spot_open_orders_account_infos_by_address = {\n str(account_info.address): account_info for account_info in spot_open_orders_account_infos}\n spot_open_orders: typing.Dict[str, OpenOrders] = {}\n for slot in self.base_slots:\n if slot.spot_open_orders is not None:\n account_info = spot_open_orders_account_infos_by_address[str(slot.spot_open_orders)]\n oo = OpenOrders.parse(account_info, slot.base_instrument.decimals,\n self.shared_quote.base_instrument.decimals)\n spot_open_orders[str(slot.spot_open_orders)] = oo\n return spot_open_orders\n\n def update_spot_open_orders_for_market(self, spot_market_index: int, spot_open_orders: PublicKey) -> None:\n item_to_update = self.slots_by_index[spot_market_index]\n if item_to_update is None:\n raise Exception(f\"Could not find AccountBasketItem in Account {self.address} at index {spot_market_index}.\")\n item_to_update.spot_open_orders = spot_open_orders\n\n def to_dataframe(self, group: Group, all_spot_open_orders: typing.Dict[str, OpenOrders], cache: Cache) -> pandas.DataFrame:\n asset_data = []\n for slot in self.slots:\n market_cache: typing.Optional[MarketCache] = group.market_cache_from_cache_or_none(\n cache, slot.base_instrument)\n price: InstrumentValue = group.token_price_from_cache(cache, slot.base_instrument)\n\n spot_open_orders: 
typing.Optional[OpenOrders] = None\n spot_health_base: Decimal = Decimal(0)\n spot_health_quote: Decimal = Decimal(0)\n spot_bids_base_net: Decimal = Decimal(0)\n spot_asks_base_net: Decimal = Decimal(0)\n if slot.spot_open_orders is not None:\n spot_open_orders = all_spot_open_orders[str(slot.spot_open_orders)]\n if spot_open_orders is None:\n raise Exception(f\"OpenOrders address {slot.spot_open_orders} at index {slot.index} not loaded.\")\n\n # Here's a comment from ckamm in https://github.com/blockworks-foundation/mango-v3/pull/78/files\n # that describes some of the health calculations.\n #\n # // Two \"worst-case\" scenarios are considered:\n # // 1. All bids are executed at current price, producing a base amount of bids_base_net\n # // when all quote_locked are converted to base.\n # // 2. All asks are executed at current price, producing a base amount of asks_base_net\n # // because base_locked would be converted to quote.\n #\n # // Report the scenario that would have a worse outcome on health.\n # //\n # // Explanation: This function returns (base, quote) and the values later get used in\n # // health += (if base > 0 { asset_weight } else { liab_weight }) * base + quote\n # // and here we return the scenario that will increase health the least.\n # //\n # // Correctness proof:\n # // - always bids_base_net >= asks_base_net\n # // - note that scenario 1 returns (a + b, c)\n # // and scenario 2 returns (a, c + b), and b >= 0, c >= 0\n # // - if a >= 0: scenario 1 will lead to less health as asset_weight <= 1.\n # // - if a < 0 and b <= -a: scenario 2 will lead to less health as liab_weight >= 1.\n # // - if a < 0 and b > -a:\n # // The health contributions of both scenarios are identical if\n # // asset_weight * (a + b) + c = liab_weight * a + c + b\n # // <=> b = (asset_weight - liab_weight) / (1 - asset_weight) * a\n # // <=> b = -2 a since asset_weight + liab_weight = 2 by weight construction\n # // So the worse scenario switches when a + b = -a.\n # // That means scenario 1 leads to less health whenever |a + b| > |a|.\n\n # base total if all bids were executed\n spot_bids_base_net = slot.net_value.value + \\\n (spot_open_orders.quote_token_locked / price.value) + spot_open_orders.base_token_total\n\n # base total if all asks were executed\n spot_asks_base_net = slot.net_value.value + spot_open_orders.base_token_free\n\n if abs(spot_bids_base_net) > abs(spot_asks_base_net):\n spot_health_base = spot_bids_base_net\n spot_health_quote = spot_open_orders.quote_token_free\n else:\n spot_health_base = spot_asks_base_net\n spot_health_quote = (spot_open_orders.base_token_locked * price.value) + \\\n spot_open_orders.quote_token_total\n\n # From Daffy in Discord 2021-11-23: https://discord.com/channels/791995070613159966/857699200279773204/912705017767677982\n # --\n # There's a long_funding field on the PerpMarketCache which holds the current native USDC per\n # base position accrued. The long_settled_funding stores the last time funding was settled for\n # this particular user. So the funding owed is\n # (PerpMarketCache.long_funding - PerpAccount.long_settled_funding) * PerpAccount.base_position\n # if base position greater than 0 (i.e. long)\n #\n # And we use short_funding if base_position < 0\n #\n # The long_funding field in PerpMarketCache changes across time according to the\n # update_funding() function. 
If orderbook is above index price, then long_funding and\n # short_funding both increase.\n #\n # Usually long_funding and short_funding will be the same unless there was a socialized loss\n # event. IF you have negative equity and insurance fund is empty, then half of the negative\n # equity goes to longs and half goes to shorts. The way that's done is by increasing\n # long_funding and decreasing short_funding by same amount.\n #\n # But unless there's a socialized loss, long_funding == short_funding\n # --\n perp_position: Decimal = Decimal(0)\n perp_notional_position: Decimal = Decimal(0)\n perp_value: Decimal = Decimal(0)\n perp_health_base: Decimal = Decimal(0)\n perp_health_quote: Decimal = Decimal(0)\n unsettled_funding: Decimal = Decimal(0)\n perp_health_base_value: Decimal = Decimal(0)\n perp_asset: Decimal = Decimal(0)\n perp_liability: Decimal = Decimal(0)\n perp_current_value: Decimal = Decimal(0)\n if slot.perp_account is not None and not slot.perp_account.empty and market_cache is not None:\n perp_market: typing.Optional[GroupSlotPerpMarket] = group.perp_markets_by_index[slot.index]\n if perp_market is None:\n raise Exception(f\"Could not find perp market in Group at index {slot.index}.\")\n\n perp_position = slot.perp_account.lot_size_converter.base_size_lots_to_number(\n slot.perp_account.base_position)\n perp_notional_position = perp_position * price.value\n perp_value = slot.perp_account.quote_position_raw\n cached_perp_market: typing.Optional[PerpMarketCache] = market_cache.perp_market\n if cached_perp_market is None:\n raise Exception(f\"Could not find perp market in Cache at index {slot.index}.\")\n\n unsettled_funding = slot.perp_account.unsettled_funding(cached_perp_market)\n bids_quantity = slot.perp_account.lot_size_converter.base_size_lots_to_number(\n slot.perp_account.bids_quantity)\n asks_quantity = slot.perp_account.lot_size_converter.base_size_lots_to_number(\n slot.perp_account.asks_quantity)\n taker_quote = slot.perp_account.lot_size_converter.quote_size_lots_to_number(\n slot.perp_account.taker_quote)\n\n perp_bids_base_net: Decimal = perp_position + bids_quantity\n perp_asks_base_net: Decimal = perp_position - asks_quantity\n\n perp_asset = slot.perp_account.asset_value(cached_perp_market, price.value)\n perp_liability = slot.perp_account.liability_value(cached_perp_market, price.value)\n perp_current_value = slot.perp_account.current_value(cached_perp_market, price.value)\n\n quote_pos = slot.perp_account.quote_position / (10 ** self.shared_quote_token.decimals)\n if abs(perp_bids_base_net) > abs(perp_asks_base_net):\n perp_health_base = perp_bids_base_net\n perp_health_quote = (quote_pos + unsettled_funding) + \\\n taker_quote - (bids_quantity * price.value)\n else:\n perp_health_base = perp_asks_base_net\n perp_health_quote = (quote_pos + unsettled_funding) + \\\n taker_quote + (asks_quantity * price.value)\n perp_health_base_value = perp_health_base * price.value\n\n group_slot: typing.Optional[GroupSlot] = None\n if market_cache is not None:\n group_slot = group.slot_by_instrument(slot.base_instrument)\n\n spot_init_asset_weight: Decimal = Decimal(0)\n spot_maint_asset_weight: Decimal = Decimal(0)\n spot_init_liab_weight: Decimal = Decimal(0)\n spot_maint_liab_weight: Decimal = Decimal(0)\n if group_slot is not None and group_slot.spot_market is not None:\n spot_init_asset_weight = group_slot.spot_market.init_asset_weight\n spot_maint_asset_weight = group_slot.spot_market.maint_asset_weight\n spot_init_liab_weight = 
group_slot.spot_market.init_liab_weight\n spot_maint_liab_weight = group_slot.spot_market.maint_liab_weight\n elif slot.base_instrument == self.shared_quote_token:\n spot_init_asset_weight = Decimal(1)\n spot_maint_asset_weight = Decimal(1)\n spot_init_liab_weight = Decimal(1)\n spot_maint_liab_weight = Decimal(1)\n\n perp_init_asset_weight: Decimal = Decimal(0)\n perp_maint_asset_weight: Decimal = Decimal(0)\n perp_init_liab_weight: Decimal = Decimal(0)\n perp_maint_liab_weight: Decimal = Decimal(0)\n if group_slot is not None and group_slot.perp_market is not None:\n perp_init_asset_weight = group_slot.perp_market.init_asset_weight\n perp_maint_asset_weight = group_slot.perp_market.maint_asset_weight\n perp_init_liab_weight = group_slot.perp_market.init_liab_weight\n perp_maint_liab_weight = group_slot.perp_market.maint_liab_weight\n elif slot.base_instrument == self.shared_quote_token:\n perp_init_asset_weight = Decimal(1)\n perp_maint_asset_weight = Decimal(1)\n perp_init_liab_weight = Decimal(1)\n perp_maint_liab_weight = Decimal(1)\n\n base_open_unsettled: Decimal = Decimal(0)\n base_open_locked: Decimal = Decimal(0)\n base_open_total: Decimal = Decimal(0)\n quote_open_unsettled: Decimal = Decimal(0)\n quote_open_locked: Decimal = Decimal(0)\n if spot_open_orders is not None:\n base_open_unsettled = spot_open_orders.base_token_free\n base_open_locked = spot_open_orders.base_token_locked\n base_open_total = spot_open_orders.base_token_total\n quote_open_unsettled = (spot_open_orders.quote_token_free\n + spot_open_orders.referrer_rebate_accrued)\n quote_open_locked = spot_open_orders.quote_token_locked\n base_total: Decimal = slot.deposit.value - slot.borrow.value + base_open_total\n base_total_value: Decimal = base_total * price.value\n spot_init_value: Decimal\n spot_maint_value: Decimal\n if base_total_value >= 0:\n spot_init_value = base_total_value * spot_init_asset_weight\n spot_maint_value = base_total_value * spot_maint_asset_weight\n else:\n spot_init_value = base_total_value * spot_init_liab_weight\n spot_maint_value = base_total_value * spot_maint_liab_weight\n perp_init_value: Decimal\n perp_maint_value: Decimal\n if perp_health_base >= 0:\n perp_init_value = perp_notional_position * perp_init_asset_weight\n perp_maint_value = perp_notional_position * perp_maint_asset_weight\n perp_init_health_base_value = perp_health_base_value * perp_init_asset_weight\n perp_maint_health_base_value = perp_health_base_value * perp_maint_asset_weight\n else:\n perp_init_value = perp_notional_position * perp_init_liab_weight\n perp_maint_value = perp_notional_position * perp_maint_liab_weight\n perp_init_health_base_value = perp_health_base_value * perp_init_liab_weight\n perp_maint_health_base_value = perp_health_base_value * perp_maint_liab_weight\n data = {\n \"Name\": slot.base_instrument.name,\n \"Symbol\": slot.base_instrument.symbol,\n \"Spot\": base_total,\n \"SpotDeposit\": slot.deposit.value,\n \"SpotBorrow\": slot.borrow.value,\n \"SpotValue\": base_total_value,\n \"SpotInitValue\": spot_init_value,\n \"SpotMaintValue\": spot_maint_value,\n \"PerpInitValue\": perp_init_value,\n \"PerpMaintValue\": perp_maint_value,\n \"BaseUnsettled\": base_open_unsettled,\n \"BaseLocked\": base_open_locked,\n \"QuoteUnsettled\": quote_open_unsettled,\n \"QuoteLocked\": quote_open_locked,\n \"BaseUnsettledInMarginBasket\": base_open_unsettled if slot.index < len(self.in_margin_basket) and self.in_margin_basket[slot.index] else Decimal(0),\n \"BaseLockedInMarginBasket\": base_open_locked if 
slot.index < len(self.in_margin_basket) and self.in_margin_basket[slot.index] else Decimal(0),\n \"QuoteUnsettledInMarginBasket\": quote_open_unsettled if slot.index < len(self.in_margin_basket) and self.in_margin_basket[slot.index] else Decimal(0),\n \"QuoteLockedInMarginBasket\": quote_open_locked if slot.index < len(self.in_margin_basket) and self.in_margin_basket[slot.index] else Decimal(0),\n \"PerpPositionSize\": perp_position,\n \"PerpNotionalPositionSize\": perp_notional_position,\n \"PerpValue\": perp_value,\n \"UnsettledFunding\": unsettled_funding,\n \"SpotInitAssetWeight\": spot_init_asset_weight,\n \"SpotMaintAssetWeight\": spot_maint_asset_weight,\n \"SpotInitLiabilityWeight\": spot_init_liab_weight,\n \"SpotMaintLiabilityWeight\": spot_maint_liab_weight,\n \"PerpInitAssetWeight\": perp_init_asset_weight,\n \"PerpMaintAssetWeight\": perp_maint_asset_weight,\n \"PerpInitLiabilityWeight\": perp_init_liab_weight,\n \"PerpMaintLiabilityWeight\": perp_maint_liab_weight,\n \"SpotHealthBase\": spot_health_base,\n \"SpotHealthQuote\": spot_health_quote,\n \"PerpHealthBase\": perp_health_base,\n \"PerpHealthBaseValue\": perp_health_base_value,\n \"PerpInitHealthBaseValue\": perp_init_health_base_value,\n \"PerpMaintHealthBaseValue\": perp_maint_health_base_value,\n \"PerpHealthQuote\": perp_health_quote,\n \"PerpAsset\": perp_asset,\n \"PerpLiability\": perp_liability,\n \"PerpCurrentValue\": perp_current_value,\n }\n asset_data += [data]\n frame: pandas.DataFrame = pandas.DataFrame(asset_data)\n return frame\n\n def weighted_assets(self, frame: pandas.DataFrame, weighting_name: str = \"\") -> typing.Tuple[Decimal, Decimal]:\n non_quote = frame.loc[frame[\"Symbol\"] != self.shared_quote_token.symbol]\n quote = frame.loc[frame[\"Symbol\"] == self.shared_quote_token.symbol, \"SpotValue\"].sum()\n quote += frame[\"PerpHealthQuote\"].sum()\n quote += frame[\"QuoteUnsettledInMarginBasket\"].sum()\n\n assets = Decimal(0)\n liabilities = Decimal(0)\n if quote > 0:\n assets = quote\n else:\n liabilities = quote\n\n spot_value_key = f\"Spot{weighting_name}Value\"\n perp_value_key = f\"Perp{weighting_name}HealthBaseValue\"\n\n liabilities += Account.__sum_neg(non_quote, spot_value_key) + Account.__sum_neg(non_quote, perp_value_key)\n assets += Account.__sum_pos(non_quote, spot_value_key) + Account.__sum_pos(non_quote, perp_value_key)\n\n return assets, liabilities\n\n def unweighted_assets(self, frame: pandas.DataFrame) -> typing.Tuple[Decimal, Decimal]:\n non_quote = frame.loc[frame[\"Symbol\"] != self.shared_quote_token.symbol]\n quote = frame.loc[frame[\"Symbol\"] == self.shared_quote_token.symbol, \"SpotValue\"].sum()\n\n assets = Decimal(0)\n liabilities = Decimal(0)\n if quote > 0:\n assets = quote\n else:\n liabilities = quote\n\n liabilities += Account.__sum_neg(non_quote, \"SpotValue\") + non_quote['PerpLiability'].sum()\n\n assets += Account.__sum_pos(non_quote, \"SpotValue\") + \\\n non_quote['PerpAsset'].sum() + \\\n Account.__sum_pos(non_quote, \"QuoteUnsettled\")\n\n return assets, liabilities\n\n def init_health(self, frame: pandas.DataFrame) -> Decimal:\n assets, liabilities = self.weighted_assets(frame, \"Init\")\n return assets + liabilities\n\n def maint_health(self, frame: pandas.DataFrame) -> Decimal:\n assets, liabilities = self.weighted_assets(frame, \"Maint\")\n return assets + liabilities\n\n def init_health_ratio(self, frame: pandas.DataFrame) -> Decimal:\n assets, liabilities = self.weighted_assets(frame, \"Init\")\n if liabilities == 0:\n return Decimal(100)\n\n 
return ((assets / -liabilities) - 1) * 100\n\n def maint_health_ratio(self, frame: pandas.DataFrame) -> Decimal:\n assets, liabilities = self.weighted_assets(frame, \"Maint\")\n if liabilities == 0:\n return Decimal(100)\n\n return ((assets / -liabilities) - 1) * 100\n\n def total_value(self, frame: pandas.DataFrame) -> Decimal:\n assets, liabilities = self.unweighted_assets(frame)\n\n return assets + liabilities\n\n def is_liquidatable(self, frame: pandas.DataFrame) -> bool:\n if self.being_liquidated and self.init_health(frame) < 0:\n return True\n elif self.maint_health(frame) < 0:\n return True\n return False\n\n def leverage(self, frame: pandas.DataFrame) -> Decimal:\n assets, liabilities = self.unweighted_assets(frame)\n if assets <= 0:\n return Decimal(0)\n return -liabilities / (assets + liabilities)\n\n def __str__(self) -> str:\n info = f\"'{self.info}'\" if self.info else \"(un-named)\"\n shared_quote: str = f\"{self.shared_quote}\".replace(\"\\n\", \"\\n \")\n slot_count = len(self.base_slots)\n slots = \"\\n \".join([f\"{item}\".replace(\"\\n\", \"\\n \") for item in self.base_slots])\n\n symbols: typing.Sequence[str] = [slot.base_instrument.symbol for slot in self.base_slots]\n in_margin_basket = \", \".join(symbols) or \"None\"\n return f\"\"\"« Account {info}, {self.version} [{self.address}]\n {self.meta_data}\n Owner: {self.owner}\n Delegated To: {self.delegate}\n Group: « Group '{self.group_name}' [{self.group_address}] »\n Advanced Orders Account: {self.advanced_orders}\n MSRM: {self.msrm_amount}\n Bankrupt? {self.is_bankrupt}\n Upgradable? {not self.not_upgradable}\n Being Liquidated? {self.being_liquidated}\n Shared Quote Token:\n {shared_quote}\n In Basket: {in_margin_basket}\n Basket [{slot_count} in basket]:\n {slots}\n»\"\"\"\n\n def __repr__(self) -> str:\n return f\"{self}\"\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
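
The long ckamm comment embedded in to_dataframe above explains how the worst-case spot scenario is chosen for health. A minimal standalone sketch of that selection rule, using plain Decimal arithmetic and hypothetical position numbers rather than the mango-explorer types:

from decimal import Decimal

def worst_case_spot_health(net_base, price, base_free, base_locked, base_total,
                           quote_free, quote_locked, quote_total):
    # Scenario 1: every bid fills, so locked quote converts to base.
    bids_base_net = net_base + (quote_locked / price) + base_total
    # Scenario 2: every ask fills, so locked base converts to quote.
    asks_base_net = net_base + base_free
    # Report the scenario with the larger absolute base exposure;
    # per the comment above, that is the one that lowers health the most.
    if abs(bids_base_net) > abs(asks_base_net):
        return bids_base_net, quote_free
    return asks_base_net, (base_locked * price) + quote_total

# Hypothetical account: short 2 base units at price 10, one resting bid locking 50 quote.
base, quote = worst_case_spot_health(
    net_base=Decimal(-2), price=Decimal(10),
    base_free=Decimal(0), base_locked=Decimal(0), base_total=Decimal(0),
    quote_free=Decimal(0), quote_locked=Decimal(50), quote_total=Decimal(50))
print(base, quote)  # 3 0 -> the all-bids-fill scenario dominates (|3| > |-2|)
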
vasslitvinov/arkouda | [
"7751a512bd93211c4739d859462a7f1ae9ff8b4a"
] | [
"benchmarks/setops.py"
] | [
"#!/usr/bin/env python3 \n\nimport time, argparse\nimport numpy as np\nimport arkouda as ak\n\nOPS = ('intersect1d', 'union1d', 'setxor1d', 'setdiff1d')\nTYPES = ('int64',)\n\ndef time_ak_setops(N_per_locale, trials, dtype):\n print(\">>> arkouda setops\")\n cfg = ak.get_config()\n N = N_per_locale * cfg[\"numLocales\"]\n print(\"numLocales = {}, N = {:,}\".format(cfg[\"numLocales\"], N))\n if dtype == 'int64':\n a = ak.randint(0, 2**32, N)\n b = ak.randint(0, 2**32, N)\n \n timings = {op: [] for op in OPS}\n results = {}\n for i in range(trials):\n for op in timings.keys():\n fxn = getattr(ak, op)\n start = time.time()\n r = fxn(a,b)\n end = time.time()\n timings[op].append(end - start)\n results[op] = r\n tavg = {op: sum(t) / trials for op, t in timings.items()}\n\n for op, t in tavg.items():\n print(\" {} Average time = {:.4f} sec\".format(op, t))\n bytes_per_sec = (a.size * a.itemsize * 2) / t\n print(\" {} Average rate = {:.2f} GiB/sec\".format(op, bytes_per_sec/2**30))\n\ndef time_np_setops(N, trials, dtype):\n print(\">>> numpy setops\")\n print(\"N = {:,}\".format(N))\n if dtype == 'int64':\n a = np.random.randint(0, 2**32, N)\n b = np.random.randint(0, 2**32, N)\n \n timings = {op: [] for op in OPS}\n results = {}\n for i in range(trials):\n for op in timings.keys():\n fxn = getattr(np, op)\n start = time.time()\n r = fxn(a,b)\n end = time.time()\n timings[op].append(end - start)\n results[op] = r\n tavg = {op: sum(t) / trials for op, t in timings.items()}\n\n for op, t in tavg.items():\n print(\" {} Average time = {:.4f} sec\".format(op, t))\n bytes_per_sec = (a.size * a.itemsize * 2) / t\n print(\" {} Average rate = {:.2f} GiB/sec\".format(op, bytes_per_sec/2**30))\n\ndef check_correctness(dtype):\n N = 10**4\n if dtype == 'int64':\n a = np.random.randint(0, 2**32, N)\n b = np.random.randint(0, 2**32, N)\n\n for op in OPS:\n npa = a\n npb = b\n aka = ak.array(a)\n akb = ak.array(b)\n fxn = getattr(np, op)\n npr = fxn(npa, npb)\n fxn = getattr(ak, op)\n akr = fxn(aka, akb)\n np.isclose(npr, akr)\n\ndef create_parser():\n parser = argparse.ArgumentParser(description=\"Run the setops benchmarks: intersect1d, union1d, setdiff1d, setxor1d\")\n parser.add_argument('hostname', help='Hostname of arkouda server')\n parser.add_argument('port', type=int, help='Port of arkouda server')\n parser.add_argument('-n', '--size', type=int, default=10**8, help='Problem size: length of arrays A and B')\n parser.add_argument('-t', '--trials', type=int, default=1, help='Number of times to run the benchmark')\n parser.add_argument('-d', '--dtype', default='int64', help='Dtype of array ({})'.format(', '.join(TYPES)))\n parser.add_argument('--numpy', default=False, action='store_true', help='Run the same operation in NumPy to compare performance.')\n parser.add_argument('--correctness-only', default=False, action='store_true', help='Only check correctness, not performance.')\n return parser\n\nif __name__ == \"__main__\":\n import sys\n parser = create_parser()\n args = parser.parse_args()\n if args.dtype not in TYPES:\n raise ValueError(\"Dtype must be {}, not {}\".format('/'.join(TYPES), args.dtype))\n\n ak.verbose = False\n ak.connect(args.hostname, args.port)\n\n if args.correctness_only:\n for dtype in TYPES:\n check_correctness(dtype)\n sys.exit(0)\n \n print(\"array size = {:,}\".format(args.size))\n print(\"number of trials = \", args.trials)\n time_ak_setops(args.size, args.trials, args.dtype)\n if args.numpy:\n time_np_setops(args.size, args.trials, args.dtype)\n print(\"Verifying agreement 
between arkouda and NumPy on small problem... \", end=\"\")\n check_correctness(args.dtype)\n print(\"CORRECT\")\n \n sys.exit(0)\n"
] | [
[
"numpy.isclose",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
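
The benchmark above reports throughput as (a.size * a.itemsize * 2) / t, counting the bytes of both input arrays. A NumPy-only sketch of the same measurement loop that runs without an arkouda server (the size and trial count here are illustrative). As an aside, check_correctness in the record computes np.isclose(npr, akr) but discards the result; wrapping it in an assert would make the "CORRECT" print meaningful.

import time
import numpy as np

OPS = ('intersect1d', 'union1d', 'setxor1d', 'setdiff1d')
N, TRIALS = 10**6, 3
a = np.random.randint(0, 2**32, N)
b = np.random.randint(0, 2**32, N)

for op in OPS:
    fxn = getattr(np, op)
    timings = []
    for _ in range(TRIALS):
        start = time.time()
        fxn(a, b)
        timings.append(time.time() - start)
    t = sum(timings) / TRIALS
    rate = (a.size * a.itemsize * 2) / t / 2**30  # bytes of both inputs, in GiB/sec
    print("  {} Average time = {:.4f} sec, rate = {:.2f} GiB/sec".format(op, t, rate))
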
lucaslingle/memn2n | [
"50007bc79d0bdaaf25723d78e86a1ee57bcf2237"
] | [
"babi_dataset_utils.py"
] | [
"import os\nimport re\nimport numpy as np\nimport pickle\nimport math\nimport errno\nimport collections\n\nclass Sentence:\n def __init__(self, string):\n self.string = string\n\n def get_tokens(self, drop_punctuation=True):\n if drop_punctuation:\n tokens = re.findall(r\"[\\w]+\", self.string)\n return tokens\n else:\n tokens_punctuation_and_whitespaces = re.split('(\\W+)?', self.string)\n tokens_and_punctuation = [x.strip() for x in tokens_punctuation_and_whitespaces if x.strip()]\n return tokens_and_punctuation\n\n @staticmethod\n def pad_tokens(tokens, max_sentence_len, pad_token):\n\n if len(tokens) < max_sentence_len:\n padding_tokens = [pad_token for _ in range(max_sentence_len - len(tokens))]\n tokens.extend(padding_tokens)\n return tokens\n\n if len(tokens) > max_sentence_len:\n tokens = tokens[0:max_sentence_len]\n return tokens\n\n @staticmethod\n def padded_int_array(sentence_ints, pad_id, max_sentence_len):\n sentence_ints_array = np.array(sentence_ints, dtype=np.int32)\n padding_array = pad_id * np.ones((max_sentence_len - len(sentence_ints)), dtype=np.int32)\n\n return np.concatenate([sentence_ints_array, padding_array])\n\n\n_SQATuple = collections.namedtuple(\"SQATuple\", (\"story_task_id\", \"context_sentences\", \"question\", \"answer\"))\n\nclass SQATuple(_SQATuple):\n \"\"\"\n Stores the context sentences of a story up to a question, the question itself, and the answer.\n \"\"\"\n __slots__ = ()\n\n\nclass Story:\n def __init__(self):\n # Note:\n # The bAbI dataset consists of stories.\n # Each story has one or more sentences, and one or more questions-answer pairs.\n #\n # Some stories have questions part-way through, then more sentences, and then another question.\n # Each question's scope is all prior sentences in the story.\n #\n # We use dictionaries keyed by line number in order to retrieve relevant story sentences for ease-of-use.\n # When performance is needed, we convert all sentences, questions, and answers to numpy arrays\n\n self.story_task_id = None\n\n self.sentences = []\n self.questions = []\n self.answers = []\n\n self.sqa_tuples = []\n\n def set_story_task_id(self, task_id):\n if self.story_task_id is None:\n self.story_task_id = task_id\n else:\n raise AttributeError(errno.ENOTSUP, os.strerror(errno.ENOTSUP), \"task id for story is immutable\")\n\n def sentences_update(self, sentence):\n self.sentences.append(Sentence(sentence))\n\n def questions_update(self, question):\n self.questions.append(Sentence(question))\n\n def answers_update(self, answer):\n self.answers.append(Sentence(answer))\n\n task_id = self.story_task_id\n s = self.sentences[:]\n q = self.questions[-1]\n a = self.answers[-1]\n\n sqa_tuple = SQATuple(task_id, s, q, a)\n self.sqa_tuples.append(sqa_tuple)\n\n @staticmethod\n def apply_to_sqa_tokens(sqa, f):\n task_id = sqa.story_task_id\n\n ss = list(map(lambda sentence: [f(token) for token in sentence.get_tokens()], sqa.context_sentences))\n q = [f(token) for token in sqa.question.get_tokens()]\n a = f(sqa.answer.string)\n\n return SQATuple(task_id, ss, q, a)\n\n\nclass bAbI:\n\n def __init__(self):\n self.file_partition_types = ['train_or_test', 'task_id']\n self.file_partition_values = {\n 'train_or_test': ['train', 'test'],\n 'task_id': range(1, 21)\n }\n\n self.file_prefix_formula = lambda task_id, train_or_test: 'qa{}_'.format(task_id)\n self.file_suffix_formula = lambda task_id, train_or_test: '_{}.txt'.format(train_or_test)\n\n self.unknown_token = '_UNK'\n self.pad_token = '_PAD'\n\n # H/t to seominjoon, whose regex for this 
task I have based mine on. All other code is my own.\n # https://github.com/seominjoon/memnn-tensorflow/blob/master/read_data.py\n #\n self.s_re = re.compile(\"^(\\d+) ([\\w\\s\\.]+)\")\n self.q_re = re.compile(\"^(\\d+) ([\\w\\s\\?]+)\\t([\\w\\,]+)\\t([\\d\\+\\s]+)\")\n\n self.vocab_dict = None\n self.max_sentence_len = 0\n\n\n def get_fp_for_task(self, data_dir, train_or_test, task_id):\n assert train_or_test in self.file_partition_values['train_or_test']\n assert task_id in self.file_partition_values['task_id']\n\n prefix = self.file_prefix_formula(task_id, train_or_test)\n suffix = self.file_suffix_formula(task_id, train_or_test)\n\n matching_files = [fn for fn in os.listdir(data_dir) if fn.startswith(prefix) and fn.endswith(suffix)]\n assert len(matching_files) == 1\n\n filename = matching_files[0]\n fp = os.path.join(data_dir, filename)\n\n return fp\n\n def get_stories(self, data_dir, train_or_test, task_id):\n fp = self.get_fp_for_task(data_dir, train_or_test, task_id)\n stories = []\n\n story = None\n\n f = open(fp, 'r+')\n for line in f:\n sentence_match = self.s_re.match(line)\n question_match = self.q_re.match(line)\n\n if question_match:\n story_line_nr, question, answer, supporting_facts = question_match.groups()\n question = question.lower()\n answer = answer.lower()\n story.questions_update(question)\n story.answers_update(answer)\n\n elif sentence_match:\n story_line_nr, sentence = sentence_match.groups()\n if int(story_line_nr) == 1:\n if story is not None:\n stories.append(story)\n story = Story()\n story.set_story_task_id(task_id)\n sentence = sentence.lower()\n story.sentences_update(sentence)\n\n stories.append(story)\n\n return stories\n\n def compute_max_sentence_len_from_sqa_tuples(self, sqa_tuples):\n max_sentence_len = 0\n\n for sqa in sqa_tuples:\n ss_lens = list(map(lambda sentence: len(sentence.get_tokens()), sqa.context_sentences))\n q_len = len(sqa.question.get_tokens())\n\n ss_max_len = max(ss_lens)\n\n max_sentence_len = max([max_sentence_len, ss_max_len, q_len])\n\n return max_sentence_len\n\n def compute_vocab_set_from_sqa_tuples(self, sqa_tuples):\n vocab = set()\n\n for sqa in sqa_tuples:\n ss_tokens = list(map(lambda sentence: sentence.get_tokens(), sqa.context_sentences))\n q_tokens = sqa.question.get_tokens()\n a_token = sqa.answer.string\n\n ss_tokens_flat = [token for sentence_tokens in ss_tokens for token in sentence_tokens]\n vocab |= set(ss_tokens_flat)\n vocab |= set(q_tokens)\n vocab.add(a_token)\n\n return vocab\n\n def compute_vocab_dict_from_sqa_tuples(self, sqa_tuples):\n vocab_set = self.compute_vocab_set_from_sqa_tuples(sqa_tuples)\n vocab_list = sorted(list(vocab_set))\n vocab_list.insert(0, self.unknown_token)\n vocab_list.insert(0, self.pad_token)\n\n vocab_dict = dict({w: i for i, w in enumerate(vocab_list)})\n return vocab_dict\n\n def save_vocab_dict_to_file(self, data, fp):\n with open(fp, 'wb') as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"[*] Successfully saved vocab dictionary to file {}\".format(fp))\n return\n\n def load_vocab_dict_from_file(self, fp):\n with open(fp, 'rb') as handle:\n vocab_dict = pickle.load(handle)\n print(\"[*] Successfully loaded vocab dictionary from file {}\".format(fp))\n return vocab_dict\n\n def save_max_sentence_len_to_file(self, data, fp):\n with open(fp, 'wb') as handle:\n pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL)\n print(\"[*] Successfully saved max_sentence_len to file {}\".format(fp))\n return\n\n def 
load_max_sentence_len_from_file(self, fp):\n with open(fp, 'rb') as handle:\n max_sentence_len = pickle.load(handle)\n print(\"[*] Successfully loaded max_sentence_len from file {}\".format(fp))\n return max_sentence_len\n\n def _prepare_data_for_task_ids(self, data_dir, task_ids, validation_frac, vocab_dict=None, max_sentence_len=None):\n train_sqa_tuples_for_all_tasks = []\n validation_sqa_tuples_for_all_tasks = []\n test_sqa_tuples_for_all_tasks = []\n\n for task_id in task_ids:\n train_stories_for_task = self.get_stories(data_dir, 'train', task_id)\n test_stories_for_task = self.get_stories(data_dir, 'test', task_id)\n\n # each task has stories, each story has SQA tuples\n # SQA tuples consist of\n # - the story's cumulative context up to the question,\n # - the question,\n # - the answer\n\n train_sqa_tuples_for_task = [sqa for story in train_stories_for_task for sqa in story.sqa_tuples]\n test_sqa_tuples_for_task = [sqa for story in test_stories_for_task for sqa in story.sqa_tuples]\n\n # Our train/val split will be stratified by task.\n #\n # However, the split will be performed over the list of SQA tuples, not the list of stories.\n # Thus, there may be questions from any given story that are omitted from training set,\n # but we aren't omitting entire stories from the training set\n #\n # Note that, during training, the list of SQA tuples may have the SQA tuples from a given story\n # presented out of order,\n # e.g., we might train on an SQA tuple ([S1,S2,S3], Q2, A2)\n # before training on the SQA tuple ([S1,S2], Q1, A1) from that story.\n #\n # However, the order of the sentences contained WITHIN any given SQA tuple will remain intact.\n # I.e., S1 really is the first sentence, S2 really is the second sentence, etc.\n #\n # This is because the behavior of np.random.shuffle does not change the contents of each element of the list\n\n np.random.shuffle(train_sqa_tuples_for_task)\n\n validation_frac_size = math.floor(validation_frac * len(train_sqa_tuples_for_task))\n split_idx = len(train_sqa_tuples_for_task) - validation_frac_size\n\n _tmp = train_sqa_tuples_for_task[:]\n train_sqa_tuples_for_task = _tmp[0:split_idx]\n validation_sqa_tuples_for_task = _tmp[split_idx:]\n\n train_sqa_tuples_for_all_tasks.extend(train_sqa_tuples_for_task)\n validation_sqa_tuples_for_all_tasks.extend(validation_sqa_tuples_for_task)\n test_sqa_tuples_for_all_tasks.extend(test_sqa_tuples_for_task)\n\n # once we are done with all tasks, shuffle the training set again.\n np.random.shuffle(train_sqa_tuples_for_all_tasks)\n\n sqa_tuples_for_vocab = []\n sqa_tuples_for_vocab.extend(train_sqa_tuples_for_all_tasks)\n #sqa_tuples_for_vocab.extend(validation_sqa_tuples_for_all_tasks)\n #sqa_tuples_for_vocab.extend(test_sqa_tuples_for_all_tasks)\n\n sqa_tuples_for_max_sentence_len = []\n sqa_tuples_for_max_sentence_len.extend(train_sqa_tuples_for_all_tasks)\n sqa_tuples_for_max_sentence_len.extend(validation_sqa_tuples_for_all_tasks)\n sqa_tuples_for_max_sentence_len.extend(test_sqa_tuples_for_all_tasks)\n\n if vocab_dict is None:\n vocab_dict = self.compute_vocab_dict_from_sqa_tuples(sqa_tuples_for_vocab)\n\n if max_sentence_len is None:\n max_sentence_len = self.compute_max_sentence_len_from_sqa_tuples(sqa_tuples_for_max_sentence_len)\n\n self.vocab_dict = vocab_dict\n self.max_sentence_len = max_sentence_len\n\n f = lambda x: self.vocab_dict[x] if x in self.vocab_dict else self.vocab_dict[self.unknown_token]\n\n train_sqa_tuples_ints = [Story.apply_to_sqa_tokens(sqa, f) for sqa in 
train_sqa_tuples_for_all_tasks]\n validation_sqa_tuples_ints = [Story.apply_to_sqa_tokens(sqa, f) for sqa in validation_sqa_tuples_for_all_tasks]\n test_sqa_tuples_ints = [Story.apply_to_sqa_tokens(sqa, f) for sqa in test_sqa_tuples_for_all_tasks]\n\n return train_sqa_tuples_ints, validation_sqa_tuples_ints, test_sqa_tuples_ints\n\n def prepare_data_for_single_task(self, data_dir, task_id, validation_frac, vocab_dict=None, max_sentence_len=None):\n task_ids = [task_id]\n tr, va, te = self._prepare_data_for_task_ids(data_dir, task_ids, validation_frac, vocab_dict, max_sentence_len)\n return tr, va, te\n\n def prepare_data_for_joint_tasks(self, data_dir, validation_frac, vocab_dict=None, max_sentence_len=None):\n task_ids = self.file_partition_values['task_id']\n tr, va, te = self._prepare_data_for_task_ids(data_dir, task_ids, validation_frac, vocab_dict, max_sentence_len)\n return tr, va, te\n\n @staticmethod\n def standardize_features(sqa, max_sentence_length_J, number_of_memories_M, pad_id, intersperse_empty_memories=False):\n sentences_ints = sqa.context_sentences[:]\n question_ints = sqa.question\n answer_int = sqa.answer\n\n # Per Section 4.2:\n # \"The capacity of memory is restricted to the most recent 50 sentences.\"\n #\n # If the memory network can store M memories, we store only the M most recent sentences.\n #\n nr_sentences = len(sentences_ints)\n start_idx = max(0, (nr_sentences - number_of_memories_M))\n end_idx = nr_sentences\n sentences_ints = sentences_ints[start_idx:end_idx]\n\n Jpadded_sentences_ints_list = list(map(\n lambda s: Sentence.padded_int_array(s, pad_id=pad_id, max_sentence_len=max_sentence_length_J),\n sentences_ints))\n\n # Per Section 4.1:\n # \"Note that sentences are indexed in reverse order, reflecting their relative distance from the question\n # so that x1 is the last sentence of the story.\"\n #\n Jpadded_sentences_ints_list = Jpadded_sentences_ints_list[::-1]\n\n Jpadded_question_ints = Sentence.padded_int_array(question_ints, pad_id=pad_id, max_sentence_len=max_sentence_length_J)\n\n sentences_2d_array = pad_id * np.ones((number_of_memories_M, max_sentence_length_J), dtype=np.int32)\n empty_memory_timeword_id = number_of_memories_M\n timeword_array = empty_memory_timeword_id * np.ones(number_of_memories_M, dtype=np.int32)\n\n if intersperse_empty_memories:\n nr_sentences = len(Jpadded_sentences_ints_list)\n\n # This implementation is based on my understanding of the paper and the official implementation.\n # The details in the paper were ambiguous, and this is my attempt to understand it, and the matlab code from Facebook.\n #\n # Other than the official matlab implementation, I have not found anyone else who has implemented random noise,\n # so I don't have any other python code to check this against.\n #\n # For matlab code, see:\n # https://github.com/facebook/MemNN/blob/master/MemN2N-babi-matlab/train.m#L31\n\n extra_spaces = max(0, number_of_memories_M - nr_sentences)\n max_nr_empty_memories_to_intersperse = min(extra_spaces, int(math.ceil(0.10 * nr_sentences)))\n nr_empty_memories_to_intersperse = 0\n\n if max_nr_empty_memories_to_intersperse > 0:\n nr_empty_memories_to_intersperse = np.random.randint(low=0, high=max_nr_empty_memories_to_intersperse)\n\n permutation = np.random.permutation(nr_sentences + nr_empty_memories_to_intersperse)\n set_of_idxs_for_nonempty_memories = set(permutation[0:nr_sentences])\n target_idxs_for_nonempty_memories = sorted(list(set_of_idxs_for_nonempty_memories))\n\n for i in range(0,nr_sentences):\n 
target_idx_for_nonempty_memory_i = target_idxs_for_nonempty_memories[i]\n Jpadded_sentence_ints = Jpadded_sentences_ints_list[i]\n sentences_2d_array[target_idx_for_nonempty_memory_i,:] = np.array(Jpadded_sentence_ints)\n timeword_array[target_idx_for_nonempty_memory_i] = target_idx_for_nonempty_memory_i\n\n else:\n nr_sentences = len(Jpadded_sentences_ints_list)\n sentences_2d_array[0:nr_sentences,:] = np.array(Jpadded_sentences_ints_list, dtype=np.int32)\n timeword_array[0:nr_sentences] = np.array(range(0,nr_sentences), dtype=np.int32)\n\n return sentences_2d_array, timeword_array, Jpadded_question_ints, answer_int\n\n"
] | [
[
"numpy.random.shuffle",
"numpy.ones",
"numpy.concatenate",
"numpy.random.permutation",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
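
standardize_features above pads every sentence to max_sentence_length_J tokens and then reverses the list so that, per the quoted Section 4.1, x1 is the last sentence of the story. A minimal sketch of just those two steps on hypothetical token ids, with pad_id = 0:

import numpy as np

def padded_int_array(sentence_ints, pad_id, max_sentence_len):
    arr = np.array(sentence_ints, dtype=np.int32)
    padding = pad_id * np.ones(max_sentence_len - len(sentence_ints), dtype=np.int32)
    return np.concatenate([arr, padding])

story = [[3, 4], [5, 6, 7], [8]]   # token ids for three story sentences, oldest first
J = 4                              # max sentence length
padded = [padded_int_array(s, pad_id=0, max_sentence_len=J) for s in story]
print(np.stack(padded[::-1]))      # most recent sentence becomes row 0
# [[8 0 0 0]
#  [5 6 7 0]
#  [3 4 0 0]]
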
PiotrGrzybowski/ProbabilisticMachineLearning | [
"c835a1bdf7ab1b2e58bcf90ae02b7405c9c72977"
] | [
"Lab2/task3.py"
] | [
"import numpy as np\nimport itertools\n\n\ndef student_application(tries, successes, probabilities):\n faculties = set(np.arange(tries))\n combinations = set(itertools.combinations(faculties, successes))\n\n return np.sum([np.prod([probabilities[i] for i in combination]) *\n np.prod([1 - probabilities[i] for i in faculties - set(combination)])\n for combination in combinations])\n\n\nif __name__ == \"__main__\":\n print(student_application(9, 0, [0.01] * 9))\n"
] | [
[
"numpy.arange",
"numpy.prod"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
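
student_application above is the brute-force Poisson binomial: it sums, over every size-k subset of faculties, the probability that exactly that subset accepts. For the k = 0 call in the record the sum has a single term, so the printed value can be checked by hand:

# With zero successes the only subset is the empty set, so the probability is
# the product of all nine failure probabilities:
p = 0.01
print((1 - p) ** 9)           # 0.9135... == student_application(9, 0, [0.01] * 9)

# And for exactly one acceptance there are 9 single-faculty subsets:
print(9 * p * (1 - p) ** 8)   # 0.0830...
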
e-koch/regions | [
"d1a6dd34def9442133065041974b2e33cafaf1cf"
] | [
"regions/_utils/wcs_helpers.py"
] | [
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# (taken from photutils: should probably migrate into astropy.wcs)\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.coordinates import UnitSphericalRepresentation\nfrom astropy.wcs.utils import skycoord_to_pixel\nfrom ..core.pixcoord import PixCoord\n\nskycoord_to_pixel_mode = 'all'\n\n\ndef skycoord_to_pixel_scale_angle(skycoord, wcs, small_offset=1 * u.arcsec):\n \"\"\"\n Convert a set of SkyCoord coordinates into pixel coordinates, pixel\n scales, and position angles.\n\n Parameters\n ----------\n skycoord : `~astropy.coordinates.SkyCoord`\n Sky coordinates\n wcs : `~astropy.wcs.WCS`\n The WCS transformation to use\n small_offset : `~astropy.units.Quantity`\n A small offset to use to compute the angle\n\n Returns\n -------\n pixcoord : `~regions.PixCoord`\n Pixel coordinates\n scale : float\n The pixel scale at each location, in degrees/pixel\n angle : `~astropy.units.Quantity`\n The position angle of the celestial coordinate system in pixel space.\n \"\"\"\n\n # Convert to pixel coordinates\n x, y = skycoord_to_pixel(skycoord, wcs, mode=skycoord_to_pixel_mode)\n pixcoord = PixCoord(x=x, y=y)\n\n # We take a point directly 'above' (in latitude) the position requested\n # and convert it to pixel coordinates, then we use that to figure out the\n # scale and position angle of the coordinate system at the location of\n # the points.\n\n # Find the coordinates as a representation object\n r_old = skycoord.represent_as('unitspherical')\n\n # Add a a small perturbation in the latitude direction (since longitude\n # is more difficult because it is not directly an angle).\n dlat = small_offset\n r_new = UnitSphericalRepresentation(r_old.lon, r_old.lat + dlat)\n coords_offset = skycoord.realize_frame(r_new)\n\n # Find pixel coordinates of offset coordinates\n x_offset, y_offset = skycoord_to_pixel(coords_offset, wcs,\n mode=skycoord_to_pixel_mode)\n\n # Find vector\n dx = x_offset - x\n dy = y_offset - y\n\n # Find the length of the vector\n scale = np.hypot(dx, dy) / dlat.to('degree').value\n\n # Find the position angle\n angle = np.arctan2(dy, dx) * u.radian\n\n return pixcoord, scale, angle\n\n\ndef assert_angle_or_pixel(name, q):\n \"\"\"\n Check that ``q`` is either an angular or a pixel `~astropy.units.Quantity`.\n \"\"\"\n if isinstance(q, u.Quantity):\n if q.unit.physical_type == 'angle' or q.unit is u.pixel:\n pass\n else:\n raise ValueError(\"{0} should have angular or pixel \"\n \"units\".format(name))\n else:\n raise TypeError(\"{0} should be a Quantity instance\".format(name))\n\n\ndef assert_angle(name, q):\n \"\"\"\n Check that ``q`` is an angular `~astropy.units.Quantity`.\n \"\"\"\n if isinstance(q, u.Quantity):\n if q.unit.physical_type == 'angle':\n pass\n else:\n raise ValueError(\"{0} should have angular units\".format(name))\n else:\n raise TypeError(\"{0} should be a Quantity instance\".format(name))\n"
] | [
[
"numpy.arctan2",
"numpy.hypot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
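
skycoord_to_pixel_scale_angle above measures local scale and orientation by stepping the sky position slightly in latitude and converting both points to pixels. The numpy tail of that computation, shown on hypothetical pixel positions where a 1 arcsec step lands 2 pixels away (note the scale computed this way comes out in pixels per degree, even though the docstring says degrees/pixel):

import numpy as np

dlat_deg = 1.0 / 3600.0                 # the 1 arcsec offset, in degrees
x, y = 100.0, 200.0                     # pixel position of the original coordinate
x_offset, y_offset = 100.0, 202.0       # pixel position after the latitude step

dx, dy = x_offset - x, y_offset - y
scale = np.hypot(dx, dy) / dlat_deg     # length of the offset vector per degree
angle = np.arctan2(dy, dx)              # position angle of +latitude in pixel space
print(scale, np.degrees(angle))         # 7200.0 90.0
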
brian220/sketch2pointcloud | [
"55f9011dff89963af57d1bb842f763d2fa3603d2"
] | [
"layers/gcn.py"
] | [
"# -*- coding: utf-8 -*-\n#\n# Developed by Chao Yu Huang <[email protected]>\n# Lot's of codes are borrowed from treeGCN: \n# https://github.com/seowok/TreeGAN\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport math\n\nclass TreeGCN(nn.Module):\n def __init__(self, batch, depth, features, degrees, support=10, node=1, upsample=False, activation=True):\n self.batch = batch\n self.depth = depth\n self.in_feature = features[depth]\n self.out_feature = features[depth+1]\n self.node = node\n self.degree = degrees[depth]\n self.upsample = upsample\n self.activation = activation\n super(TreeGCN, self).__init__()\n\n self.W_root = nn.ModuleList([nn.Linear(features[inx], self.out_feature, bias=False) for inx in range(self.depth+1)])\n\n if self.upsample:\n self.W_branch = nn.Parameter(torch.FloatTensor(self.node, self.in_feature, self.degree*self.in_feature))\n \n self.W_loop = nn.Sequential(nn.Linear(self.in_feature, self.in_feature*support, bias=False),\n nn.Linear(self.in_feature*support, self.out_feature, bias=False))\n\n self.bias = nn.Parameter(torch.FloatTensor(1, self.degree, self.out_feature))\n\n self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)\n\n self.init_param()\n\n def init_param(self):\n if self.upsample:\n init.xavier_uniform_(self.W_branch.data, gain=init.calculate_gain('relu'))\n\n stdv = 1. / math.sqrt(self.out_feature)\n self.bias.data.uniform_(-stdv, stdv)\n\n def forward(self, tree):\n root = 0\n for inx in range(self.depth+1):\n root_num = tree[inx].size(1)\n repeat_num = int(self.node / root_num)\n root_node = self.W_root[inx](tree[inx])\n root = root + root_node.repeat(1,1,repeat_num).view(self.batch,-1,self.out_feature)\n\n branch = 0\n if self.upsample:\n branch = tree[-1].unsqueeze(2) @ self.W_branch\n branch = self.leaky_relu(branch)\n branch = branch.view(self.batch,self.node*self.degree,self.in_feature)\n \n branch = self.W_loop(branch)\n\n branch = root.repeat(1,1,self.degree).view(self.batch,-1,self.out_feature) + branch\n else:\n branch = self.W_loop(tree[-1])\n\n branch = root + branch\n\n if self.activation:\n branch = self.leaky_relu(branch + self.bias.repeat(1,self.node,1))\n tree.append(branch)\n\n return tree\n\n "
] | [
[
"torch.nn.Linear",
"torch.FloatTensor",
"torch.nn.LeakyReLU",
"torch.nn.init.calculate_gain"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
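
The ancestor loop in TreeGCN.forward tiles each ancestor's transformed feature across all of its descendants with repeat followed by view. A tiny sketch of just that broadcast, with shapes picked purely for illustration:

import torch

batch, out_feature = 1, 2
root_num, node = 2, 4                       # 2 ancestors feeding 4 current nodes
repeat_num = node // root_num               # each ancestor covers 2 descendants

root_node = torch.tensor([[[1., 1.],        # [batch, root_num, out_feature]
                           [2., 2.]]])
tiled = root_node.repeat(1, 1, repeat_num).view(batch, -1, out_feature)
print(tiled)
# Each ancestor's feature vector now appears repeat_num times, in child order:
# [[1, 1], [1, 1], [2, 2], [2, 2]]
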
SouppuoS/segan | [
"90d174e1e2277cb6a1711fe0fe94646cf0c116da"
] | [
"segan_module.py"
] | [
"import torch\nimport torch.nn as nn\n\nclass wavnetlike(nn.Module):\n def __init__(self, stride=2, kernal_size=31, channel_size=[], rev=False):\n super(wavnetlike, self).__init__()\n\n self.num_layer = len(channel_size) - 1\n self.cnn = nn.ModuleList([])\n self.skipTns = nn.ModuleList([])\n for i in range(self.num_layer):\n if rev:\n self.cnn.append(nn.ConvTranspose1d(in_channels=channel_size[i + 1],\n out_channels=channel_size[i], \n kernel_size=kernal_size, \n stride=stride, \n padding=kernal_size // 2,\n output_padding=1))\n self.skipTns.append(nn.Conv1d(in_channels=channel_size[i + 1] // 2,\n out_channels=channel_size[i + 1], \n kernel_size=1, \n stride=1))\n else:\n self.cnn.append(nn.Conv1d(in_channels=channel_size[i],\n out_channels=channel_size[i + 1], \n kernel_size=kernal_size, \n stride=stride, \n padding=kernal_size // 2))\n\n def forward(self, inputs):\n raise NotImplementedError\n\nclass g_module(nn.Module):\n def __init__(self, chnlCfg):\n super(g_module, self).__init__()\n self.cfg = chnlCfg\n self.enc = wavnetlike(channel_size=chnlCfg)\n self.dec = wavnetlike(channel_size=[1] + [2 * v for v in chnlCfg[1:]], rev=True)\n self.act = nn.PReLU()\n\n def forward(self, inputs):\n # inputs, [B, T]\n output = inputs[:, None, :]\n outs = []\n for cnn in self.enc.cnn:\n output = self.act(cnn(output))\n outs.append(output)\n\n # \"sample the noise samples z from our prior 8×1024-dimensional normal distribu-tion N(0, I). \"\n # \"skip connections and the addition of the latent vector make the number of feature maps in every layer to be doubled\"\n z = torch.randn(outs[-1].shape, device=outs[-1].device)\n output = torch.cat((outs[-1], z), dim=1)\n\n output = self.act(self.dec.cnn[-1](output))\n\n for skip, cnn, trn in zip(outs[-2::-1], self.dec.cnn[-2::-1], self.dec.skipTns[-2::-1]):\n output = self.act(cnn(output + trn(skip)))\n\n return output\n\nclass d_module(nn.Module):\n def __init__(self, chnlCfg):\n super(d_module, self).__init__()\n self.cfg = chnlCfg\n self.enc = wavnetlike(channel_size=chnlCfg)\n self.act = nn.LeakyReLU(negative_slope=0.3)\n self.vbn = nn.ModuleList([nn.BatchNorm1d(ch) for ch in chnlCfg[1:]])\n self.cls = nn.Sequential(\n nn.Conv1d(1024, 1, 1),\n nn.LeakyReLU(0.3),\n nn.Linear(8, 1),\n )\n self.prd = nn.Sigmoid()\n\n def forward(self, inputs):\n # inputs, [B, 2, T] -> [B, 1024, 8]\n output = inputs\n for bn, cnn in zip(self.vbn, self.enc.cnn):\n output = self.act(bn(cnn(output)))\n d = self.prd(self.cls(output))\n return d"
] | [
[
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.nn.PReLU",
"torch.nn.ModuleList",
"torch.randn",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.nn.LeakyReLU",
"torch.nn.Conv1d",
"torch.nn.ConvTranspose1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
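
The quoted line in g_module.forward (the "8x1024-dimensional" noise prior) pins down the encoder geometry: each stride-2 convolution with kernel 31 and padding 15 halves the time axis. Assuming the SEGAN paper's 16384-sample input window and 11 encoder layers (the channel config itself is passed in from outside this record), the arithmetic checks out:

# Conv1d output-length formula: L_out = (L_in + 2*padding - kernel) // stride + 1
length = 16384
for _ in range(11):
    length = (length + 2 * 15 - 31) // 2 + 1
print(length)   # 8 -> the 8x1024 latent that d_module's Linear(8, 1) head expects
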
peerschuett/lattice_net | [
"bedee4c7e4adf5ae191a408597c058f2638c96cc"
] | [
"latticenet_py/misc/compute_class_frequency.py"
] | [
"#!/usr/bin/env python3.6\n\nimport torch\nfrom torch.autograd import Function\nfrom torch import Tensor\n\nimport sys\nimport os\nimport numpy as np\n# http://wiki.ros.org/Packages#Client_Library_Support\nimport rospkg\nrospack = rospkg.RosPack()\nsf_src_path=rospack.get_path('surfel_renderer')\nsf_build_path=os.path.abspath(sf_src_path + \"/../../build/surfel_renderer\")\nsys.path.append(sf_build_path) #contains the modules of pycom\n\nfrom DataLoaderTest import *\n\n\nconfig_file=\"compute_class_frequency.cfg\"\n\n\n\n\ndef run():\n view=Viewer(config_file)\n # loader=DataLoaderSemanticKitti(config_file)\n loader=DataLoaderShapeNetPartSeg(config_file)\n loader.start()\n nr_total_points=0\n nr_points_of_class=None\n nr_classes=0\n\n\n while True:\n view.update()\n\n if(loader.has_data()): \n print(\"got cloud\")\n cloud=loader.get_cloud()\n cloud.m_vis.m_point_size=4\n Scene.show(cloud,\"cloud\")\n\n nr_classes=cloud.m_label_mngr.nr_classes()\n if nr_points_of_class is None:\n nr_points_of_class=np.zeros(nr_classes)\n nr_total_points+=cloud.V.shape[0]\n # print(\"adding to total nr of points\", cloud.V.shape[0], \" updated is \", nr_total_points )\n for i in range(nr_classes):\n nr_points_of_class[i]+=(cloud.L_gt==i).sum()\n # print(\"adding for class \", i, \" nr of points \", (cloud.L_gt==i).sum(), \" updated nr is now \", nr_points_of_class[i] )\n\n if loader.is_finished():\n print(\"frequencies are:\")\n for i in range(nr_classes):\n print(nr_points_of_class[i]/nr_total_points)\n # return\n\n\ndef main():\n run()\n\n\n\nif __name__ == \"__main__\":\n main() # This is what you would have, but the following is useful:\n\n # # These are temporary, for debugging, so meh for programming style.\n # import sys, trace\n\n # # If there are segfaults, it's a good idea to always use stderr as it\n # # always prints to the screen, so you should get as much output as\n # # possible.\n # sys.stdout = sys.stderr\n\n # # Now trace execution:\n # tracer = trace.Trace(trace=1, count=0, ignoredirs=[\"/usr\", sys.prefix])\n # tracer.run('main()')"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
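
The tally in run() above walks the classes one at a time with (cloud.L_gt == i).sum(). The same accumulation can be sketched with numpy.bincount over hypothetical label arrays, with no viewer or ROS data loader required:

import numpy as np

clouds = [np.array([0, 0, 1, 2, 2, 2]),    # hypothetical per-point ground-truth labels
          np.array([1, 1, 2])]
nr_classes = 3

nr_points_of_class = np.zeros(nr_classes)
nr_total_points = 0
for labels in clouds:
    nr_points_of_class += np.bincount(labels, minlength=nr_classes)
    nr_total_points += labels.shape[0]
print(nr_points_of_class / nr_total_points)   # [0.2222 0.3333 0.4444]
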
Pandinosaurus/clana | [
"270da7fff0c09f690c8b9595bc29405eb9fdd244"
] | [
"clana/clustering.py"
] | [
"\"\"\"Everything about clustering classes of a confusion matrix.\"\"\"\n\n# Core Library\nimport logging\nimport random\nfrom typing import Any, List, TypeVar, Union\n\n# Third party\nimport numpy as np\n\n# First party\nimport clana.utils\n\ncfg = clana.utils.load_cfg()\nlogger = logging.getLogger(__name__)\n\n\nT = TypeVar(\"T\")\n\n\ndef apply_grouping(labels: List[T], grouping: List[bool]) -> List[List[T]]:\n \"\"\"\n Return list of grouped labels.\n\n Parameters\n ----------\n labels : List[T]\n grouping : List[bool]\n\n Returns\n -------\n grouped_labels : List[List[T]]\n\n Examples\n --------\n >>> labels = ['de', 'en', 'fr']\n >>> grouping = [False, True]\n >>> apply_grouping(labels, grouping)\n [['de', 'en'], ['fr']]\n \"\"\"\n groups = []\n current_group = [labels[0]]\n for label, cut in zip(labels[1:], grouping):\n if cut:\n groups.append(current_group)\n current_group = [label]\n else:\n current_group.append(label)\n groups.append(current_group)\n return groups\n\n\ndef _remove_single_element_groups(hierarchy: List[List]) -> List[Any]:\n \"\"\"\n Flatten sub-lists of length 1.\n\n Parameters\n ----------\n hierarchy : List[List]\n\n Returns\n -------\n hierarchy : list of el / lists\n\n Examples\n --------\n >>> hierarchy = [[0], [1, 2]]\n >>> _remove_single_element_groups(hierarchy)\n [0, [1, 2]]\n \"\"\"\n h_new = []\n for el in hierarchy:\n if len(el) > 1:\n h_new.append(el)\n else:\n h_new.append(el[0])\n return h_new\n\n\ndef extract_clusters(\n cm: np.ndarray,\n labels: List[str],\n steps: int = 10 ** 4,\n lambda_: float = 0.013,\n method: str = \"local-connectivity\",\n interactive: bool = False,\n) -> List[bool]:\n \"\"\"\n Find clusters in cm.\n\n Idea:\n mininmize lambda (error between clusters) - (count of clusters)\n s.t.: Each inter-cluster accuracy has to be lower than the overall\n accuracy\n\n Parameters\n ----------\n cm : np.ndarray\n labels : List[str]\n steps : int\n lambda_ : float\n The closer to 0, the more groups\n The bigger, the bigger groups\n method : {'local-connectivity', 'energy'}\n interactive : bool\n\n Returns\n -------\n clustes : List[bool]\n \"\"\"\n if method == \"energy\":\n n = len(cm)\n grouping = np.zeros(n - 1)\n minimal_score = get_score(cm, grouping, lambda_)\n best_grouping = grouping.copy()\n for _ in range(steps):\n pos = random.randint(0, n - 2)\n grouping = best_grouping.copy()\n grouping[pos] = (grouping[pos] + 1) % 2\n current_score = get_score(cm, grouping, lambda_)\n if current_score < minimal_score:\n best_grouping = grouping\n minimal_score = current_score\n logger.info(f\"Best grouping: {grouping} (score: {minimal_score})\")\n elif method == \"local-connectivity\":\n if interactive:\n thres: Union[float, int] = find_thres_interactive(cm, labels)\n else:\n thres = find_thres(cm, cfg[\"visualize\"][\"threshold\"])\n logger.info(f\"Found threshold for local connection: {thres}\")\n best_grouping = split_at_con_thres(cm, thres, labels, interactive=interactive)\n else:\n raise NotImplementedError(f\"method='{method}'\")\n logger.info(\"Found {} clusters\".format(sum(best_grouping) + 1))\n return best_grouping\n\n\ndef create_weight_matrix(grouping: List[int]) -> np.ndarray:\n \"\"\"\n Create a matrix which contains the distance to the diagonal.\n\n Parameters\n ----------\n grouping : List[int]\n\n Returns\n -------\n weight_matrix : np.ndarray\n A symmetric matrix\n \"\"\"\n n = len(grouping) + 1\n weight_matrix = np.zeros((n, n))\n for i in range(n):\n seen_1 = False\n for j in range(i + 1, n):\n if seen_1:\n 
weight_matrix[i][j] = 1\n elif grouping[j - 1] == 1:\n seen_1 = True\n weight_matrix[i][j] = 1\n return weight_matrix + weight_matrix.transpose()\n\n\ndef get_score(cm: np.ndarray, grouping: List, lambda_: float) -> float:\n \"\"\"\n Get the score of a confusion matrix.\n\n Parameters\n ----------\n cm : np.ndarray\n grouping : List\n lambda_ : float\n\n Returns\n -------\n score : float\n \"\"\"\n # First party\n from clana.visualize_cm import calculate_score\n\n inter_cluster_err = 0.0\n weights = create_weight_matrix(grouping)\n inter_cluster_err = calculate_score(cm, weights)\n return lambda_ * inter_cluster_err - sum(grouping)\n\n\ndef find_thres(cm: np.ndarray, percentage: float) -> float:\n \"\"\"\n Find a threshold for grouping.\n\n Parameters\n ----------\n cm : np.ndarray\n percentage : float\n Probability that two neighboring classes belong togehter\n\n Returns\n -------\n connectivity : float\n \"\"\"\n n = int(len(cm) * (1.0 - percentage)) - 1\n con = sorted(get_neighboring_connectivity(cm))\n return con[n]\n\n\ndef find_thres_interactive(cm: np.ndarray, labels: List[str]) -> float:\n \"\"\"\n Find a threshold for grouping.\n\n The threshold is the minimum connection strength for two classes to be\n within the same cluster.\n\n Parameters\n ----------\n cm : np.ndarray\n labels : List[str]\n\n Returns\n -------\n pos_str : float\n \"\"\"\n n = len(cm)\n con = sorted(zip(get_neighboring_connectivity(cm), zip(range(n - 1), range(1, n))))\n # pos_low = 0\n pos_str = None\n\n # Lowest position from which we know that they are connected\n pos_up = n - 1\n\n # Highest position from which we know that they are not connected\n neg_low = 0\n # neg_up = n - 1\n while pos_up - 1 > neg_low:\n print(f\"pos_up={pos_up}, neg_low={neg_low}, pos_str={pos_str}\")\n pos = int((pos_up + neg_low) / 2)\n con_str, (i1, i2) = con[pos]\n should_be_conn = input(\n \"Should {} and {} be in one cluster?\"\n \" (y/n): \".format(labels[i1], labels[i2])\n )\n if should_be_conn == \"n\":\n neg_low = pos\n elif should_be_conn == \"y\":\n pos_up = pos\n pos_str = con_str\n else:\n print(f\"Please type only 'y' or 'n'. You typed {should_be_conn}.\")\n assert pos_str is not None\n return pos_str\n\n\ndef get_neighboring_connectivity(cm: np.ndarray) -> List[float]:\n \"\"\"\n Get how strong neighboring classes are connected.\n\n Parameters\n ----------\n cm : np.ndarray\n\n Returns\n -------\n con : List[float]\n \"\"\"\n con = []\n n = len(cm)\n for i in range(n - 1):\n con.append(cm[i][i + 1] + cm[i + 1][i])\n return con\n\n\ndef split_at_con_thres(\n cm: np.ndarray, thres: float, labels: List[str], interactive: bool\n) -> List[bool]:\n \"\"\"\n Two classes are not in the same group if they are not connected strong.\n\n Minimum connection strength is thres. The bigger this value, the more\n clusters / the smaller clusters you will get.\n \"\"\"\n con = get_neighboring_connectivity(cm)\n grouping = []\n for i, el in enumerate(con):\n if el == thres and interactive:\n should_conn = \"-\"\n while should_conn not in [\"y\", \"n\"]:\n should_conn = input(\n \"Should {} and {} be in one \"\n \"cluster? (y/n): \".format(labels[i], labels[i + 1])\n )\n if should_conn == \"y\":\n grouping.append(False)\n elif should_conn == \"n\":\n grouping.append(True)\n else:\n print(\"please type either 'y' or 'n'\")\n else:\n grouping.append(el < thres)\n return grouping\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
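
find_thres and split_at_con_thres above cut the class ordering wherever the adjacent connectivity cm[i][i+1] + cm[i+1][i] drops below a percentile threshold. A worked toy example (the percentage value here is illustrative; the real one comes from cfg["visualize"]["threshold"]):

import numpy as np

# Toy 4-class confusion matrix: classes 0/1 confuse each other, as do 2/3.
cm = np.array([[50,  9,  0,  0],
               [ 8, 50,  1,  0],
               [ 0,  1, 50, 10],
               [ 0,  0,  9, 50]])

con = [int(cm[i][i + 1] + cm[i + 1][i]) for i in range(len(cm) - 1)]
print(con)                                 # [17, 2, 19]

percentage = 0.5
n = int(len(cm) * (1.0 - percentage)) - 1  # index 1 of the sorted connectivities
thres = sorted(con)[n]                     # 17
grouping = [c < thres for c in con]
print(grouping)                            # [False, True, False] -> clusters {0, 1} and {2, 3}
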
dnnspark/dsynth | [
"4dd0d502e143f0aece5739084ee5a45346608554"
] | [
"dsynth/util/object_loader.py"
] | [
"\"\"\"\n(ext) Carbon copy of:\n https://vhub.vicarious/vicarious/dataset/blob/master/dataset/util/object_loader.py\n\nWavefront .obj file loader.\n\nSupports basic material and texture properties.\nVertex colors are not supported.\n\"\"\"\n\nimport re\nimport os\nimport numpy as np\nimport logging\nimport math\nfrom collections import defaultdict, namedtuple\n\nlogger = logging.getLogger(__name__)\n\n\ndef checked_dtype_narrowing(array, target_dtype, dtype_name=\"\"):\n \"\"\"Cast a numpy array to a smaller dtype. Asserts that underflow and\n overflow will not occur\n\n Parameters\n ----------\n array : Any object that can be passed to np.array\n The array to be cast. Will be more efficient if it is already an array.\n target_dtype : dtype\n What the array should be case to, i.e., numpy.uint8\n dtype_name : string\n Optional name for the dtype for the error message\n\n Returns\n -------\n new_array : np.array of dtype target_dtype\n \"\"\"\n array = np.asarray(array)\n\n if array.size > 0:\n min_array = np.min(array)\n min_for_dtype = np.iinfo(target_dtype).min\n assert min_array >= min_for_dtype, \\\n 'Broaden {} dtype as cast will underflow: {} < dtype limit {}'.\\\n format(dtype_name, min_array, min_for_dtype)\n\n max_array = np.max(array)\n max_for_dtype = np.iinfo(target_dtype).max\n assert max_array <= max_for_dtype, \\\n 'Broaden {} dtype as cast will overflow {} > dtype limit {}'.\\\n format(dtype_name, max_array, max_for_dtype)\n\n return np.array(array, dtype=target_dtype, copy=False)\n\n\nFace = namedtuple('Face',\n ['i1', 'i2', 'i3',\n 'n1', 'n2', 'n3',\n 'uv1', 'uv2', 'uv3', 'fid'\n ])\n # ], verbose=False)\n\n\nclass FaceGroup(object):\n\n def __init__(self):\n self.smooth = 0\n self.material_name = None\n self.group_name = None\n self.object_name = None\n\n\n# .OBJ File Regular Expressions\n\n# v float float float\nvertex_pattern = re.compile(\n r'v( +[\\d|\\.|\\+|\\-|e]+)( +[\\d|\\.|\\+|\\-|e]+)( +[\\d|\\.|\\+|\\-|e]+)')\n\n# vn float float float\nnormal_pattern = re.compile(\n r'vn( +[\\d|\\.|\\+|\\-|e]+)( +[\\d|\\.|\\+|\\-|e]+)( +[\\d|\\.|\\+|\\-|e]+)')\n\n# vt float float\nuv_pattern = re.compile(r'vt( +[\\-\\w|\\d|\\.|\\+]+)( +[\\-\\w|\\d|\\.|\\+]+)')\n\n# f vertex vertex vertex ...\nface_pattern1 = re.compile(r'f( +\\d+)( +\\d+)( +\\d+)( +\\d+)?')\n\n# f vertex/uv vertex/uv vertex/uv ...\nface_pattern2 = re.compile(\n r'f( +(\\d+)\\/(\\d+))( +(\\d+)\\/(\\d+))( +(\\d+)\\/(\\d+))( +(\\d+)\\/(\\d+))?')\n\n# f vertex/uv/normal vertex/uv/normal vertex/uv/normal ...\nface_pattern3 = re.compile(\n r'f( +(\\d+)\\/(\\d+)\\/(\\d+))( +(\\d+)\\/(\\d+)\\/(\\d+))( +(\\d+)\\/(\\d+)\\/(\\d+))( +(\\d+)\\/(\\d+)\\/(\\d+))?')\n\n# f vertex//normal vertex//normal vertex//normal ...\nface_pattern4 = re.compile(\n r'f( +(\\d+)\\/\\/(\\d+))( +(\\d+)\\/\\/(\\d+))( +(\\d+)\\/\\/(\\d+))( +(\\d+)\\/\\/(\\d+))?')\n\n\n# .MTL File patterns\nnew_mtl_pattern = re.compile(r'newmtl (.+)')\n\n# Kd 1.00 1.00 1.00\nk_pattern = re.compile(\n r'(Kd|Ka|Ks)( +[\\d|\\.|\\+|\\-|e]+)( +[\\d|\\.|\\+|\\-|e]+)( +[\\d|\\.|\\+|\\-|e]+)')\n\n# Ns 1.00\nsingle_pattern = re.compile(r'(Ns|Ni|d|illum)( +[\\d|\\.|\\+|\\-|e]+)')\n\n# map_Kd blah.png\nmap_pattern = re.compile(\n r'(map_Ka|map_Kd|map_Ks|map_bump|bump|disp|map_Ns) (.+)')\n\n# Can have optional arguments, but we're not parsing these ( e.g. 
map_Kd\n# asdf.png -clamp on )\nimage_pattern = re.compile(r'(?:\\w+) ([^-].+\\.(tga|png|jpeg|jpg|bmp|tiff))')\n\n\nclass Texture(object):\n\n def __init__(self, image_name):\n self.image_name = image_name\n\n\nclass Material(object):\n\n def __init__(self, name=None):\n self.name = name\n\n\nclass Mesh(object):\n\n def __init__(self, material=None, deformation=None):\n self.material = material or Material()\n self.deformation = deformation\n\n # GL data structures\n # Should be np.array, len(verts) == len(faces)*3\n self.vertices = None\n self.indices = None\n\n self._parent_vertices = None\n\n self.faces = [] # [ [1,2,3], [4,5,6] ]\n self.face_normals = [] # [ [N1,N2,N3], [N1,N2,N3] ] or None\n self.face_uvs = [] # [ [UV1,UV2,UV3], [UV1,UV2,UV3] ] or None\n\n def compute_smooth_normals(self, max_smoothing_angle=25):\n \"\"\"\n Computes normals for each vertex used by each face by averaging\n normals that are within 'max_smoothing_angle'.\n\n Many models have face indices defined for the front and back of a face,\n perhaps to ensure both are drawn. Blindly averaging all vertex normals\n together was resulting in near zero average vectors because of this.\n\n Warning: this is very slow.\n\n Parameters\n ----------\n max_smoothing_angle : float\n Cutoff for treating vectors as similar, in degrees.\n \"\"\"\n if max_smoothing_angle < 0.0 or max_smoothing_angle > 180.0:\n raise ValueError(\"Expected max_smoothing_angle between 0 and 180\")\n # We will use the cos of the angles to save time later on\n angle_thresh = max_smoothing_angle * np.pi / 180.0\n cos_thresh = math.cos(angle_thresh)\n\n vert_normal_groups = defaultdict(list)\n face_verts_to_groups = {}\n\n has_uvs = len(self.face_uvs) > 0\n\n if self.face_uvs:\n assert len(self.face_uvs) == len(self.faces), \\\n \"Expecting UVs for each face\"\n\n # First filter out faces with invalid normals\n valid_faces = []\n valid_uvs = []\n face_normals = []\n face_normals_dict = {}\n\n for idx, face in enumerate(self.faces):\n i1, i2, i3, fid = face\n v1 = self._parent_vertices[i1]\n v2 = self._parent_vertices[i2]\n v3 = self._parent_vertices[i3]\n face_normal = get_face_normal(v3, v2, v1)\n if face_normal is not None:\n valid_faces.append((i1, i2, i3, fid))\n face_normals.append(np.array(face_normal))\n face_normals_dict[face] = np.array(face_normal)\n if has_uvs:\n valid_uvs.append(self.face_uvs[idx])\n\n self.faces = valid_faces\n self.face_uvs = valid_uvs\n\n # -----------------------------\n # Create face mapping -- not optimized\n # -----------------------------\n # create a list of adjacent faces\n adjacent_faces = defaultdict(list)\n for face in self.faces:\n i1, i2, i3, fid = face\n adjacent_faces[i1].append(face)\n adjacent_faces[i2].append(face)\n adjacent_faces[i3].append(face)\n\n # map similar (in terms of normal) and nearby faces to the same fid\n new_fid_mapping = {}\n current_new_fid = 0\n\n def df_add_faces(new_fid, face, normal):\n i1, i2, i3, fid = face\n\n if fid in new_fid_mapping:\n return\n else:\n # check to see that their normals aren't too disparate\n\n # The face_normal vectors were constructed to have\n # norm 1, so u dot v is the cosine between u & v\n # Comparing the cosine is much faster than comparing\n # the actual angles.\n cos = face_normals_dict[face].dot(normal)\n if cos > cos_thresh:\n\n new_fid_mapping[fid] = new_fid\n for vert in [i1, i2, i3]:\n for n in adjacent_faces[vert]:\n df_add_faces(new_fid, n, face_normals_dict[face])\n\n for face in self.faces:\n i1, i2, i3, fid = face\n\n if fid in 
new_fid_mapping:\n continue\n else:\n current_new_fid += 1\n new_fid_mapping[fid] = current_new_fid\n\n if len(self.faces) > 1000:\n continue\n\n for vert in [i1, i2, i3]:\n for n in adjacent_faces[vert]:\n df_add_faces(current_new_fid, n, face_normals_dict[face])\n self.num_faces = current_new_fid\n\n if self.deformation is not None:\n for i, v in enumerate(self._parent_vertices):\n self._parent_vertices[i] = self.deformation.dot(v)\n\n # Recompute normals after deformation\n face_normals = []\n face_normals_dict = {}\n\n for idx, face in enumerate(self.faces):\n i1, i2, i3, fid = face\n v1 = self._parent_vertices[i1]\n v2 = self._parent_vertices[i2]\n v3 = self._parent_vertices[i3]\n\n face_normal = get_face_normal(v3, v2, v1)\n if face_normal is not None:\n face_normals.append(np.array(face_normal))\n face_normals_dict[face] = np.array(face_normal)\n\n # Go through each vertex used by each face, if the face normal is\n # within 'angle_thresh' of a normal used by a different face, add\n # it to a group of nearby normals. Once all groups have been created,\n # compute the average of each group.\n # TODO: this can be rewritten to be significantly faster.\n # For example, you could store vectors normalized to be length 1,\n # keeping those vectors in a kdtree. Then you can search in the\n # kdtree for the closest vector instead of iterating over all\n # vectors to find the closest.\n for idx, face in enumerate(self.faces):\n i1, i2, i3, fid = face\n v1 = self._parent_vertices[i1]\n v2 = self._parent_vertices[i2]\n v3 = self._parent_vertices[i3]\n\n face_normal = face_normals[idx]\n\n for vert_idx in [i1, i2, i3]:\n min_cos = -2.0 # basically -inf\n min_group = None\n\n for normal_group in vert_normal_groups[vert_idx]:\n for normal in normal_group:\n # The face_normal vectors were constructed to have\n # norm 1, so u dot v is the cosine between u & v\n # Comparing the cosine is much faster than comparing\n # the actual angles.\n cos = face_normal.dot(normal)\n if cos > cos_thresh and cos > min_cos:\n min_cos = cos\n min_group = normal_group\n\n if min_group is not None:\n min_group.append(face_normal)\n face_verts_to_groups[(i1, i2, i3, vert_idx)] = min_group\n else:\n new_group = [face_normal, ]\n face_verts_to_groups[(i1, i2, i3, vert_idx)] = new_group\n vert_normal_groups[vert_idx].append(new_group) # new group\n\n # Compute the average vector for each group\n group_to_avg_normal = {}\n for vert, normal_groups in vert_normal_groups.items():\n for group in normal_groups:\n group_to_avg_normal[id(group)] = average_vectors(group)\n\n vertices = []\n normals = []\n indices = []\n uvs = []\n vertex_face_ids = []\n\n # Create the final list of vertices for each face, where each vertex\n # uses the average vector from the group it was assigned to.\n for idx, face in enumerate(self.faces):\n i1, i2, i3, fid = face\n fid = new_fid_mapping[fid] / 255.\n for vert_idx in [i1, i2, i3]:\n group = face_verts_to_groups[(i1, i2, i3, vert_idx)]\n avg_normal = group_to_avg_normal[id(group)]\n\n vertices.append(self._parent_vertices[vert_idx])\n normals.append(avg_normal)\n vertex_face_ids.append([fid, fid, fid])\n\n if has_uvs:\n uv1, uv2, uv3 = self.face_uvs[idx]\n uvs.append(uv1)\n uvs.append(uv2)\n uvs.append(uv3)\n\n indices = range(len(vertices))\n\n assert len(normals) == len(vertices)\n\n if len(vertices) == 0:\n self.vertices = None\n self.indices = None\n logger.warning(\"Empty mesh found\")\n return\n\n vtype = [('a_position', np.float32, 3),\n ('a_normal', np.float32, 3),\n ('face_ids', 
np.float32, 3)]\n if has_uvs:\n vtype.append(('a_texcoord', np.float32, 2))\n\n gl_vertices = np.zeros(len(vertices), vtype)\n gl_vertices['a_position'] = vertices\n gl_vertices['a_normal'] = normals\n gl_vertices['face_ids'] = vertex_face_ids\n\n if has_uvs:\n gl_vertices['a_texcoord'] = uvs\n\n self.vertices = gl_vertices\n self.indices = np.array(indices)\n self.indices = checked_dtype_narrowing(self.indices, np.uint32)\n\n\ndef average_vectors(vectors):\n avg_vec = [0.0, 0.0, 0.0]\n for v1, v2, v3 in vectors:\n avg_vec[0] += v1\n avg_vec[1] += v2\n avg_vec[2] += v3\n\n avg_vec[0] /= float(len(vectors))\n avg_vec[1] /= float(len(vectors))\n avg_vec[2] /= float(len(vectors))\n return avg_vec\n\n\ndef get_face_normal(v1, v2, v3):\n # numpy is slow here\n a0 = v3[0] - v1[0]\n a1 = v3[1] - v1[1]\n a2 = v3[2] - v1[2]\n\n b0 = v2[0] - v1[0]\n b1 = v2[1] - v1[1]\n b2 = v2[2] - v1[2]\n\n n0 = a1 * b2 - a2 * b1\n n1 = a2 * b0 - a0 * b2\n n2 = a0 * b1 - a1 * b0\n\n mag = math.sqrt(n0 ** 2 + n1 ** 2 + n2 ** 2)\n\n if mag == 0:\n return None\n\n return [n0 / mag,\n n1 / mag,\n n2 / mag]\n\n\ndef angle_between(v1, v2):\n \"\"\" Returns the angle in radians between vectors 'v1' and 'v2'::\n\n >>> angle_between((1, 0, 0), (0, 1, 0))\n 1.5707963267948966\n >>> angle_between((1, 0, 0), (1, 0, 0))\n 0.0\n >>> angle_between((1, 0, 0), (-1, 0, 0))\n 3.141592653589793\n \"\"\"\n n1 = v1[0] * v1[0] + v1[1] * v1[1] + v1[2] * v1[2]\n n2 = v2[0] * v2[0] + v2[1] * v2[1] + v2[2] * v2[2]\n if n1 == 0.0 or n2 == 0.0:\n return np.pi / 2.0\n dot = (v1[0] * v2[0] + v1[1] * v2[1] + v1[2] * v2[2]) / math.sqrt(n1 * n2)\n if dot < -1.0:\n angle = np.pi\n elif dot > 1.0:\n angle = 0\n else:\n angle = np.arccos(dot)\n return angle\n\n\nclass MTLFile(object):\n\n def __init__(self, path):\n self.materials = []\n self.path = path\n self.load()\n\n def _match_line(self, line, pattern):\n \"\"\" Match 'pattern' to line, cache result and return match \"\"\"\n self.match_result = pattern.match(line)\n return self.match_result\n\n def load(self):\n\n with open(self.path, 'r') as f:\n\n def float_result(idx):\n s = self.match_result.group(idx)\n return float(s) if s else None\n\n def int_result(idx):\n s = self.match_result.group(idx)\n return float(s) if s else None\n\n for line in f:\n line = line.strip()\n\n # Line comment\n if not line or line[0] == '#':\n continue\n\n # newmtl material_name\n elif self._match_line(line, new_mtl_pattern):\n res = self.match_result\n mtl_name = res.group(1).strip()\n\n mat = Material(name=mtl_name)\n self.materials.append(mat)\n self.current_material = mat\n\n # Kd, Ka, Ks - Diffuse/Ambient/Specular\n elif self._match_line(line, k_pattern):\n k = self.match_result.group(1)\n color = [float_result(2), float_result(3), float_result(4)]\n key = {'Ka': 'ambient', 'Kd': 'diffuse', 'Ks': 'specular'}\n setattr(self.current_material, key[k], color)\n\n elif self._match_line(line, single_pattern):\n name = self.match_result.group(1)\n mat = self.current_material\n\n if name == 'Ns':\n mat.specular_exp = float_result(2)\n elif name == 'Ni':\n # Don't know what this is\n mat.Ni = float_result(2)\n elif name == 'd' or name.lower() == 'tr':\n mat.dissolve = float_result(2)\n elif name == 'illum':\n mat.illum = int_result(2)\n\n elif self._match_line(line, map_pattern):\n\n mat = self.current_material\n map_name = self.match_result.group(1)\n\n # Only map_Kd supported currently\n if map_name == 'map_Kd':\n\n img_match = image_pattern.search(line)\n if img_match:\n image_name = img_match.group(1).strip()\n 
mat.diffuse_texture = Texture(image_name)\n\n # Optional clamp argument\n clamp = re.search(r'-clamp (on|off)', line)\n if clamp:\n mat.diffuse_texture.clamp = clamp.group(1)\n\n else:\n logger.warning(\"Material discarding '{}'\"\n .format(map_name))\n\n\nclass OBJFile(object):\n\n def __init__(self, path, deformation):\n self.path = path\n self.face_groups = defaultdict(FaceGroup)\n self.fid = 0\n self.deformation = deformation\n\n self.vertices = []\n self.normals = []\n self.uvs = []\n\n self.faces = []\n self.face_idx = 0\n\n self.materials = []\n self.materials_dict = {}\n self.current_material_name = None\n\n self.load()\n\n #######\n def _match_line(self, line, pattern):\n \"\"\" Match 'pattern' to line, cache result and return match \"\"\"\n self.match_result = pattern.match(line)\n return self.match_result\n\n def _handle_vertex(self, result):\n vx = float(result.group(1))\n vy = float(result.group(2))\n vz = float(result.group(3))\n self.vertices.append([vx, vy, vz])\n\n def _handle_vertex_normal(self, result):\n vx = float(result.group(1))\n vy = float(result.group(2))\n vz = float(result.group(3))\n self.normals.append([vx, vy, vz])\n\n def _handle_uv(self, result):\n u = float(result.group(1))\n v = float(result.group(2))\n self.uvs.append([u, v])\n\n def _add_face(self, face, uv_idxs, normal_idxs):\n\n # Triangle face:\n if face[3] is None:\n i1 = face[0] - 1\n i2 = face[1] - 1\n i3 = face[2] - 1\n if normal_idxs:\n n1 = self.normals[normal_idxs[0] - 1]\n n2 = self.normals[normal_idxs[1] - 1]\n n3 = self.normals[normal_idxs[2] - 1]\n else:\n n1 = n2 = n3 = None\n\n if uv_idxs:\n uv1 = self.uvs[uv_idxs[0] - 1]\n uv2 = self.uvs[uv_idxs[1] - 1]\n uv3 = self.uvs[uv_idxs[2] - 1]\n else:\n uv1 = uv2 = uv3 = None\n\n self.faces.append(Face(i1, i2, i3, n1, n2, n3, uv1, uv2, uv3, self.fid))\n\n # Quad face:\n else:\n # Make two triangles\n for idx1, idx2, idx3 in ([0, 1, 3], [1, 2, 3]):\n\n i1 = face[idx1] - 1\n i2 = face[idx2] - 1\n i3 = face[idx3] - 1\n\n if normal_idxs:\n n1 = self.normals[normal_idxs[idx1] - 1]\n n2 = self.normals[normal_idxs[idx2] - 1]\n n3 = self.normals[normal_idxs[idx3] - 1]\n else:\n n1 = n2 = n3 = None\n\n if uv_idxs:\n uv1 = self.uvs[uv_idxs[idx1] - 1]\n uv2 = self.uvs[uv_idxs[idx2] - 1]\n uv3 = self.uvs[uv_idxs[idx3] - 1]\n else:\n uv1 = uv2 = uv3 = None\n\n self.faces.append(\n Face(i1, i2, i3, n1, n2, n3, uv1, uv2, uv3, self.fid))\n\n self.fid += 1\n self.face_idx = len(self.faces)\n\n ## Material related ##\n def _handle_smooth(self, smooth):\n # TODO\n logger.debug(\"Discarding smoothing\")\n\n def _handle_usemtl(self, mat_name):\n self.face_groups[self.face_idx].material_name = mat_name\n self.current_material_name = mat_name\n\n def _parse_mtl_file(self, mtl_file):\n logger.debug(\"Parsing MTL file {}\".format(mtl_file))\n mtl_file = os.path.join(os.path.dirname(self.path), mtl_file)\n self.materials = MTLFile(mtl_file).materials\n self.materials_dict = {mat.name: mat for mat in self.materials}\n logger.debug(\" parsed {} materials \".format(len(self.materials)))\n\n def _handle_new_group(self, group_name, file_obj):\n logger.debug(\" Adding group: {}\".format(group_name))\n self.face_groups[self.face_idx].group_name = group_name\n # New groups inherit current material unless overwritten later\n self.face_groups[self.face_idx].material_name = self.current_material_name\n\n def _start_new_object(self, obj_name=None):\n logger.debug(\" Adding object: {}\".format(obj_name))\n self.face_groups[self.face_idx].object_name = obj_name\n # New objects 
inherit current material unless overwritten later\n self.face_groups[self.face_idx].material_name = self.current_material_name\n\n #### Load ####\n def load(self):\n\n f = open(self.path, 'r')\n\n def result(idx):\n s = self.match_result.group(idx)\n return int(s) if s else None\n\n for line in f.readlines():\n line = line.strip()\n\n # Line comment\n if not line or line[0] == '#':\n continue\n\n # v float float float\n elif self._match_line(line, vertex_pattern):\n self._handle_vertex(self.match_result)\n\n # vn float float float\n elif self._match_line(line, normal_pattern):\n self._handle_vertex_normal(self.match_result)\n\n # vt float float\n elif self._match_line(line, uv_pattern):\n self._handle_uv(self.match_result)\n\n # f vertex vertex vertex ...\n elif self._match_line(line, face_pattern1):\n\n self._add_face(\n [result(1), result(2), result(3), result(4)], # faces\n None, # uv\n None, # normal\n )\n\n # f vertex/uv vertex/uv vertex/uv ...\n elif self._match_line(line, face_pattern2):\n\n self._add_face(\n [result(2), result(5), result(8), result(11)], # faces\n [result(3), result(6), result(9), result(12)], # uv\n None, # normal\n )\n\n # f vertex/uv/normal vertex/uv/normal vertex/uv/normal ...\n elif self._match_line(line, face_pattern3):\n\n self._add_face(\n [result(2), result(6), result(10), result(14)], # faces\n [result(3), result(7), result(11), result(15)], # uvs\n [result(4), result(8), result(12), result(16)] # normal\n )\n\n # f vertex//normal vertex//normal vertex//normal ...\n elif self._match_line(line, face_pattern4):\n\n self._add_face(\n [result(2), result(5), result(8), result(11)], # faces\n None, # uv\n [result(3), result(6), result(9), result(12)] # normal\n )\n\n # New Object\n elif re.match(r'^o ', line):\n obj_name = line[2:].strip()\n self._start_new_object(obj_name)\n\n # New Group\n elif re.match(r'^g', line):\n self._handle_new_group(line[2:].strip(), f)\n\n # use material\n elif re.match(r'^usemtl ', line):\n mat_name = line[7:].strip()\n self._handle_usemtl(mat_name)\n\n # Material definition\n elif re.match(r'^mtllib ', line):\n mtl_file = line[7:].strip()\n self._parse_mtl_file(mtl_file)\n\n # Smoothing\n elif re.match(r'^s ', line):\n smooth = line[2:].strip()\n self._handle_smooth(smooth)\n\n self._finalize_data()\n\n def _finalize_data(self):\n unique_face_groups = defaultdict(list)\n\n def face_group_hash(mod):\n return str(mod.material_name) + str(mod.has_uvs)\n\n face_idxs = sorted(self.face_groups.keys())\n\n num_faces = len(self.faces)\n for begin_idx, end_idx in zip(face_idxs, face_idxs[1:] + [num_faces, ]):\n if begin_idx >= num_faces:\n continue\n face_group = self.face_groups[begin_idx]\n face_group.face_begin_idx = begin_idx\n face_group.face_end_idx = end_idx\n face_group.has_uvs = (self.faces[begin_idx].uv1 is not None)\n unique_face_groups[face_group_hash(face_group)].append(face_group)\n\n meshes = []\n # Now create meshes for each renderable set of faces\n for group_hash, mergeable_groups in unique_face_groups.items():\n mergeable_groups.sort(key=lambda x: x.face_begin_idx)\n\n material_name = mergeable_groups[0].material_name\n material = self.materials_dict.get(material_name, None)\n mesh = Mesh(material=material, deformation=self.deformation)\n mesh._parent_vertices = self.vertices\n\n for face_group in mergeable_groups:\n for idx in range(face_group.face_begin_idx, face_group.face_end_idx):\n face = self.faces[idx]\n mesh.faces.append((face[0], face[1], face[2], face[-1]))\n if face[3] is not None:\n 
mesh.face_normals.append(face[3:6])\n if face[6] is not None:\n mesh.face_uvs.append(face[6:9])\n meshes.append(mesh)\n\n self.meshes = meshes\n"
] | [
[
"numpy.min",
"numpy.asarray",
"numpy.arccos",
"numpy.max",
"numpy.iinfo",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
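The `compute_smooth_normals` routine in the loader above reduces to two pieces of vector math: a triangle's unit normal via a cross product, and a cosine test for deciding which normals are close enough to average. A minimal numpy re-derivation (a sketch with our own names, not the repo's code):

```python
import numpy as np

def face_normal(v1, v2, v3):
    """Unit normal of triangle (v1, v2, v3); None for degenerate faces."""
    n = np.cross(np.subtract(v3, v1), np.subtract(v2, v1))
    mag = np.linalg.norm(n)
    if mag == 0:  # collinear vertices: no well-defined normal
        return None
    return n / mag

def similar(n1, n2, max_smoothing_angle=25.0):
    """Cosine test used for smoothing: u.v is cos(angle) for unit vectors,
    so comparing against cos(threshold) avoids any arccos calls."""
    return float(np.dot(n1, n2)) > np.cos(np.radians(max_smoothing_angle))

n = face_normal([0, 0, 0], [1, 0, 0], [0, 1, 0])
print(n)              # [ 0.  0. -1.]
print(similar(n, n))  # True: identical normals always group together
```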
WilsonYangLiu/TCGADownload | [
"d3af7e3fecacd0703c4c82ebd889593a5ffb2e41"
] | [
"script/TPM.py"
] | [
"from __future__ import print_function, division\r\n\r\n#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n'''\r\nCopyright (c) 2016 Wei-Xin Liu\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n'''\r\n\r\nimport os, csv\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom pandas import DataFrame, Series\r\nfrom itertools import islice\r\nfrom FPKM import calcFPKM\r\n\r\ndef calcTPM(Table, GeneLenDict, from_RAW=True):\r\n\t'''\r\n\tParameters:\r\n\t\tTable: DataFrame. It stores the read count table for each gene in each sample\r\n\t\tGeneLenDict: a Dict stores the length of each gene\r\n\t\t\r\n\tReturns:\r\n\t\ttmpTable: DataFrame. It stores the TPM table for each gene in each sample\r\n\t'''\r\n\tif from_RAW:\r\n\t\tTable = calcFPKM(Table, GeneLenDict)\r\n\t\tif isinstance(Table, DataFrame):\r\n\t\t\tTable = Table.iloc[:-5,:]\r\n\t\telif isinstance(Table, Series):\r\n\t\t\tTable = Table[:-5]\r\n\t\telse:\r\n\t\t\tprint(r'the type of data should be DataFrame or Series')\r\n\t\t\traise Exception\r\n\r\n\tif isinstance(Table, DataFrame):\r\n\t\ttpmTable = DataFrame(Table)\r\n\t\tLib = np.sum(Table, axis=0)\r\n\t\tfor col in Table.columns:\r\n\t\t\tfor gene in GeneLenDict.keys():\r\n\t\t\t\ttpmTable.ix[gene, col] = 10e6 * Table.ix[gene, col] / Lib[col]\r\n\t\t\t\t\r\n\telif isinstance(Table, Series):\r\n\t\ttpmTable = Series(Table)\r\n\t\tLib = np.sum(Table, axis=0)\r\n\t\tfor gene in GeneLenDict.keys():\r\n\t\t\ttpmTable[gene] = 10e6 * Table[gene] / Lib\r\n\t\r\n\telse:\r\n\t\tprint(r'the type of data should be DataFrame or Series')\r\n\t\traise Exception\r\n\t\t\t\r\n\treturn tpmTable\r\n\t\r\nif __name__ == '__main__':\r\n\tos.chdir(r'E:/Project_G/db.TCGA/TCGADownloader/script')\r\n\t\r\n\tTable = pd.read_csv(r'../data/COADREAD_trans_GeneExp_Counts.csv', index_col=0)\r\n\t\r\n\twith open(r'../data/gencode.v22.annotation.used4FPKM.csv', 'rb') as csvfile:\r\n\t\tspamreader = csv.reader(csvfile)\r\n\t\tGeneLenDict = {line[0]:int(line[2]) for line in islice(spamreader, 1, None) }\r\n\t\t\r\n\tTable = calcTPM(Table.iloc[:,0], GeneLenDict)\r\n\t#Table.to_csv(r'../data/COADREAD_trans_GeneExp_Counts2TPM.csv')\r\n\t\r\n\t"
] | [
[
"pandas.Series",
"pandas.read_csv",
"numpy.sum",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
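The `calcTPM` entry above converts FPKM values to TPM by renormalising each sample to a fixed total. A vectorised sketch under the textbook definition, TPM_i = 1e6 * FPKM_i / sum_j FPKM_j (the per-million constant is the standard convention; the script above scales by 10e6). `DataFrame.div` stands in for the element-wise loop and also avoids `DataFrame.ix`, which was removed in pandas 1.0:

```python
import pandas as pd

def fpkm_to_tpm(fpkm: pd.DataFrame) -> pd.DataFrame:
    """Rescale every sample (column) so its values sum to one million."""
    return fpkm.div(fpkm.sum(axis=0), axis=1) * 1e6

fpkm = pd.DataFrame({"s1": [5.0, 15.0], "s2": [2.0, 8.0]},
                    index=["geneA", "geneB"])
tpm = fpkm_to_tpm(fpkm)
print(tpm)  # each column now sums to one million
assert (tpm.sum(axis=0).round(3) == 1e6).all()
```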
dylansnow/vimba_pose_detection | [
"2aeac00cb678f8a7f490f52be3fe5f87573703db"
] | [
"Vimba_5.0/VimbaPython/Source/vimba/frame.py"
] | [
"\"\"\"BSD 2-Clause License\n\nCopyright (c) 2019, Allied Vision Technologies GmbH\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport enum\nimport ctypes\nimport copy\nimport functools\n\nfrom typing import Optional, Tuple\nfrom .c_binding import create_string_buffer, byref, sizeof, decode_flags\nfrom .c_binding import call_vimba_c, call_vimba_image_transform, VmbFrameStatus, VmbFrameFlags, \\\n VmbFrame, VmbHandle, VmbPixelFormat, VmbImage, VmbDebayerMode, \\\n VmbTransformInfo, PIXEL_FORMAT_CONVERTIBILITY_MAP, PIXEL_FORMAT_TO_LAYOUT\nfrom .feature import FeaturesTuple, FeatureTypes, FeatureTypeTypes, discover_features\nfrom .shared import filter_features_by_name, filter_features_by_type, filter_features_by_category, \\\n attach_feature_accessors, remove_feature_accessors\nfrom .util import TraceEnable, RuntimeTypeCheckEnable, EnterContextOnCall, LeaveContextOnCall, \\\n RaiseIfOutsideContext\nfrom .error import VimbaFrameError, VimbaFeatureError\n\ntry:\n import numpy # type: ignore\n\nexcept ModuleNotFoundError:\n numpy = None\n\n\n__all__ = [\n 'PixelFormat',\n 'MONO_PIXEL_FORMATS',\n 'BAYER_PIXEL_FORMATS',\n 'RGB_PIXEL_FORMATS',\n 'RGBA_PIXEL_FORMATS',\n 'BGR_PIXEL_FORMATS',\n 'BGRA_PIXEL_FORMATS',\n 'YUV_PIXEL_FORMATS',\n 'YCBCR_PIXEL_FORMATS',\n 'COLOR_PIXEL_FORMATS',\n 'OPENCV_PIXEL_FORMATS',\n 'FrameStatus',\n 'Debayer',\n 'Frame',\n 'FrameTuple',\n 'FormatTuple',\n 'intersect_pixel_formats'\n]\n\n\n# Forward declarations\nFrameTuple = Tuple['Frame', ...]\nFormatTuple = Tuple['PixelFormat', ...]\n\n\nclass PixelFormat(enum.IntEnum):\n \"\"\"Enum specifying all PixelFormats. 
Note: Not all Cameras support all Pixelformats.\n\n Mono formats:\n Mono8 - Monochrome, 8 bits (PFNC:Mono8)\n Mono10 - Monochrome, 10 bits in 16 bits (PFNC:Mono10)\n Mono10p - Monochrome, 4x10 bits continuously packed in 40 bits\n (PFNC:Mono10p)\n Mono12 - Monochrome, 12 bits in 16 bits (PFNC:Mono12)\n Mono12Packed - Monochrome, 2x12 bits in 24 bits (GEV:Mono12Packed)\n Mono12p - Monochrome, 2x12 bits continuously packed in 24 bits\n (PFNC:Mono12p)\n Mono14 - Monochrome, 14 bits in 16 bits (PFNC:Mono14)\n Mono16 - Monochrome, 16 bits (PFNC:Mono16)\n\n Bayer formats:\n BayerGR8 - Bayer-color, 8 bits, starting with GR line\n (PFNC:BayerGR8)\n BayerRG8 - Bayer-color, 8 bits, starting with RG line\n (PFNC:BayerRG8)\n BayerGB8 - Bayer-color, 8 bits, starting with GB line\n (PFNC:BayerGB8)\n BayerBG8 - Bayer-color, 8 bits, starting with BG line\n (PFNC:BayerBG8)\n BayerGR10 - Bayer-color, 10 bits in 16 bits, starting with GR\n line (PFNC:BayerGR10)\n BayerRG10 - Bayer-color, 10 bits in 16 bits, starting with RG\n line (PFNC:BayerRG10)\n BayerGB10 - Bayer-color, 10 bits in 16 bits, starting with GB\n line (PFNC:BayerGB10)\n BayerBG10 - Bayer-color, 10 bits in 16 bits, starting with BG\n line (PFNC:BayerBG10)\n BayerGR12 - Bayer-color, 12 bits in 16 bits, starting with GR\n line (PFNC:BayerGR12)\n BayerRG12 - Bayer-color, 12 bits in 16 bits, starting with RG\n line (PFNC:BayerRG12)\n BayerGB12 - Bayer-color, 12 bits in 16 bits, starting with GB\n line (PFNC:BayerGB12)\n BayerBG12 - Bayer-color, 12 bits in 16 bits, starting with BG\n line (PFNC:BayerBG12)\n BayerGR12Packed - Bayer-color, 2x12 bits in 24 bits, starting with GR\n line (GEV:BayerGR12Packed)\n BayerRG12Packed - Bayer-color, 2x12 bits in 24 bits, starting with RG\n line (GEV:BayerRG12Packed)\n BayerGB12Packed - Bayer-color, 2x12 bits in 24 bits, starting with GB\n line (GEV:BayerGB12Packed)\n BayerBG12Packed - Bayer-color, 2x12 bits in 24 bits, starting with BG\n line (GEV:BayerBG12Packed)\n BayerGR10p - Bayer-color, 4x10 bits continuously packed in 40\n bits, starting with GR line (PFNC:BayerGR10p)\n BayerRG10p - Bayer-color, 4x10 bits continuously packed in 40\n bits, starting with RG line (PFNC:BayerRG10p)\n BayerGB10p - Bayer-color, 4x10 bits continuously packed in 40\n bits, starting with GB line (PFNC:BayerGB10p)\n BayerBG10p - Bayer-color, 4x10 bits continuously packed in 40\n bits, starting with BG line (PFNC:BayerBG10p)\n BayerGR12p - Bayer-color, 2x12 bits continuously packed in 24\n bits, starting with GR line (PFNC:BayerGR12p)\n BayerRG12p - Bayer-color, 2x12 bits continuously packed in 24\n bits, starting with RG line (PFNC:BayerRG12p)\n BayerGB12p - Bayer-color, 2x12 bits continuously packed in 24\n bits, starting with GB line (PFNC:BayerGB12p)\n BayerBG12p - Bayer-color, 2x12 bits continuously packed in 24\n bits, starting with BG line (PFNC:BayerBG12p)\n BayerGR16 - Bayer-color, 16 bits, starting with GR line\n (PFNC:BayerGR16)\n BayerRG16 - Bayer-color, 16 bits, starting with RG line\n (PFNC:BayerRG16)\n BayerGB16 - Bayer-color, 16 bits, starting with GB line\n (PFNC:BayerGB16)\n BayerBG16 - Bayer-color, 16 bits, starting with BG line\n (PFNC:BayerBG16)\n\n RGB formats:\n Rgb8 - RGB, 8 bits x 3 (PFNC:RGB8)\n Bgr8 - BGR, 8 bits x 3 (PFNC:Bgr8)\n Rgb10 - RGB, 10 bits in 16 bits x 3 (PFNC:RGB10)\n Bgr10 - BGR, 10 bits in 16 bits x 3 (PFNC:BGR10)\n Rgb12 - RGB, 12 bits in 16 bits x 3 (PFNC:RGB12)\n Bgr12 - BGR, 12 bits in 16 bits x 3 (PFNC:BGR12)\n Rgb14 - RGB, 14 bits in 16 bits x 3 (PFNC:RGB14)\n Bgr14 - BGR, 14 bits in 
16 bits x 3 (PFNC:BGR14)\n Rgb16 - RGB, 16 bits x 3 (PFNC:RGB16)\n Bgr16 - BGR, 16 bits x 3 (PFNC:BGR16)\n\n RGBA formats:\n Argb8 - ARGB, 8 bits x 4 (PFNC:RGBa8)\n Rgba8 - RGBA, 8 bits x 4, legacy name\n Bgra8 - BGRA, 8 bits x 4 (PFNC:BGRa8)\n Rgba10 - RGBA, 10 bits in 16 bits x 4\n Bgra10 - BGRA, 10 bits in 16 bits x 4\n Rgba12 - RGBA, 12 bits in 16 bits x 4\n Bgra12 - BGRA, 12 bits in 16 bits x 4\n Rgba14 - RGBA, 14 bits in 16 bits x 4\n Bgra14 - BGRA, 14 bits in 16 bits x 4\n Rgba16 - RGBA, 16 bits x 4\n Bgra16 - BGRA, 16 bits x 4\n\n YUV/YCbCr formats:\n Yuv411 - YUV 411 with 8 bits (GEV:YUV411Packed)\n Yuv422 - YUV 422 with 8 bits (GEV:YUV422Packed)\n Yuv444 - YUV 444 with 8 bits (GEV:YUV444Packed)\n YCbCr411_8_CbYYCrYY - Y´CbCr 411 with 8 bits\n (PFNC:YCbCr411_8_CbYYCrYY) - identical to Yuv411\n YCbCr422_8_CbYCrY - Y´CbCr 422 with 8 bits\n (PFNC:YCbCr422_8_CbYCrY) - identical to Yuv422\n YCbCr8_CbYCr - Y´CbCr 444 with 8 bits\n (PFNC:YCbCr8_CbYCr) - identical to Yuv444\n \"\"\"\n # Mono Formats\n Mono8 = VmbPixelFormat.Mono8\n Mono10 = VmbPixelFormat.Mono10\n Mono10p = VmbPixelFormat.Mono10p\n Mono12 = VmbPixelFormat.Mono12\n Mono12Packed = VmbPixelFormat.Mono12Packed\n Mono12p = VmbPixelFormat.Mono12p\n Mono14 = VmbPixelFormat.Mono14\n Mono16 = VmbPixelFormat.Mono16\n\n # Bayer Formats\n BayerGR8 = VmbPixelFormat.BayerGR8\n BayerRG8 = VmbPixelFormat.BayerRG8\n BayerGB8 = VmbPixelFormat.BayerGB8\n BayerBG8 = VmbPixelFormat.BayerBG8\n BayerGR10 = VmbPixelFormat.BayerGR10\n BayerRG10 = VmbPixelFormat.BayerRG10\n BayerGB10 = VmbPixelFormat.BayerGB10\n BayerBG10 = VmbPixelFormat.BayerBG10\n BayerGR12 = VmbPixelFormat.BayerGR12\n BayerRG12 = VmbPixelFormat.BayerRG12\n BayerGB12 = VmbPixelFormat.BayerGB12\n BayerBG12 = VmbPixelFormat.BayerBG12\n BayerGR12Packed = VmbPixelFormat.BayerGR12Packed\n BayerRG12Packed = VmbPixelFormat.BayerRG12Packed\n BayerGB12Packed = VmbPixelFormat.BayerGB12Packed\n BayerBG12Packed = VmbPixelFormat.BayerBG12Packed\n BayerGR10p = VmbPixelFormat.BayerGR10p\n BayerRG10p = VmbPixelFormat.BayerRG10p\n BayerGB10p = VmbPixelFormat.BayerGB10p\n BayerBG10p = VmbPixelFormat.BayerBG10p\n BayerGR12p = VmbPixelFormat.BayerGR12p\n BayerRG12p = VmbPixelFormat.BayerRG12p\n BayerGB12p = VmbPixelFormat.BayerGB12p\n BayerBG12p = VmbPixelFormat.BayerBG12p\n BayerGR16 = VmbPixelFormat.BayerGR16\n BayerRG16 = VmbPixelFormat.BayerRG16\n BayerGB16 = VmbPixelFormat.BayerGB16\n BayerBG16 = VmbPixelFormat.BayerBG16\n\n # RGB Formats\n Rgb8 = VmbPixelFormat.Rgb8\n Bgr8 = VmbPixelFormat.Bgr8\n Rgb10 = VmbPixelFormat.Rgb10\n Bgr10 = VmbPixelFormat.Bgr10\n Rgb12 = VmbPixelFormat.Rgb12\n Bgr12 = VmbPixelFormat.Bgr12\n Rgb14 = VmbPixelFormat.Rgb14\n Bgr14 = VmbPixelFormat.Bgr14\n Rgb16 = VmbPixelFormat.Rgb16\n Bgr16 = VmbPixelFormat.Bgr16\n\n # RGBA Formats\n Rgba8 = VmbPixelFormat.Rgba8\n Bgra8 = VmbPixelFormat.Bgra8\n Argb8 = VmbPixelFormat.Argb8\n Rgba10 = VmbPixelFormat.Rgba10\n Bgra10 = VmbPixelFormat.Bgra10\n Rgba12 = VmbPixelFormat.Rgba12\n Bgra12 = VmbPixelFormat.Bgra12\n Rgba14 = VmbPixelFormat.Rgba14\n Bgra14 = VmbPixelFormat.Bgra14\n Rgba16 = VmbPixelFormat.Rgba16\n Bgra16 = VmbPixelFormat.Bgra16\n Yuv411 = VmbPixelFormat.Yuv411\n Yuv422 = VmbPixelFormat.Yuv422\n Yuv444 = VmbPixelFormat.Yuv444\n\n # YCbCr Formats\n YCbCr411_8_CbYYCrYY = VmbPixelFormat.YCbCr411_8_CbYYCrYY\n YCbCr422_8_CbYCrY = VmbPixelFormat.YCbCr422_8_CbYCrY\n YCbCr8_CbYCr = VmbPixelFormat.YCbCr8_CbYCr\n\n def __str__(self):\n return self._name_\n\n def __repr__(self):\n return 
'PixelFormat.{}'.format(str(self))\n\n def get_convertible_formats(self) -> Tuple['PixelFormat', ...]:\n formats = PIXEL_FORMAT_CONVERTIBILITY_MAP[VmbPixelFormat(self)]\n return tuple([PixelFormat(fmt) for fmt in formats])\n\n\nMONO_PIXEL_FORMATS = (\n PixelFormat.Mono8,\n PixelFormat.Mono10,\n PixelFormat.Mono10p,\n PixelFormat.Mono12,\n PixelFormat.Mono12Packed,\n PixelFormat.Mono12p,\n PixelFormat.Mono14,\n PixelFormat.Mono16\n)\n\n\nBAYER_PIXEL_FORMATS = (\n PixelFormat.BayerGR8,\n PixelFormat.BayerRG8,\n PixelFormat.BayerGB8,\n PixelFormat.BayerBG8,\n PixelFormat.BayerGR10,\n PixelFormat.BayerRG10,\n PixelFormat.BayerGB10,\n PixelFormat.BayerBG10,\n PixelFormat.BayerGR12,\n PixelFormat.BayerRG12,\n PixelFormat.BayerGB12,\n PixelFormat.BayerBG12,\n PixelFormat.BayerGR12Packed,\n PixelFormat.BayerRG12Packed,\n PixelFormat.BayerGB12Packed,\n PixelFormat.BayerBG12Packed,\n PixelFormat.BayerGR10p,\n PixelFormat.BayerRG10p,\n PixelFormat.BayerGB10p,\n PixelFormat.BayerBG10p,\n PixelFormat.BayerGR12p,\n PixelFormat.BayerRG12p,\n PixelFormat.BayerGB12p,\n PixelFormat.BayerBG12p,\n PixelFormat.BayerGR16,\n PixelFormat.BayerRG16,\n PixelFormat.BayerGB16,\n PixelFormat.BayerBG16\n)\n\n\nRGB_PIXEL_FORMATS = (\n PixelFormat.Rgb8,\n PixelFormat.Rgb10,\n PixelFormat.Rgb12,\n PixelFormat.Rgb14,\n PixelFormat.Rgb16\n)\n\n\nRGBA_PIXEL_FORMATS = (\n PixelFormat.Rgba8,\n PixelFormat.Argb8,\n PixelFormat.Rgba10,\n PixelFormat.Rgba12,\n PixelFormat.Rgba14,\n PixelFormat.Rgba16\n)\n\n\nBGR_PIXEL_FORMATS = (\n PixelFormat.Bgr8,\n PixelFormat.Bgr10,\n PixelFormat.Bgr12,\n PixelFormat.Bgr14,\n PixelFormat.Bgr16\n)\n\n\nBGRA_PIXEL_FORMATS = (\n PixelFormat.Bgra8,\n PixelFormat.Bgra10,\n PixelFormat.Bgra12,\n PixelFormat.Bgra14,\n PixelFormat.Bgra16\n)\n\n\nYUV_PIXEL_FORMATS = (\n PixelFormat.Yuv411,\n PixelFormat.Yuv422,\n PixelFormat.Yuv444\n)\n\n\nYCBCR_PIXEL_FORMATS = (\n PixelFormat.YCbCr411_8_CbYYCrYY,\n PixelFormat.YCbCr422_8_CbYCrY,\n PixelFormat.YCbCr8_CbYCr\n)\n\n\nCOLOR_PIXEL_FORMATS = BAYER_PIXEL_FORMATS + RGB_PIXEL_FORMATS + RGBA_PIXEL_FORMATS + \\\n BGR_PIXEL_FORMATS + BGRA_PIXEL_FORMATS + YUV_PIXEL_FORMATS + \\\n YCBCR_PIXEL_FORMATS\n\n\nOPENCV_PIXEL_FORMATS = (\n PixelFormat.Mono8,\n PixelFormat.Bgr8,\n PixelFormat.Bgra8,\n PixelFormat.Mono16,\n PixelFormat.Bgr16,\n PixelFormat.Bgra16\n)\n\n\nclass Debayer(enum.IntEnum):\n \"\"\"Enum specifying debayer modes.\n\n Enum values:\n Mode2x2 - 2x2 with green averaging (this is the default if no debayering algorithm\n is added as transformation option).\n Mode3x3 - 3x3 with equal green weighting per line (8-bit images only).\n ModeLCAA - Debayering with horizontal local color anti-aliasing (8-bit images only).\n ModeLCAAV - Debayering with horizontal and vertical local color anti-aliasing\n ( 8-bit images only).\n ModeYuv422 - Debayering with YUV422-alike sub-sampling (8-bit images only).\n \"\"\"\n Mode2x2 = VmbDebayerMode.Mode_2x2\n Mode3x3 = VmbDebayerMode.Mode_3x3\n ModeLCAA = VmbDebayerMode.Mode_LCAA\n ModeLCAAV = VmbDebayerMode.Mode_LCAAV\n ModeYuv422 = VmbDebayerMode.Mode_YUV422\n\n def __str__(self):\n return 'DebayerMode.{}'.format(self._name_)\n\n def __repr__(self):\n return str(self)\n\n\nclass FrameStatus(enum.IntEnum):\n \"\"\"Enum specifying the current status of internal Frame data.\n\n Enum values:\n Complete - Frame data is complete without errors.\n Incomplete - Frame could not be filled to the end.\n TooSmall - Frame buffer was too small.\n Invalid - Frame buffer was invalid.\n \"\"\"\n\n Complete = VmbFrameStatus.Complete\n 
Incomplete = VmbFrameStatus.Incomplete\n TooSmall = VmbFrameStatus.TooSmall\n Invalid = VmbFrameStatus.Invalid\n\n\nclass AncillaryData:\n \"\"\"Ancillary Data are created after enabling a Cameras 'ChunkModeActive' Feature.\n Ancillary Data are Features stored within a Frame.\n \"\"\"\n @TraceEnable()\n @LeaveContextOnCall()\n def __init__(self, handle: VmbFrame):\n \"\"\"Do not call directly. Get Object via Frame access method\"\"\"\n self.__handle: VmbFrame = handle\n self.__data_handle: VmbHandle = VmbHandle()\n self.__feats: FeaturesTuple = ()\n self.__context_cnt: int = 0\n\n @TraceEnable()\n def __enter__(self):\n if not self.__context_cnt:\n self._open()\n\n self.__context_cnt += 1\n return self\n\n @TraceEnable()\n def __exit__(self, exc_type, exc_value, exc_traceback):\n self.__context_cnt -= 1\n\n if not self.__context_cnt:\n self._close()\n\n @RaiseIfOutsideContext()\n def get_all_features(self) -> FeaturesTuple:\n \"\"\"Get all features in ancillary data.\n\n Returns:\n A set of all currently features stored in Ancillary Data.\n\n Raises:\n RuntimeError then called outside of \"with\" - statement.\n \"\"\"\n return self.__feats\n\n @RaiseIfOutsideContext()\n @RuntimeTypeCheckEnable()\n def get_features_by_type(self, feat_type: FeatureTypeTypes) -> FeaturesTuple:\n \"\"\"Get all features in ancillary data of a specific type.\n\n Valid FeatureTypes are: IntFeature, FloatFeature, StringFeature, BoolFeature,\n EnumFeature, CommandFeature, RawFeature\n\n Arguments:\n feat_type - FeatureType used find features of that type.\n\n Returns:\n A all features of type 'feat_type'.\n\n Raises:\n RuntimeError then called outside of \"with\" - statement.\n TypeError if parameters do not match their type hint.\n \"\"\"\n return filter_features_by_type(self.__feats, feat_type)\n\n @RaiseIfOutsideContext()\n @RuntimeTypeCheckEnable()\n def get_features_by_category(self, category: str) -> FeaturesTuple:\n \"\"\"Get all features in ancillary data of a specific category.\n\n Arguments:\n category - Category that should be used for filtering.\n\n Returns:\n A all features of category 'category'.\n\n Raises:\n RuntimeError then called outside of \"with\" - statement.\n TypeError if parameters do not match their type hint.\n \"\"\"\n return filter_features_by_category(self.__feats, category)\n\n @RaiseIfOutsideContext()\n @RuntimeTypeCheckEnable()\n def get_feature_by_name(self, feat_name: str) -> FeatureTypes:\n \"\"\"Get a features in ancillary data by its name.\n\n Arguments:\n feat_name - Name used to find a feature.\n\n Returns:\n Feature with the associated name.\n\n Raises:\n RuntimeError then called outside of \"with\" - statement.\n TypeError if parameters do not match their type hint.\n VimbaFeatureError if no feature is associated with 'feat_name'.\n \"\"\"\n feat = filter_features_by_name(self.__feats, feat_name)\n\n if not feat:\n raise VimbaFeatureError('Feature \\'{}\\' not found.'.format(feat_name))\n\n return feat\n\n @TraceEnable()\n @EnterContextOnCall()\n def _open(self):\n call_vimba_c('VmbAncillaryDataOpen', byref(self.__handle), byref(self.__data_handle))\n\n self.__feats = _replace_invalid_feature_calls(discover_features(self.__data_handle))\n attach_feature_accessors(self, self.__feats)\n\n @TraceEnable()\n @LeaveContextOnCall()\n def _close(self):\n remove_feature_accessors(self, self.__feats)\n self.__feats = ()\n\n call_vimba_c('VmbAncillaryDataClose', self.__data_handle)\n self.__data_handle = VmbHandle()\n\n\ndef _replace_invalid_feature_calls(feats: FeaturesTuple) -> 
FeaturesTuple:\n # AncillaryData are basically \"lightweight\" features. Calling most feature related\n # Functions with a AncillaryData - Handle leads to VimbaC Errors. This method decorates\n # all Methods that are unsafe to call with a decorator raising a RuntimeError.\n to_wrap = [\n 'get_access_mode',\n 'is_readable',\n 'is_writeable',\n 'register_change_handler',\n 'get_increment',\n 'get_range',\n 'set'\n ]\n\n # Decorator raising a RuntimeError instead of delegating call to inner function.\n def invalid_call(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n msg = 'Calling \\'{}\\' is invalid for AncillaryData Features.'\n raise RuntimeError(msg.format(func.__name__))\n\n return wrapper\n\n # Replace original implementation by injecting a surrounding decorator and\n # binding the resulting function as a method to the Feature instance.\n for f, a in [(f, a) for f in feats for a in to_wrap]:\n try:\n fn = invalid_call(getattr(f, a))\n setattr(f, a, fn.__get__(f))\n\n except AttributeError:\n pass\n\n return feats\n\n\nclass Frame:\n \"\"\"This class allows access to Frames acquired by a camera. The Frame is basically\n a buffer that wraps image data and some metadata.\n \"\"\"\n def __init__(self, buffer_size: int):\n \"\"\"Do not call directly. Create Frames via Camera methods instead.\"\"\"\n self._buffer = create_string_buffer(buffer_size)\n self._frame: VmbFrame = VmbFrame()\n\n # Setup underlaying Frame\n self._frame.buffer = ctypes.cast(self._buffer, ctypes.c_void_p)\n self._frame.bufferSize = sizeof(self._buffer)\n\n def __str__(self):\n msg = 'Frame(id={}, status={}, buffer={})'\n return msg.format(self._frame.frameID, str(FrameStatus(self._frame.receiveStatus)),\n hex(self._frame.buffer))\n\n def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n\n # VmbFrame contains Pointers and ctypes.Structure with Pointers can't be copied.\n # As a workaround VmbFrame contains a deepcopy-like Method performing deep copy of all\n # Attributes except PointerTypes. 
Those must be set manually after the copy operation.\n setattr(result, '_buffer', copy.deepcopy(self._buffer, memo))\n setattr(result, '_frame', self._frame.deepcopy_skip_ptr(memo))\n\n result._frame.buffer = ctypes.cast(result._buffer, ctypes.c_void_p)\n result._frame.bufferSize = sizeof(result._buffer)\n\n return result\n\n def get_buffer(self) -> ctypes.Array:\n \"\"\"Get internal buffer object containing image data.\"\"\"\n return self._buffer\n\n def get_buffer_size(self) -> int:\n \"\"\"Get byte size of internal buffer.\"\"\"\n return self._frame.bufferSize\n\n def get_image_size(self) -> int:\n \"\"\"Get byte size of image data stored in buffer.\"\"\"\n return self._frame.imageSize\n\n def get_ancillary_data(self) -> Optional[AncillaryData]:\n \"\"\"Get AncillaryData.\n\n Frames acquired with cameras where Feature ChunkModeActive is enabled can contain\n ancillary data within the image data.\n\n Returns:\n None if Frame contains no ancillary data.\n AncillaryData if Frame contains ancillary data.\n \"\"\"\n if not self._frame.ancillarySize:\n return None\n\n return AncillaryData(self._frame)\n\n def get_status(self) -> FrameStatus:\n \"\"\"Returns current frame status.\"\"\"\n return FrameStatus(self._frame.receiveStatus)\n\n def get_pixel_format(self) -> PixelFormat:\n \"\"\"Get format of the acquired image data\"\"\"\n return PixelFormat(self._frame.pixelFormat)\n\n def get_height(self) -> Optional[int]:\n \"\"\"Get image height in pixels.\n\n Returns:\n Image height in pixels if dimension data is provided by the camera.\n None if dimension data is not provided by the camera.\n \"\"\"\n flags = decode_flags(VmbFrameFlags, self._frame.receiveFlags)\n\n if VmbFrameFlags.Dimension not in flags:\n return None\n\n return self._frame.height\n\n def get_width(self) -> Optional[int]:\n \"\"\"Get image width in pixels.\n\n Returns:\n Image width in pixels if dimension data is provided by the camera.\n None if dimension data is not provided by the camera.\n \"\"\"\n flags = decode_flags(VmbFrameFlags, self._frame.receiveFlags)\n\n if VmbFrameFlags.Dimension not in flags:\n return None\n\n return self._frame.width\n\n def get_offset_x(self) -> Optional[int]:\n \"\"\"Get horizontal offset in pixels.\n\n Returns:\n Horizontal offset in pixel if offset data is provided by the camera.\n None if offset data is not provided by the camera.\n \"\"\"\n flags = decode_flags(VmbFrameFlags, self._frame.receiveFlags)\n\n if VmbFrameFlags.Offset not in flags:\n return None\n\n return self._frame.offsetX\n\n def get_offset_y(self) -> Optional[int]:\n \"\"\"Get vertical offset in pixels.\n\n Returns:\n Vertical offset in pixels if offset data is provided by the camera.\n None if offset data is not provided by the camera.\n \"\"\"\n flags = decode_flags(VmbFrameFlags, self._frame.receiveFlags)\n\n if VmbFrameFlags.Offset not in flags:\n return None\n\n return self._frame.offsetY\n\n def get_id(self) -> Optional[int]:\n \"\"\"Get Frame ID.\n\n Returns:\n Frame ID if the id is provided by the camera.\n None if frame id is not provided by the camera.\n \"\"\"\n flags = decode_flags(VmbFrameFlags, self._frame.receiveFlags)\n\n if VmbFrameFlags.FrameID not in flags:\n return None\n\n return self._frame.frameID\n\n def get_timestamp(self) -> Optional[int]:\n \"\"\"Get Frame timestamp.\n\n Returns:\n Timestamp if provided by the camera.\n None if timestamp is not provided by the camera.\n \"\"\"\n flags = decode_flags(VmbFrameFlags, self._frame.receiveFlags)\n\n if VmbFrameFlags.Timestamp not in flags:\n return 
None\n\n return self._frame.timestamp\n\n @RuntimeTypeCheckEnable()\n def convert_pixel_format(self, target_fmt: PixelFormat,\n debayer_mode: Optional[Debayer] = None):\n \"\"\"Convert internal pixel format to given format.\n\n Note: This method allocates a new buffer for internal image data leading to some\n runtime overhead. For performance reasons, it might be better to set the value\n of the camera's 'PixelFormat' feature instead. In addition, a non-default debayer mode\n can be specified.\n\n Arguments:\n target_fmt - PixelFormat to convert to.\n debayer_mode - Non-default algorithm used to debayer images in Bayer Formats. If\n no mode is specified, default debayering mode 'Mode2x2' is applied. If\n the current format is no Bayer format, this parameter is silently\n ignored.\n\n Raises:\n TypeError if parameters do not match their type hint.\n ValueError if the current format can't be converted into 'target_fmt'. Convertible\n Formats can be queried via get_convertible_formats() of PixelFormat.\n AssertionError if image width or height can't be determined.\n \"\"\"\n\n global BAYER_PIXEL_FORMATS\n\n # 1) Perform sanity checking\n fmt = self.get_pixel_format()\n\n if fmt == target_fmt:\n return\n\n if target_fmt not in fmt.get_convertible_formats():\n raise ValueError('Current PixelFormat can\\'t be converted into given format.')\n\n # 2) Specify Transformation Input Image\n height = self._frame.height\n width = self._frame.width\n\n c_src_image = VmbImage()\n c_src_image.Size = sizeof(c_src_image)\n c_src_image.Data = ctypes.cast(self._buffer, ctypes.c_void_p)\n\n call_vimba_image_transform('VmbSetImageInfoFromPixelFormat', fmt, width, height,\n byref(c_src_image))\n\n # 3) Specify Transformation Output Image\n c_dst_image = VmbImage()\n c_dst_image.Size = sizeof(c_dst_image)\n\n layout, bits = PIXEL_FORMAT_TO_LAYOUT[VmbPixelFormat(target_fmt)]\n\n call_vimba_image_transform('VmbSetImageInfoFromInputImage', byref(c_src_image), layout,\n bits, byref(c_dst_image))\n\n # 4) Allocate Buffer and perform transformation\n img_size = int(height * width * c_dst_image.ImageInfo.PixelInfo.BitsPerPixel / 8)\n anc_size = self._frame.ancillarySize\n\n buf = create_string_buffer(img_size + anc_size)\n c_dst_image.Data = ctypes.cast(buf, ctypes.c_void_p)\n\n # 5) Setup Debayering mode if given.\n transform_info = VmbTransformInfo()\n if debayer_mode and (fmt in BAYER_PIXEL_FORMATS):\n call_vimba_image_transform('VmbSetDebayerMode', VmbDebayerMode(debayer_mode),\n byref(transform_info))\n\n # 6) Perform Transformation\n call_vimba_image_transform('VmbImageTransform', byref(c_src_image), byref(c_dst_image),\n byref(transform_info), 1)\n\n # 7) Copy ancillary data if existing\n if anc_size:\n src = ctypes.addressof(self._buffer) + self._frame.imageSize\n dst = ctypes.addressof(buf) + img_size\n\n ctypes.memmove(dst, src, anc_size)\n\n # 8) Update frame metadata\n self._buffer = buf\n self._frame.buffer = ctypes.cast(self._buffer, ctypes.c_void_p)\n self._frame.bufferSize = sizeof(self._buffer)\n self._frame.imageSize = img_size\n self._frame.pixelFormat = target_fmt\n\n def as_numpy_ndarray(self) -> 'numpy.ndarray':\n \"\"\"Construct numpy.ndarray view on VimbaFrame.\n\n Returns:\n numpy.ndarray on internal image buffer.\n\n Raises:\n ImportError if numpy is not installed.\n VimbaFrameError if current PixelFormat can't be converted to a numpy.ndarray.\n \"\"\"\n if numpy is None:\n raise ImportError('\\'Frame.as_opencv_image()\\' requires module \\'numpy\\'.')\n\n # Construct numpy overlay on 
underlaying image buffer\n height = self._frame.height\n width = self._frame.width\n fmt = self._frame.pixelFormat\n\n c_image = VmbImage()\n c_image.Size = sizeof(c_image)\n\n call_vimba_image_transform('VmbSetImageInfoFromPixelFormat', fmt, width, height,\n byref(c_image))\n\n layout = PIXEL_FORMAT_TO_LAYOUT.get(fmt)\n\n if not layout:\n msg = 'Can\\'t construct numpy.ndarray for Pixelformat {}. ' \\\n 'Use \\'frame.convert_pixel_format()\\' to convert to a different Pixelformat.'\n raise VimbaFrameError(msg.format(str(self.get_pixel_format())))\n\n bits_per_channel = layout[1]\n channels_per_pixel = c_image.ImageInfo.PixelInfo.BitsPerPixel // bits_per_channel\n\n return numpy.ndarray(shape=(height, width, channels_per_pixel), buffer=self._buffer,\n dtype=numpy.uint8 if bits_per_channel == 8 else numpy.uint16)\n\n def as_opencv_image(self) -> 'numpy.ndarray':\n \"\"\"Construct OpenCV compatible view on VimbaFrame.\n\n Returns:\n OpenCV compatible numpy.ndarray\n\n Raises:\n ImportError if numpy is not installed.\n ValueError if current pixel format is not compatible with opencv. Compatible\n formats are in OPENCV_PIXEL_FORMATS.\n \"\"\"\n global OPENCV_PIXEL_FORMATS\n\n if numpy is None:\n raise ImportError('\\'Frame.as_opencv_image()\\' requires module \\'numpy\\'.')\n\n fmt = self._frame.pixelFormat\n\n if fmt not in OPENCV_PIXEL_FORMATS:\n raise ValueError('Current Format \\'{}\\' is not in OPENCV_PIXEL_FORMATS'.format(\n str(PixelFormat(self._frame.pixelFormat))))\n\n return self.as_numpy_ndarray()\n\n\n@TraceEnable()\n@RuntimeTypeCheckEnable()\ndef intersect_pixel_formats(fmts1: FormatTuple, fmts2: FormatTuple) -> FormatTuple:\n \"\"\"Build intersection of two sets containing PixelFormat.\n\n Arguments:\n fmts1 - PixelFormats to intersect with fmts2\n fmts2 - PixelFormats to intersect with fmts1\n\n Returns:\n Set of PixelFormats that occur in fmts1 and fmts2\n\n Raises:\n TypeError if parameters do not match their type hint.\n \"\"\"\n return tuple(set(fmts1).intersection(set(fmts2)))\n"
] | [
[
"numpy.ndarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
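`Frame.as_numpy_ndarray()` in the entry above never copies pixel data: it constructs a `numpy.ndarray` directly on top of the frame's ctypes buffer. The trick in isolation (the geometry and dtype below are assumptions, standing in for a Mono8 frame):

```python
import ctypes
import numpy as np

height, width, channels = 4, 6, 1                       # assumed Mono8 shape
buf = ctypes.create_string_buffer(height * width * channels)

# Zero-copy view: the ndarray borrows the ctypes buffer's memory.
img = np.ndarray(shape=(height, width, channels), dtype=np.uint8, buffer=buf)

# Writing through the view mutates the underlying buffer, and vice versa.
img[0, 0, 0] = 255
assert buf.raw[0] == 255
print(img.shape, img.dtype)  # (4, 6, 1) uint8
```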
congvmit/mipkit | [
"d65a5083852dcfc5db766175aa402a5e3a506f21"
] | [
"examples/test_debugger.py"
] | [
"\"\"\"\n The MIT License (MIT)\n Copyright (c) 2021 Cong Vo\n \n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n \n The above copyright notice and this permission notice shall be included in\n all copies or substantial portions of the Software.\n \n Provided license texts might have their own copyrights and restrictions\n \n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n THE SOFTWARE.\n\"\"\"\n\nimport numpy as np\nimport torch\nfrom mipkit import set_trace\nx = torch.ones([1, 2, 3, 4])\nx_arr = np.ones([1, 2, 3])\nset_trace()\n\n\ndef main():\n x = torch.ones([1, 2, 3, 4])\n x_arr = np.ones([1, 2, 3])\n import mipkit;mipkit.set_trace();exit()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.ones",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
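The `test_debugger.py` entry exercises `mipkit.set_trace()`, an inline breakpoint dropped mid-script. The same pattern with only the standard library (a sketch, not mipkit's implementation):

```python
import numpy as np

def main():
    x = np.ones([1, 2, 3])
    # breakpoint() (Python 3.7+) defaults to pdb.set_trace() and honours the
    # PYTHONBREAKPOINT env var; execution pauses here with `x` in scope.
    breakpoint()

if __name__ == "__main__":
    main()
```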
sizhky/detr | [
"54f18a0b3a3be69be4c451567ea730c731c7ad48"
] | [
"main.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport argparse\nimport datetime\nimport json\nimport random\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, DistributedSampler\n\nimport datasets\nimport util.misc as utils\nfrom datasets import build_dataset, get_coco_api_from_dataset\nfrom engine import evaluate, train_one_epoch\nfrom models import build_model\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Set transformer detector', add_help=False)\n parser.add_argument('--lr', default=1e-4, type=float)\n parser.add_argument('--lr_backbone', default=1e-5, type=float)\n parser.add_argument('--batch_size', default=2, type=int)\n parser.add_argument('--weight_decay', default=1e-4, type=float)\n parser.add_argument('--epochs', default=300, type=int)\n parser.add_argument('--lr_drop', default=200, type=int)\n parser.add_argument('--clip_max_norm', default=0.1, type=float,\n help='gradient clipping max norm')\n parser.add_argument('--nclasses', default=2, type=int,\n help='number of classes to train (excluding background)')\n # Model parameters\n parser.add_argument('--frozen_weights', type=str, default=None,\n help=\"Path to the pretrained model. If set, only the mask head will be trained\")\n # * Backbone\n parser.add_argument('--backbone', default='resnet50', type=str,\n help=\"Name of the convolutional backbone to use\")\n parser.add_argument('--dilation', action='store_true',\n help=\"If true, we replace stride with dilation in the last convolutional block (DC5)\")\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n\n # * Transformer\n parser.add_argument('--enc_layers', default=6, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=6, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=2048, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=100, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--pre_norm', action='store_true')\n\n # * Segmentation\n parser.add_argument('--masks', action='store_true',\n help=\"Train segmentation head if the flag is provided\")\n\n # Loss\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n # * Matcher\n parser.add_argument('--set_cost_class', default=1, type=float,\n help=\"Class coefficient in the matching cost\")\n parser.add_argument('--set_cost_bbox', default=5, type=float,\n help=\"L1 box coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=2, type=float,\n help=\"giou box coefficient in the matching cost\")\n # * Loss coefficients\n parser.add_argument('--mask_loss_coef', default=1, type=float)\n parser.add_argument('--dice_loss_coef', default=1, type=float)\n parser.add_argument('--bbox_loss_coef', 
default=5, type=float)\n parser.add_argument('--giou_loss_coef', default=2, type=float)\n parser.add_argument('--eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n\n # dataset parameters\n parser.add_argument('--dataset_file', default='coco')\n parser.add_argument('--coco_path', type=str)\n parser.add_argument('--coco_panoptic_path', type=str)\n parser.add_argument('--remove_difficult', action='store_true')\n\n parser.add_argument('--output_dir', default='',\n help='path where to save, empty for no saving')\n parser.add_argument('--device', default='cuda',\n help='device to use for training / testing')\n parser.add_argument('--seed', default=42, type=int)\n parser.add_argument('--resume', default='', help='resume from checkpoint')\n parser.add_argument('--start_epoch', default=0, type=int, metavar='N',\n help='start epoch')\n parser.add_argument('--eval', action='store_true')\n parser.add_argument('--num_workers', default=2, type=int)\n\n # distributed training parameters\n parser.add_argument('--world_size', default=1, type=int,\n help='number of distributed processes')\n parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')\n return parser\n\n\ndef main(args):\n utils.init_distributed_mode(args)\n print(\"git:\\n {}\\n\".format(utils.get_sha()))\n\n if args.frozen_weights is not None:\n assert args.masks, \"Frozen training is meant for segmentation only\"\n print(args)\n\n device = torch.device(args.device)\n\n # fix the seed for reproducibility\n seed = args.seed + utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n model, criterion, postprocessors = build_model(args)\n model.to(device)\n\n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])\n model_without_ddp = model.module\n n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print('number of params:', n_parameters)\n\n param_dicts = [\n {\"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" not in n and p.requires_grad]},\n {\n \"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" in n and p.requires_grad],\n \"lr\": args.lr_backbone,\n },\n ]\n optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,\n weight_decay=args.weight_decay)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)\n\n dataset_train = build_dataset(image_set='train', args=args)\n dataset_val = build_dataset(image_set='val', args=args)\n\n if args.distributed:\n sampler_train = DistributedSampler(dataset_train)\n sampler_val = DistributedSampler(dataset_val, shuffle=False)\n else:\n sampler_train = torch.utils.data.RandomSampler(dataset_train)\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n\n batch_sampler_train = torch.utils.data.BatchSampler(\n sampler_train, args.batch_size, drop_last=True)\n\n data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,\n collate_fn=utils.collate_fn, num_workers=args.num_workers)\n data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,\n drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)\n\n if args.dataset_file == \"coco_panoptic\":\n # We also evaluate AP during panoptic training, on original coco DS\n coco_val = datasets.coco.build(\"val\", args)\n base_ds = get_coco_api_from_dataset(coco_val)\n else:\n base_ds = 
get_coco_api_from_dataset(dataset_val)\n\n if args.frozen_weights is not None:\n checkpoint = torch.load(args.frozen_weights, map_location='cpu')\n model_without_ddp.detr.load_state_dict(checkpoint['model'])\n\n output_dir = Path(args.output_dir)\n if args.resume:\n if args.resume.startswith('https'):\n checkpoint = torch.hub.load_state_dict_from_url(\n args.resume, map_location='cpu', check_hash=True)\n else:\n checkpoint = torch.load(args.resume, map_location='cpu')\n model_without_ddp.load_state_dict(checkpoint['model'], strict=False)\n if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n args.start_epoch = checkpoint['epoch'] + 1\n\n if args.eval:\n test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,\n data_loader_val, base_ds, device, args.output_dir)\n if args.output_dir:\n utils.save_on_master(coco_evaluator.coco_eval[\"bbox\"].eval, output_dir / \"eval.pth\")\n return\n\n print(\"Start training\")\n start_time = time.time()\n for epoch in range(args.start_epoch, args.epochs):\n if args.distributed:\n sampler_train.set_epoch(epoch)\n train_stats = train_one_epoch(\n model, criterion, data_loader_train, optimizer, device, epoch,\n args.clip_max_norm)\n lr_scheduler.step()\n if args.output_dir:\n checkpoint_paths = [output_dir / 'checkpoint.pth']\n # extra checkpoint before LR drop and every 100 epochs\n if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:\n checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')\n for checkpoint_path in checkpoint_paths:\n utils.save_on_master({\n 'model': model_without_ddp.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': epoch,\n 'args': args,\n }, checkpoint_path)\n\n test_stats, coco_evaluator = evaluate(\n model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir\n )\n\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n **{f'test_{k}': v for k, v in test_stats.items()},\n 'epoch': epoch,\n 'n_parameters': n_parameters}\n\n if args.output_dir and utils.is_main_process():\n with (output_dir / \"log.txt\").open(\"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n # for evaluation logs\n if coco_evaluator is not None:\n (output_dir / 'eval').mkdir(exist_ok=True)\n if \"bbox\" in coco_evaluator.coco_eval:\n filenames = ['latest.pth']\n if epoch % 50 == 0:\n filenames.append(f'{epoch:03}.pth')\n for name in filenames:\n torch.save(coco_evaluator.coco_eval[\"bbox\"].eval,\n output_dir / \"eval\" / name)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])\n args = parser.parse_args()\n if args.output_dir:\n Path(args.output_dir).mkdir(parents=True, exist_ok=True)\n main(args)\n"
] | [
[
"torch.utils.data.DistributedSampler",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.utils.data.RandomSampler",
"torch.optim.AdamW",
"torch.save",
"torch.nn.parallel.DistributedDataParallel",
"torch.device",
"torch.hub.load_state_dict_from_url",
"torch.utils.data.BatchSampler",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
elopezphy/basic_dash | [
"deda0997d5b7e5378a0b3124791f9bfee4cee4cd"
] | [
"app.py"
] | [
"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\n\ndata = pd.read_csv('data/avocado.csv')\n\ndata = data.query(\"type == 'conventional' and region == 'Albany'\")\ndata[\"Date\"] = pd.to_datetime(data[\"Date\"], format=\"%Y-%m-%d\")\ndata.sort_values(\"Date\", inplace=True)\n\napp = dash.Dash(__name__)\n\napp.layout = html.Div(\n children=[\n html.H1(children=\"Avocado Analytics\",),\n html.P(\n children=\"Analyze the behavior of avocado prices\"\n \" and the number of avocados sold in the US\"\n \" between 2015 and 2018\",\n ),\n dcc.Graph(\n figure={\n \"data\": [\n {\n \"x\": data[\"Date\"],\n \"y\": data[\"AveragePrice\"],\n \"type\": \"lines\",\n },\n ],\n \"layout\": {\"title\": \"Average Price of Avocados\"},\n },\n ),\n dcc.Graph(\n figure={\n \"data\": [\n {\n \"x\": data[\"Date\"],\n \"y\": data[\"Total Volume\"],\n \"type\": \"lines\",\n },\n ],\n \"layout\": {\"title\": \"Avocados Sold\"},\n },\n ),\n ]\n)\n\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)"
] | [
[
"pandas.read_csv",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
NithinKumaraNT/DNN_Quantizer | [
"3a6885f77aabb9b539e554a34a1c7ad358a39336"
] | [
"examples/Optimize_Quant.py"
] | [
"\"\"\"\nAn example that learns the optimal approximate uniform symmetric mid-even quantizer for a given data distribution. \nWe use Stochastic gradient descent for optimization of the range. The #steps used for quantization is a fixed design\nparameter. We test it with:\n\n I) Normal distributed data\n II) Laplacian distributed data\n III) Uniform distributed data\n\nLukas Mauch\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\nfrom bayesian_dnn.misc import setup\nfrom bayesian_dnn.quantization import ClippedApproxUniformQuantizer as CAUQ\nfrom scipy.stats import norm\nfrom scipy.stats import laplace\nfrom scipy.stats import uniform\nimport matplotlib.pyplot as plt\n\nstd = 3.0\nn_steps = 11\n\n#--------------case I: Normal------------------\nx = tf.constant(std*np.random.randn(10000), dtype=tf.float32, name='x')\nqac = CAUQ(c_init=tf.constant_initializer(1.0, dtype=tf.float32), n_steps=n_steps, k=2, name='clipped_approx_quant')\nxq = qac(x)\n\n#compute the mean squared error (mse) between quantized values xq and unquantized values x\nmse_loss = tf.reduce_mean(tf.pow(x-xq, 2)) \n\n#set up the optimizer to optimize the range c of the quantizer for minimum mse\noptimizer = tf.train.GradientDescentOptimizer(2.0)\nmin_mse = optimizer.minimize(mse_loss, var_list=qac.c)\n\n#create the tf session, initialize all variables and optimize\nsess = tf.Session(config=setup.config_tensorflow())\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(50):\n _, mse = sess.run((min_mse, mse_loss))\n print(\"MSE: \" + str(mse))\n \nc_opt = sess.run(qac.c)\nprint(\"optimal c: \" + str(c_opt))\nprint(c_opt/std)\nsess.close()\n \n \n#plot the results\nx = np.linspace(-5*std,5*std,10000)\npdf = norm.pdf(x, scale=std)\n\nplt.figure(1)\nplt.plot(x, pdf)\nplt.axvline(-c_opt, color=\"limegreen\")\nplt.axvline(c_opt, color=\"limegreen\")\nplt.xlabel(\"unquantized x\")\nplt.ylabel(\"p(x)\")\nplt.title(\"Normal distribution\")\nplt.show()\n\ntf.reset_default_graph()\n\n\n#--------------case II: Laplacian------------------\nx = tf.constant(np.random.laplace(scale=std, size=10000), dtype=tf.float32, name='x')\nqac = CAUQ(c_init=tf.constant_initializer(1.0, dtype=tf.float32), n_steps=n_steps, k=2, name='clipped_approx_quant')\nxq = qac(x)\n\n#compute the mean squared error (mse) between quantized values xq and unquantized values x\nmse_loss = tf.reduce_mean(tf.pow(x-xq, 2)) \n\n#set up the optimizer to optimize the range c of the quantizer for minimum mse\noptimizer = tf.train.GradientDescentOptimizer(0.2)\nmin_mse = optimizer.minimize(mse_loss, var_list=qac.c)\n\n#create the tf session, initialize all variables and optimize\nsess = tf.Session(config=setup.config_tensorflow())\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(500):\n _, mse = sess.run((min_mse, mse_loss))\n print(\"MSE: \" + str(mse))\n \nc_opt = sess.run(qac.c)\nprint(\"optimal c: \" + str(c_opt))\nprint(c_opt/std)\nsess.close()\n \n \n#plot the results\nx = np.linspace(-5*std,5*std,1000)\npdf = laplace.pdf(x, scale=std)\n\nplt.figure(2)\nplt.plot(x, pdf)\nplt.axvline(-c_opt, color=\"limegreen\")\nplt.axvline(c_opt, color=\"limegreen\")\nplt.xlabel(\"unquantized x\")\nplt.ylabel(\"p(x)\")\nplt.title(\"Laplacian distribution\")\nplt.show()\n\ntf.reset_default_graph()\n\n\n#--------------case III: Uniform------------------\nc = std * np.sqrt(3)\nx = tf.constant(np.random.uniform(low=-c, high=c, size=1000), dtype=tf.float32, name='x')\nqac = CAUQ(c_init=tf.constant_initializer(1.0, dtype=tf.float32), 
n_steps=n_steps, k=2, alpha=0.5, name='clipped_approx_quant')\nxq = qac(x)\n\n#compute the mean squared error (mse) between quantized values xq and unquantized values x\nmse_loss = tf.reduce_mean(tf.pow(x-xq, 2)) \n\n#set up the optimizer to optimize the range c of the quantizer for minimum mse\noptimizer = tf.train.GradientDescentOptimizer(1.0)\nmin_mse = optimizer.minimize(mse_loss, var_list=qac.c)\n\n#create the tf session, initialize all variables and optimize\nsess = tf.Session(config=setup.config_tensorflow())\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nfor i in range(50):\n _, mse = sess.run((min_mse, mse_loss))\n print(\"MSE: \" + str(mse))\n \nc_opt = sess.run(qac.c)\nprint(\"optimal c: \" + str(c_opt))\nprint(c_opt/std)\nsess.close()\n \n \n#plot the results\nx = np.linspace(-5*std,5*std,1000)\npdf = uniform.pdf(x, loc=-c, scale=2*c)\n\nplt.figure(3)\nplt.plot(x, pdf)\nplt.axvline(-c_opt, color=\"limegreen\")\nplt.axvline(c_opt, color=\"limegreen\")\nplt.xlabel(\"unquantized x\")\nplt.ylabel(\"p(x)\")\nplt.title(\"Uniform distribution\")\nplt.show()\n"
] | [
[
"numpy.sqrt",
"numpy.linspace",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"numpy.random.laplace",
"tensorflow.reset_default_graph",
"scipy.stats.laplace.pdf",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"tensorflow.pow",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axvline",
"scipy.stats.norm.pdf",
"scipy.stats.uniform.pdf",
"tensorflow.constant_initializer",
"numpy.random.uniform"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
meokz/psd-tools | [
"64197a1c0f75b8d1b3bcfaaae7fa2b97e34ffb1e"
] | [
"src/psd_tools/composer/blend.py"
] | [
"\"\"\"\nBlending module.\n\nCheck Blending_ section of W3C recommendation for blending mode definitions.\n\n.. _Blending: https://www.w3.org/TR/compositing/#blending\n\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nimport logging\n\nfrom psd_tools.utils import new_registry\nfrom psd_tools.constants import BlendMode\nfrom psd_tools.terminology import Enum\n\nlogger = logging.getLogger(__name__)\n\nBLEND_FUNCTIONS, register = new_registry()\n\n\ndef blend(backdrop, image, offset, mode=None):\n from PIL import Image, ImageChops, ImageMath\n\n # Align the canvas size.\n if offset[0] < 0:\n if image.width <= -offset[0]:\n return backdrop\n image = image.crop((-offset[0], 0, image.width, image.height))\n offset = (0, offset[1])\n\n if offset[1] < 0:\n if image.height <= -offset[1]:\n return backdrop\n image = image.crop((0, -offset[1], image.width, image.height))\n offset = (offset[0], 0)\n\n # Operations must happen in RGBA in Pillow.\n image_ = Image.new(image.mode, backdrop.size)\n image_.paste(image, offset)\n image = image_.convert('RGBA')\n\n target_mode = backdrop.mode\n if target_mode != 'RGBA':\n backdrop = backdrop.convert('RGBA')\n\n # Composite blended image.\n if mode not in (BlendMode.NORMAL, Enum.Normal, None):\n blend_func = BLEND_FUNCTIONS.get(mode, _normal)\n image = _blend_image(backdrop, image, blend_func)\n backdrop = Image.alpha_composite(backdrop, image)\n\n if target_mode != 'RGBA':\n backdrop = backdrop.convert(target_mode)\n return backdrop\n\n\ndef _blend_image(backdrop, source, blend_fn):\n from PIL import Image\n import numpy as np\n Cb = np.asarray(backdrop.convert('RGB')).astype(np.float) / 255.\n Cs = np.asarray(source.convert('RGB')).astype(np.float) / 255.\n Ab = np.asarray(backdrop.getchannel('A')).astype(np.float) / 255.\n Ab = np.expand_dims(Ab, axis=2)\n Cr = (1. 
- Ab) * Cs + Ab * blend_fn(Cs, Cb)\n result = Image.fromarray((Cr * 255).round().astype(np.uint8), mode='RGB')\n result.putalpha(source.getchannel('A'))\n return result\n\n\n@register(BlendMode.NORMAL)\n@register(Enum.Normal)\ndef _normal(Cs, Cb):\n return Cs\n\n\n@register(BlendMode.MULTIPLY)\n@register(Enum.Multiply)\ndef _multiply(Cs, Cb):\n return Cs * Cb\n\n\n@register(BlendMode.SCREEN)\n@register(Enum.Screen)\ndef _screen(Cs, Cb):\n return Cb + Cs - (Cb * Cs)\n\n\n@register(BlendMode.OVERLAY)\n@register(Enum.Overlay)\ndef _overlay(Cs, Cb):\n return _hard_light(Cb, Cs)\n\n\n@register(BlendMode.DARKEN)\n@register(Enum.Darken)\ndef _darken(Cs, Cb):\n import numpy as np\n return np.minimum(Cb, Cs)\n\n\n@register(BlendMode.LIGHTEN)\n@register(Enum.Lighten)\ndef _lighten(Cs, Cb):\n import numpy as np\n return np.maximum(Cb, Cs)\n\n\n@register(BlendMode.COLOR_DODGE)\n@register(Enum.ColorDodge)\ndef _color_dodge(Cs, Cb, s=1.0):\n import numpy as np\n B = np.zeros_like(Cs)\n B[Cs == 1] = 1\n B[Cb == 0] = 0\n index = (Cs != 1) & (Cb != 0)\n B[index] = np.minimum(1, Cb[index] / (s * (1 - Cs[index])))\n return B\n\n\n@register(BlendMode.LINEAR_DODGE)\n@register(b'linearDodge')\ndef _linear_dodge(Cs, Cb):\n import numpy as np\n return np.minimum(1, Cb + Cs)\n\n\n@register(BlendMode.COLOR_BURN)\n@register(Enum.ColorBurn)\ndef _color_burn(Cs, Cb, s=1.0):\n import numpy as np\n B = np.zeros_like(Cb)\n B[Cb == 1] = 1\n index = (Cb != 1) & (Cs != 0)\n B[index] = 1 - np.minimum(1, (1 - Cb[index]) / (s * Cs[index]))\n return B\n\n\n@register(BlendMode.LINEAR_BURN)\n@register(b'linearBurn')\ndef _linear_burn(Cs, Cb):\n import numpy as np\n return np.maximum(0, Cb + Cs - 1)\n\n\n@register(BlendMode.HARD_LIGHT)\n@register(Enum.HardLight)\ndef _hard_light(Cs, Cb):\n index = Cs > 0.5\n B = _multiply(Cs, Cb)\n B[index] = _screen(Cs, Cb)[index]\n return B\n\n\n@register(BlendMode.SOFT_LIGHT)\n@register(Enum.SoftLight)\ndef _soft_light(Cs, Cb):\n import numpy as np\n index = Cs <= 0.25\n D = np.sqrt(Cb)\n D[index] = ((16 * Cb[index] - 12) * Cb[index] + 4) * Cb[index]\n index = Cs <= 0.5\n B = Cb + (2 * Cs - 1) * (D - Cb)\n B[index] = Cb[index] - (1 - 2 * Cs[index]) * Cb[index] * (1 - Cb[index])\n return B\n\n\n@register(BlendMode.VIVID_LIGHT)\n@register(b'vividLight')\ndef _vivid_light(Cs, Cb):\n \"\"\"\n Burns or dodges the colors by increasing or decreasing the contrast,\n depending on the blend color. If the blend color (light source) is lighter\n than 50% gray, the image is lightened by decreasing the contrast. 
If the\n    blend color is darker than 50% gray, the image is darkened by increasing\n    the contrast.\n    \"\"\"\n    # TODO: Still inaccurate.\n    index = Cs > 0.5\n    B = _color_dodge(Cs, Cb, 128)\n    B[index] = _color_burn(Cs, Cb, 128)[index]\n    return B\n\n\n@register(BlendMode.LINEAR_LIGHT)\n@register(b'linearLight')\ndef _linear_light(Cs, Cb):\n    index = Cs > 0.5\n    B = _linear_burn(Cs, Cb)\n    B[index] = _linear_dodge(Cs, Cb)[index]\n    return B\n\n\n@register(BlendMode.PIN_LIGHT)\n@register(b'pinLight')\ndef _pin_light(Cs, Cb):\n    index = Cs > 0.5\n    B = _darken(Cs, Cb)\n    B[index] = _lighten(Cs, Cb)[index]\n    return B\n\n\n@register(BlendMode.DIFFERENCE)\n@register(Enum.Difference)\ndef _difference(Cs, Cb):\n    import numpy as np\n    return np.abs(Cb - Cs)\n\n\n@register(BlendMode.EXCLUSION)\n@register(Enum.Exclusion)\ndef _exclusion(Cs, Cb):\n    return Cb + Cs - 2 * Cb * Cs\n\n\n@register(BlendMode.SUBTRACT)\n@register(b'blendSubtraction')\ndef _subtract(Cs, Cb):\n    import numpy as np\n    return np.maximum(0, Cb - Cs)\n\n\n@register(BlendMode.HARD_MIX)\n@register(b'hardMix')\ndef _hard_mix(Cs, Cb):\n    B = Cb.copy()\n    B[(Cs + Cb) < 1] = 0\n    return B\n\n\n@register(BlendMode.DIVIDE)\n@register(b'blendDivide')\ndef _divide(Cs, Cb):\n    B = Cb.copy()\n    index = Cs > 0\n    B[index] = Cb[index] / Cs[index]  # Seems incorrect...\n    return B\n\n\n@register(BlendMode.HUE)\n@register(Enum.Hue)\ndef _hue(Cs, Cb):\n    import numpy as np\n    hs, ls, ss = rgb_to_hls(Cs)\n    hb, lb, sb = rgb_to_hls(Cb)\n    return hls_to_rgb(hs, lb, sb)\n\n\n@register(BlendMode.SATURATION)\n@register(Enum.Saturation)\ndef _saturation(Cs, Cb):\n    import numpy as np\n    hs, ls, ss = rgb_to_hls(Cs)\n    hb, lb, sb = rgb_to_hls(Cb)\n    return hls_to_rgb(hb, lb, ss)\n\n\n@register(BlendMode.COLOR)\n@register(Enum.Color)\ndef _color(Cs, Cb):\n    import numpy as np\n    hs, ls, ss = rgb_to_hls(Cs)\n    hb, lb, sb = rgb_to_hls(Cb)\n    return hls_to_rgb(hs, lb, ss)\n\n\n@register(BlendMode.LUMINOSITY)\n@register(Enum.Luminosity)\ndef _luminosity(Cs, Cb):\n    import numpy as np\n    hs, ls, ss = rgb_to_hls(Cs)\n    hb, lb, sb = rgb_to_hls(Cb)\n    return hls_to_rgb(hb, ls, sb)\n\n\n# BlendMode.DISSOLVE: _dissolve,\n# BlendMode.DARKER_COLOR: _darker_color,\n# BlendMode.LIGHTER_COLOR: _lighter_color,\n\n# Enum.Dissolve: _dissolve,\n# b'darkerColor': _darker_color,\n# b'lighterColor': _lighter_color,\n\n\ndef rgb_to_hls(rgb):\n    \"\"\"RGB to HSL conversion.\n\n    See colorsys module.\n    \"\"\"\n    import numpy as np\n\n    maxc = np.max(rgb, axis=2)\n    minc = np.min(rgb, axis=2)\n    nonzero_index = (minc < maxc)\n    c_diff = maxc - minc\n\n    l = (minc + maxc) / 2.0\n    s = np.zeros_like(l)\n    h = np.zeros_like(l)\n\n    index = nonzero_index\n    s[index] = c_diff[index] / (2.0 - maxc[index] - minc[index])\n    index = (l <= 0.5) & nonzero_index\n    s[index] = c_diff[index] / (maxc[index] + minc[index])\n\n    # As in colorsys: rc = (maxc - r) / (maxc - minc), and likewise for gc, bc.\n    rc, gc, bc = (\n        (maxc[nonzero_index] - rgb[:, :, i][nonzero_index]) / c_diff[nonzero_index]\n        for i in range(3)\n    )\n    hc = 4.0 + gc - rc  # 4 + gc - rc\n    index = (rgb[:, :, 1][nonzero_index] == maxc[nonzero_index])\n    hc[index] = 2.0 + rc[index] - bc[index]  # 2 + rc - bc\n    index = (rgb[:, :, 0][nonzero_index] == maxc[nonzero_index])\n    hc[index] = bc[index] - gc[index]  # bc - gc\n    h[nonzero_index] = (hc / 6.0) % 1.0\n    return h, l, s\n\n\ndef hls_to_rgb(h, l, s):\n    \"\"\"HSL to RGB conversion.\n\n    See colorsys module.\n    \"\"\"\n    import numpy as np\n    ONE_THIRD = 1. / 3.\n    TWO_THIRD = 2. / 3.\n    ONE_SIXTH = 1. 
/ 6.\n r, g, b = np.copy(l), np.copy(l), np.copy(l)\n nonzero_index = (s != 0.)\n\n m2 = l + s - (l * s)\n index = l <= 0.5\n m2[index] = l[index] * (1.0 + s[index])\n m1 = 2.0 * l - m2\n\n def _v(m1, m2, hue):\n hue = hue % 1.0\n c = np.copy(m1)\n index = hue < TWO_THIRD\n c[index] = m1[index] + (m2[index] -\n m1[index]) * (TWO_THIRD - hue[index]) * 6.0\n index = hue < 0.5\n c[index] = m2[index]\n index = hue < ONE_SIXTH\n c[index] = m1[index] + (m2[index] - m1[index]) * hue[index] * 6.0\n return c\n\n r[nonzero_index] = _v(m1, m2, h + ONE_THIRD)[nonzero_index]\n g[nonzero_index] = _v(m1, m2, h)[nonzero_index]\n b[nonzero_index] = _v(m1, m2, h - ONE_THIRD)[nonzero_index]\n return np.stack((r, g, b), axis=2)\n"
] | [
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.minimum",
"numpy.sqrt",
"numpy.abs",
"numpy.min",
"numpy.stack",
"numpy.max",
"numpy.copy",
"numpy.zeros_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jkhu29/SoCo | [
"1cef465ce5bdc975a72d3d869147ebeb6031781d"
] | [
"main_linear.py"
] | [
"# --------------------------------------------------------\n# SoCo\n# Copyright (c) 2021 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Yue Gao\n# --------------------------------------------------------\n\n\nimport json\nimport os\nimport time\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom contrast import resnet\nfrom contrast.data import get_loader\nfrom contrast.logger import setup_logger\nfrom contrast.lr_scheduler import get_scheduler\nfrom contrast.option import parse_option\nfrom contrast.util import AverageMeter, accuracy, reduce_tensor\n\ntry:\n from apex import amp # type: ignore\nexcept ImportError:\n amp = None\n\n\ndef build_model(args, num_class):\n # create model\n model = resnet.__dict__[args.arch](low_dim=num_class, head_type='reduce').cuda()\n\n # set requires_grad of parameters except last fc layer to False\n for name, p in model.named_parameters():\n if 'fc' not in name:\n p.requires_grad = False\n\n optimizer = torch.optim.SGD(model.fc.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n if args.amp_opt_level != \"O0\":\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.amp_opt_level)\n\n model = DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False)\n\n return model, optimizer\n\n\ndef load_pretrained(model, pretrained_model):\n ckpt = torch.load(pretrained_model, map_location='cpu')\n model_dict = model.state_dict()\n\n base_fix = False\n for key in ckpt['model'].keys():\n if key.startswith('module.base.'):\n base_fix = True\n break\n\n if base_fix:\n state_dict = {k.replace(\"module.base.\", \"module.\"): v\n for k, v in ckpt['model'].items()\n if k.startswith('module.base.')}\n logger.info(f\"==> load checkpoint from Module.Base\")\n else:\n state_dict = {k.replace(\"module.encoder.\", \"module.\"): v\n for k, v in ckpt['model'].items()\n if k.startswith('module.encoder.')}\n logger.info(f\"==> load checkpoint from Module.Encoder\")\n\n state_dict = {k: v for k, v in state_dict.items()\n if k in model_dict and v.size() == model_dict[k].size()}\n\n model_dict.update(state_dict)\n model.load_state_dict(model_dict)\n logger.info(f\"==> loaded checkpoint '{pretrained_model}' (epoch {ckpt['epoch']})\")\n\n\ndef load_checkpoint(args, model, optimizer, scheduler):\n logger.info(\"=> loading checkpoint '{args.resume'\")\n\n checkpoint = torch.load(args.resume, map_location='cpu')\n\n global best_acc1\n best_acc1 = checkpoint['best_acc1']\n args.start_epoch = checkpoint['epoch'] + 1\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n scheduler.load_state_dict(checkpoint['scheduler'])\n if args.amp_opt_level != \"O0\" and checkpoint['args'].amp_opt_level != \"O0\":\n amp.load_state_dict(checkpoint['amp'])\n\n logger.info(f\"=> loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})\")\n\n\ndef save_checkpoint(args, epoch, model, test_acc, optimizer, scheduler):\n state = {\n 'args': args,\n 'epoch': epoch,\n 'model': model.state_dict(),\n 'best_acc1': test_acc,\n 'optimizer': optimizer.state_dict(),\n 'scheduler': scheduler.state_dict(),\n }\n if args.amp_opt_level != \"O0\":\n state['amp'] = amp.state_dict()\n torch.save(state, 
os.path.join(args.output_dir, f'ckpt_epoch_{epoch}.pth'))\n torch.save(state, os.path.join(args.output_dir, f'current.pth'))\n\n\ndef main(args):\n global best_acc1\n\n args.batch_size = args.total_batch_size // dist.get_world_size()\n train_loader = get_loader(args.aug, args, prefix='train')\n val_loader = get_loader('val', args, prefix='val')\n logger.info(f\"length of training dataset: {len(train_loader.dataset)}\")\n\n model, optimizer = build_model(args, num_class=len(train_loader.dataset.classes))\n scheduler = get_scheduler(optimizer, len(train_loader), args)\n\n # load pre-trained model\n load_pretrained(model, args.pretrained_model)\n\n # optionally resume from a checkpoint\n if args.auto_resume:\n resume_file = os.path.join(args.output_dir, \"current.pth\")\n if os.path.exists(resume_file):\n logger.info(f'auto resume from {resume_file}')\n args.resume = resume_file\n else:\n logger.info(f'no checkpoint found in {args.output_dir}, ignoring auto resume')\n if args.resume:\n assert os.path.isfile(args.resume), f\"no checkpoint found at '{args.resume}'\"\n load_checkpoint(args, model, optimizer, scheduler)\n\n if args.eval:\n logger.info(\"==> testing...\")\n validate(val_loader, model, args)\n return\n\n # tensorboard\n if dist.get_rank() == 0:\n summary_writer = SummaryWriter(log_dir=args.output_dir)\n else:\n summary_writer = None\n\n # routine\n for epoch in range(args.start_epoch, args.epochs + 1):\n if isinstance(train_loader.sampler, DistributedSampler):\n train_loader.sampler.set_epoch(epoch)\n\n tic = time.time()\n train(epoch, train_loader, model, optimizer, scheduler, args)\n logger.info(f'epoch {epoch}, total time {time.time() - tic:.2f}')\n\n logger.info(\"==> testing...\")\n test_acc, test_acc5, test_loss = validate(val_loader, model, args)\n if summary_writer is not None:\n summary_writer.add_scalar('test_acc', test_acc, epoch)\n summary_writer.add_scalar('test_acc5', test_acc5, epoch)\n summary_writer.add_scalar('test_loss', test_loss, epoch)\n\n # save model\n if dist.get_rank() == 0 and epoch % args.save_freq == 0:\n logger.info('==> Saving...')\n save_checkpoint(args, epoch, model, test_acc, optimizer, scheduler)\n\n\ndef train(epoch, train_loader, model, optimizer, scheduler, args):\n \"\"\"\n one epoch training\n \"\"\"\n\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_meter = AverageMeter()\n acc1_meter = AverageMeter()\n acc5_meter = AverageMeter()\n\n end = time.time()\n for idx, (x, _, y) in enumerate(train_loader):\n x = x.cuda(non_blocking=True)\n y = y.cuda(non_blocking=True)\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # forward\n output = model(x)\n loss = F.cross_entropy(output, y)\n\n # backward\n optimizer.zero_grad()\n if args.amp_opt_level != \"O0\":\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.step()\n scheduler.step()\n\n # update meters\n acc1, acc5 = accuracy(output, y, topk=(1, 5))\n loss_meter.update(loss.item(), x.size(0))\n acc1_meter.update(acc1[0], x.size(0))\n acc5_meter.update(acc5[0], x.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print info\n if idx % args.print_freq == 0:\n logger.info(\n f'Epoch: [{epoch}][{idx}/{len(train_loader)}]\\t'\n f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n f'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n f'Lr {optimizer.param_groups[0][\"lr\"]:.3f} \\t'\n f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\\t'\n f'Acc@1 
{acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\\t'\n f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})')\n\n return acc1_meter.avg, acc5_meter.avg, loss_meter.avg\n\n\ndef validate(val_loader, model, args):\n batch_time = AverageMeter()\n loss_meter = AverageMeter()\n acc1_meter = AverageMeter()\n acc5_meter = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for idx, (x, _, y) in enumerate(val_loader):\n x = x.cuda(non_blocking=True)\n y = y.cuda(non_blocking=True)\n\n # compute output\n output = model(x)\n loss = F.cross_entropy(output, y)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, y, topk=(1, 5))\n\n acc1 = reduce_tensor(acc1)\n acc5 = reduce_tensor(acc5)\n loss = reduce_tensor(loss)\n\n loss_meter.update(loss.item(), x.size(0))\n acc1_meter.update(acc1[0], x.size(0))\n acc5_meter.update(acc5[0], x.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if idx % args.print_freq == 0:\n logger.info(\n f'Test: [{idx}/{len(val_loader)}]\\t'\n f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n f'Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f})\\t'\n f'Acc@1 {acc1_meter.val:.3f} ({acc1_meter.avg:.3f})\\t'\n f'Acc@5 {acc5_meter.val:.3f} ({acc5_meter.avg:.3f})')\n\n logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')\n\n return acc1_meter.avg, acc5_meter.avg, loss_meter.avg\n\n\nif __name__ == '__main__':\n opt = parse_option(stage='linear')\n\n if opt.amp_opt_level != \"O0\":\n assert amp is not None, \"amp not installed!\"\n\n torch.cuda.set_device(opt.local_rank)\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n cudnn.benchmark = True\n best_acc1 = 0\n\n os.makedirs(opt.output_dir, exist_ok=True)\n logger = setup_logger(output=opt.output_dir, distributed_rank=dist.get_rank(), name=\"contrast\")\n if dist.get_rank() == 0:\n path = os.path.join(opt.output_dir, \"config.json\")\n with open(path, \"w\") as f:\n json.dump(vars(opt), f, indent=2)\n logger.info(\"Full config saved to {}\".format(path))\n\n # print args\n # TODO: check format\n logger.info(vars(opt))\n\n main(opt)\n"
] | [
[
"torch.distributed.init_process_group",
"torch.cuda.set_device",
"torch.load",
"torch.nn.functional.cross_entropy",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"torch.distributed.get_rank",
"torch.distributed.get_world_size",
"torch.nn.parallel.DistributedDataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Hossam86/Statistics-Using-Python | [
"25fa7fb574c0cc8af48ea780da033a34b14affe8"
] | [
"confidence_interval_2.py"
] | [
"import numpy as np\nimport pandas as pd\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nimport scipy.stats\nimport scipy.optimize\nimport scipy.spatial\n\npoll=pd.read_csv(\"Statistics-Using-Python\\data\\poll.csv\")\npoll.info()\nprint (poll.vote.value_counts(normalize=True))\n\n#sampling func\n# ============\ndef sample(brown,n=1000):\n return pd.DataFrame({'vote':np.where(np.random.rand(n) <brown,'Brown','Green')})\n\ns=sample(0.51,n=1000)\nprint(s.vote.value_counts(normalize=True))\n\ndist=pd.DataFrame([sample(0.51).vote.value_counts(normalize=True) for i in range(1000)])\nprint (dist.head())\n\ndist.Brown.hist(histtype='step',bins=20)\nplt.show()"
] | [
[
"pandas.read_csv",
"numpy.random.rand",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
sbitters/enrichTSS | [
"c1b8d18c8e6f08926725290c233a04a22e41dfaf"
] | [
"modGFF.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# 2017-07-30\n# STB\n\nimport sys\nimport regex as re\nimport pandas as pd\nfrom argparse import ArgumentParser\nfrom file_read_backwards import FileReadBackwards\n\n\nMIT_license = \"\"\"Copyright 2017 Sven T. Bitters ([email protected])\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of\nthis software and associated documentation files (the \"Software\"), to deal in\nthe Software without restriction, including without limitation the rights to\nuse, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software is furnished to do so,\nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\nFOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\nCOPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\nIN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\nCONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\ndef parse_input():\n\n parser = ArgumentParser()\n parser.add_argument(\"-l\", action=\"store_true\", default=False, dest=\"license\")\n parser.add_argument(\"-i\", type=str, dest=\"gff_path\")\n parser.add_argument(\"-o\", type=str, dest=\"output\")\n\n parserargs = parser.parse_args()\n\n try:\n if parserargs.license:\n print(MIT_license)\n\n else:\n gff_path = parserargs.gff_path\n output_path = parserargs.output\n\n if gff_path is None and output_path is None:\n parser.print_help()\n raise SystemExit\n\n else:\n return gff_path, output_path\n\n except SystemExit:\n sys.exit()\n\n\ndef read_and_mod_gff(annot_gff):\n # Read the GFF file and replace the values \"attributes\" column with just the IDs of the annotated elements\n # i.e. 
something like \"ID=id364475;Parent=gene41724;Dbxref=GeneID:19989172;exon_number=1;gbkey=exon;gene=cox2\"\n # becomes \"id364475\"\n\n id_regex = re.compile(r\"(?<=ID=).+?(?=($|;))\")\n parent_regex = re.compile(r\"(?<=Parent=).+?(?=($|;))\")\n loc_regex = re.compile(r\"(?<=gene=).+?(?=($|;))\")\n name_regex = re.compile(r\"(?<=Name=).+?(?=($|;))\")\n\n id_dict = {}\n last_name = \"\"\n\n gff_line_list = list()\n print(\"Reading GFF...\")\n gff_in = FileReadBackwards(annot_gff, encoding=\"utf-8\")\n for line in gff_in:\n\n if re.match(\"\\w\", line) and not line.startswith('#'):\n tab_elements = line.split(\"\\t\")\n type = tab_elements[2]\n attributes = tab_elements[-1]\n\n try:\n element_id = re.search(id_regex, attributes)\n element_id = element_id.group()\n except:\n element_id = \".\"\n\n if element_id == \".\":\n id_dict[type] = id_dict.get(type, 0) + 1\n element_id = type + str(id_dict[type]-1)\n\n try:\n loc_name = re.search(loc_regex, attributes)\n loc_name = loc_name.group()\n except:\n loc_name = \".\"\n\n try:\n gene_name = re.search(name_regex, attributes)\n gene_name = gene_name.group()\n last_name = gene_name\n except:\n gene_name = \".\"\n\n if gene_name == \".\":\n gene_name = last_name\n\n if gene_name != \".\" and loc_name == \".\":\n loc_name = gene_name\n\n try:\n parent_gene = re.search(parent_regex, tab_elements[-1])\n parent_gene = parent_gene.group()\n except:\n parent_gene = \".\"\n\n if parent_gene == \".\" and gene_name != \".\":\n parent_gene = gene_name\n\n tab_elements[-1] = element_id\n tab_elements += [parent_gene, loc_name]\n\n gff_line_list.append(tab_elements)\n\n gff_cols = [\"seqid\", \"source\", \"type\", \"start\", \"end\", \"score\", \"strand\", \"phase\", \"id\", \"parent\", \"gene_name\"]\n gff_df = pd.DataFrame(gff_line_list, columns=gff_cols)\n\n return gff_df\n\n\ndef main():\n gff_path, output_path = parse_input()\n\n gff_df = read_and_mod_gff(gff_path)\n\n print(\"Saving GFF3...\")\n gff_df.to_csv(output_path, sep=\"\\t\", index=False)\n\nmain()\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
desh2608/css | [
"a595f40085f2a8de9a62f9d4e34950ab55a7d27e"
] | [
"css/models/conv_tasnet.py"
] | [
"#!/usr/bin/env python3\n# This module is taken from: https://github.com/JusperLee/Conv-TasNet/blob/master/Conv_TasNet_Pytorch/Conv_TasNet.py\n\nimport torch\n\nDEFAULT_CONV_TASNET_CONF = {\n \"num_filters\": 512,\n \"filter_length\": 16,\n \"bottleneck_channels\": 128,\n \"conv_channels\": 512,\n \"kernel_size\": 3,\n \"num_blocks\": 8,\n \"num_layers\": 3,\n}\n\n\nclass ConvTasNet(torch.nn.Module):\n \"\"\"\n Conformer model\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n parser.add_argument(\"--num-spk\", type=int, default=2)\n parser.add_argument(\"--num-noise\", type=int, default=1)\n parser.add_argument(\"--conv-tasnet-num-filters\", type=int, default=256)\n parser.add_argument(\"--conv-tasnet-filter-length\", type=int, default=16)\n parser.add_argument(\"--conv-tasnet-bottleneck-channels\", type=int, default=128)\n parser.add_argument(\"--conv-tasnet-conv-channels\", type=int, default=256)\n parser.add_argument(\"--conv-tasnet-kernel-size\", type=int, default=3)\n parser.add_argument(\"--conv-tasnet-num-blocks\", type=int, default=8)\n parser.add_argument(\"--conv-tasnet-num-layers\", type=int, default=3)\n parser.add_argument(\n \"--conv-tasnet-norm\", type=str, default=\"gln\", choices=[\"gln\", \"cln\", \"bn\"]\n )\n\n @classmethod\n def build_model(cls, conf):\n conv_tasnet_conf = {\n \"num_filters\": int(conf[\"conv_tasnet_num_filters\"]),\n \"filter_length\": int(conf[\"conv_tasnet_filter_length\"]),\n \"bottleneck_channels\": int(conf[\"conv_tasnet_bottleneck_channels\"]),\n \"conv_channels\": int(conf[\"conv_tasnet_conv_channels\"]),\n \"kernel_size\": int(conf[\"conv_tasnet_kernel_size\"]),\n \"num_blocks\": int(conf[\"conv_tasnet_num_blocks\"]),\n \"num_layers\": int(conf[\"conv_tasnet_num_layers\"]),\n \"norm\": conf[\"conv_tasnet_norm\"],\n }\n model = ConvTasNet(\n num_spk=conf[\"num_spk\"],\n num_noise=conf[\"num_noise\"],\n conv_tasnet_conf=conv_tasnet_conf,\n )\n return model\n\n def __init__(\n self,\n num_spk=2,\n num_noise=1,\n conv_tasnet_conf=DEFAULT_CONV_TASNET_CONF,\n activate=\"relu\",\n causal=False,\n ):\n N = conv_tasnet_conf[\"num_filters\"]\n L = conv_tasnet_conf[\"filter_length\"]\n B = conv_tasnet_conf[\"bottleneck_channels\"]\n H = conv_tasnet_conf[\"conv_channels\"]\n P = conv_tasnet_conf[\"kernel_size\"]\n X = conv_tasnet_conf[\"num_blocks\"]\n R = conv_tasnet_conf[\"num_layers\"]\n norm = conv_tasnet_conf[\"norm\"]\n\n super(ConvTasNet, self).__init__()\n # n x 1 x T => n x N x T\n self.encoder = Conv1D(1, N, L, stride=L // 2, padding=0)\n # n x N x T Layer Normalization of Separation\n self.LayerN_S = select_norm(\"cln\", N)\n # n x B x T Conv 1 x 1 of Separation\n self.BottleN_S = Conv1D(N, B, 1)\n # Separation block\n # n x B x T => n x B x T\n self.separation = self._Sequential_repeat(\n R, X, in_channels=B, out_channels=H, kernel_size=P, norm=norm, causal=causal\n )\n # n x B x T => n x 2*N x T\n self.gen_masks = Conv1D(B, (num_spk + num_noise) * N, 1)\n # n x N x T => n x 1 x L\n self.decoder = ConvTrans1D(N, 1, L, stride=L // 2)\n # activation function\n active_f = {\n \"relu\": torch.nn.ReLU(),\n \"sigmoid\": torch.nn.Sigmoid(),\n \"softmax\": torch.nn.Softmax(dim=0),\n }\n self.activation_type = activate\n self.activation = active_f[activate]\n self.num_spk = num_spk\n self.num_noise = num_noise\n\n def _Sequential_block(self, num_blocks, **block_kwargs):\n \"\"\"\n Sequential 1-D Conv Block\n input:\n num_block: how many blocks in every repeats\n **block_kwargs: parameters of Conv1D_Block\n \"\"\"\n Conv1D_Block_lists = [\n 
Conv1D_Block(**block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)\n ]\n\n return torch.nn.Sequential(*Conv1D_Block_lists)\n\n def _Sequential_repeat(self, num_repeats, num_blocks, **block_kwargs):\n \"\"\"\n Sequential repeats\n input:\n num_repeats: Number of repeats\n num_blocks: Number of block in every repeats\n **block_kwargs: parameters of Conv1D_Block\n \"\"\"\n repeats_lists = [\n self._Sequential_block(num_blocks, **block_kwargs)\n for i in range(num_repeats)\n ]\n return torch.nn.Sequential(*repeats_lists)\n\n def forward(self, x):\n if x.dim() >= 3:\n raise RuntimeError(\n \"{} accept 1/2D tensor as input, but got {:d}\".format(\n self.__name__, x.dim()\n )\n )\n if x.dim() == 1:\n x = torch.unsqueeze(x, 0)\n # x: n x 1 x L => n x N x T\n w = self.encoder(x)\n # n x N x L => n x B x L\n e = self.LayerN_S(w)\n e = self.BottleN_S(e)\n # n x B x L => n x B x L\n e = self.separation(e)\n # n x B x L => n x (num_spk+num_noise)*N x L\n m = self.gen_masks(e)\n # n x N x L x num_spks\n m = torch.chunk(m, chunks=self.num_spk + self.num_noise, dim=1)\n # (num_spks + num_noise) x n x N x L\n m = self.activation(torch.stack(m, dim=0))\n d = [w * m[i] for i in range(self.num_spk + self.num_noise)]\n # decoder part (num_spks + num_noise) x n x L\n s = [\n self.decoder(d[i], squeeze=True)\n for i in range(self.num_spk + self.num_noise)\n ]\n return torch.stack(s[:-1], dim=1)\n\n\nclass GlobalLayerNorm(torch.nn.Module):\n \"\"\"\n Calculate Global Layer Normalization\n dim: (int or list or torch.Size) –\n input shape from an expected input of size\n eps: a value added to the denominator for numerical stability.\n elementwise_affine: a boolean value that when set to True,\n this module has learnable per-element affine parameters\n initialized to ones (for weights) and zeros (for biases).\n \"\"\"\n\n def __init__(self, dim, eps=1e-05, elementwise_affine=True):\n super(GlobalLayerNorm, self).__init__()\n self.dim = dim\n self.eps = eps\n self.elementwise_affine = elementwise_affine\n\n if self.elementwise_affine:\n self.weight = torch.nn.Parameter(torch.ones(self.dim, 1))\n self.bias = torch.nn.Parameter(torch.zeros(self.dim, 1))\n else:\n self.register_parameter(\"weight\", None)\n self.register_parameter(\"bias\", None)\n\n def forward(self, x):\n # x = N x C x L\n # N x 1 x 1\n # cln: mean,var N x 1 x L\n # gln: mean,var N x 1 x 1\n if x.dim() != 3:\n raise RuntimeError(\"{} accept 3D tensor as input\".format(self.__name__))\n\n mean = torch.mean(x, (1, 2), keepdim=True)\n var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)\n # N x C x L\n if self.elementwise_affine:\n x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias\n else:\n x = (x - mean) / torch.sqrt(var + self.eps)\n return x\n\n\nclass CumulativeLayerNorm(torch.nn.LayerNorm):\n \"\"\"\n Calculate Cumulative Layer Normalization\n dim: you want to norm dim\n elementwise_affine: learnable per-element affine parameters\n \"\"\"\n\n def __init__(self, dim, elementwise_affine=True):\n super(CumulativeLayerNorm, self).__init__(\n dim, elementwise_affine=elementwise_affine\n )\n\n def forward(self, x):\n # x: N x C x L\n # N x L x C\n x = torch.transpose(x, 1, 2)\n # N x L x C == only channel norm\n x = super().forward(x)\n # N x C x L\n x = torch.transpose(x, 1, 2)\n return x\n\n\ndef select_norm(norm, dim):\n if norm == \"gln\":\n return GlobalLayerNorm(dim, elementwise_affine=True)\n if norm == \"cln\":\n return CumulativeLayerNorm(dim, elementwise_affine=True)\n elif norm == \"bn\":\n return 
torch.nn.BatchNorm1d(dim)\n else:\n raise ValueError(\"Unknown normalization: {}\".format(norm))\n\n\nclass Conv1D(torch.nn.Conv1d):\n \"\"\"\n Applies a 1D convolution over an input signal composed of several input planes.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Conv1D, self).__init__(*args, **kwargs)\n\n def forward(self, x, squeeze=False):\n # x: N x C x L\n if x.dim() not in [2, 3]:\n raise RuntimeError(\"{} accept 2/3D tensor as input\".format(self.__name__))\n x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))\n if squeeze:\n x = torch.squeeze(x)\n return x\n\n\nclass ConvTrans1D(torch.nn.ConvTranspose1d):\n \"\"\"\n This module can be seen as the gradient of Conv1d with respect to its input.\n It is also known as a fractionally-strided convolution\n or a deconvolution (although it is not an actual deconvolution operation).\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(ConvTrans1D, self).__init__(*args, **kwargs)\n\n def forward(self, x, squeeze=False):\n \"\"\"\n x: N x L or N x C x L\n \"\"\"\n if x.dim() not in [2, 3]:\n raise RuntimeError(\"{} accept 2/3D tensor as input\".format(self.__name__))\n x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))\n if squeeze:\n x = torch.squeeze(x)\n return x\n\n\nclass Conv1D_Block(torch.nn.Module):\n \"\"\"\n Consider only residual links\n \"\"\"\n\n def __init__(\n self,\n in_channels=256,\n out_channels=512,\n kernel_size=3,\n dilation=1,\n norm=\"gln\",\n causal=False,\n ):\n super(Conv1D_Block, self).__init__()\n # conv 1 x 1\n self.conv1x1 = Conv1D(in_channels, out_channels, 1)\n self.PReLU_1 = torch.nn.PReLU()\n self.norm_1 = select_norm(norm, out_channels)\n # not causal don't need to padding, causal need to pad+1 = kernel_size\n self.pad = (\n (dilation * (kernel_size - 1)) // 2\n if not causal\n else (dilation * (kernel_size - 1))\n )\n # depthwise convolution\n self.dwconv = Conv1D(\n out_channels,\n out_channels,\n kernel_size,\n groups=out_channels,\n padding=self.pad,\n dilation=dilation,\n )\n self.PReLU_2 = torch.nn.PReLU()\n self.norm_2 = select_norm(norm, out_channels)\n self.Sc_conv = torch.nn.Conv1d(out_channels, in_channels, 1, bias=True)\n self.causal = causal\n\n def forward(self, x):\n # x: N x C x L\n # N x O_C x L\n c = self.conv1x1(x)\n # N x O_C x L\n c = self.PReLU_1(c)\n c = self.norm_1(c)\n # causal: N x O_C x (L+pad)\n # noncausal: N x O_C x L\n c = self.dwconv(c)\n # N x O_C x L\n if self.causal:\n c = c[:, :, : -self.pad]\n c = self.PReLU_2(c)\n c = self.norm_2(c)\n c = self.Sc_conv(c)\n return x + c\n"
] | [
[
"torch.nn.Sequential",
"torch.mean",
"torch.transpose",
"torch.nn.Softmax",
"torch.nn.BatchNorm1d",
"torch.ones",
"torch.zeros",
"torch.sqrt",
"torch.nn.PReLU",
"torch.unsqueeze",
"torch.nn.Sigmoid",
"torch.nn.Conv1d",
"torch.chunk",
"torch.nn.ReLU",
"torch.squeeze",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
scivision/apexpy | [
"a2e919fd9ea9a65d49c4c22c9eb030c8ccf48386"
] | [
"tests/test_Apex.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, absolute_import, unicode_literals\n\nimport datetime as dt\nimport warnings\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose\n\nfrom apexpy import fortranapex as fa\nfrom apexpy import Apex, ApexHeightError, helpers\n\n\n##############################################################################\n# NOTE: whenever function outputs are tested against hard-coded numbers, #\n# the test results (numbers) were obtained by running the code that is #\n# tested. Therefore these tests below only check that nothing changes when #\n# refactoring etc., and not if the results are actually correct #\n##############################################################################\n\n\n###============================================================================\n### Test initiating the Apex class\n###============================================================================\n\n\ndef test_init_defaults():\n Apex()\n\n\ndef test_init_date_int():\n apex_out = Apex(date=2015)\n assert apex_out.year == 2015\n\n\ndef test_init_date_float():\n apex_out = Apex(date=2015.5)\n assert apex_out.year == 2015.5\n\n\ndef test_init_date():\n date = dt.date(2015, 1, 1)\n apex_out = Apex(date=date)\n assert apex_out.year == helpers.toYearFraction(date)\n\n\ndef test_init_datetime():\n datetime = dt.datetime(2015, 6, 1, 18, 23, 45)\n apex_out = Apex(date=datetime)\n assert apex_out.year == helpers.toYearFraction(datetime)\n\n\ndef test_init_datafile_IOError():\n with pytest.raises(IOError):\n Apex(date=2015, datafile='foo/path/to/datafile.blah')\n\n\n###============================================================================\n### Test the low-level interfaces to the fortran wrappers\n###============================================================================\n\ndef test__geo2qd_scalar():\n apex_out = Apex(date=2000, refh=300)\n for lat in [0, 30, 60, 89]:\n for lon in [-179, -90, 0, 90, 180]:\n assert_allclose(apex_out._geo2qd(lat, lon, 100),\n fa.apxg2q(lat, lon, 100, 0)[:2])\n\n\ndef test__geo2qd_array():\n apex_out = Apex(date=2000, refh=300)\n lats, lons = apex_out._geo2qd([[0, 30], [60, 90]], 15,\n [[100, 200], [300, 400]])\n lat1, lon1 = fa.apxg2q(0, 15, 100, 0)[:2]\n lat2, lon2 = fa.apxg2q(30, 15, 200, 0)[:2]\n lat3, lon3 = fa.apxg2q(60, 15, 300, 0)[:2]\n lat4, lon4 = fa.apxg2q(90, 15, 400, 0)[:2]\n assert_allclose(lats.astype(float), np.array([[lat1, lat2], [lat3, lat4]],\n dtype=float))\n assert_allclose(lons.astype(float), np.array([[lon1, lon2], [lon3, lon4]],\n dtype=float))\n\n\ndef test__geo2qd_longitude():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out._geo2qd(60, 180, 100),\n fa.apxg2q(60, 180, 100, 0)[:2])\n assert_allclose(apex_out._geo2qd(60, -180, 100),\n fa.apxg2q(60, -180, 100, 0)[:2])\n assert_allclose(apex_out._geo2qd(60, -180, 100),\n apex_out._geo2qd(60, 180, 100))\n for i in range(-5, 5):\n for lat in [0, 30, 60, 90]:\n assert_allclose(apex_out._geo2qd(lat, 15+i*360, 100),\n fa.apxg2q(lat, 15, 100, 0)[:2])\n\n\ndef test__geo2apex_scalar():\n apex_out = Apex(date=2000, refh=300)\n for lat in [0, 30, 60, 89]:\n for lon in [-179, -90, 0, 90, 180]:\n assert_allclose(apex_out._geo2apex(lat, lon, 100),\n fa.apxg2all(lat, lon, 100, 300, 0)[2:4])\n\n\ndef test__geo2apex_array():\n apex_out = Apex(date=2000, refh=300)\n lats, lons = apex_out._geo2apex([[0, 30], [60, 90]], 15,\n [[100, 200], [300, 400]])\n lat1, lon1 = fa.apxg2all(0, 15, 100, 300, 0)[2:4]\n lat2, lon2 = 
fa.apxg2all(30, 15, 200, 300, 0)[2:4]\n lat3, lon3 = fa.apxg2all(60, 15, 300, 300, 0)[2:4]\n lat4, lon4 = fa.apxg2all(90, 15, 400, 300, 0)[2:4]\n assert_allclose(lats.astype(float), np.array([[lat1, lat2], [lat3, lat4]],\n dtype=float))\n assert_allclose(lons.astype(float), np.array([[lon1, lon2], [lon3, lon4]],\n dtype=float))\n\n\ndef test__geo2apex_longitude():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out._geo2apex(60, 180, 100),\n fa.apxg2all(60, 180, 100, 300, 0)[2:4])\n assert_allclose(apex_out._geo2apex(60, -180, 100),\n fa.apxg2all(60, -180, 100, 300, 0)[2:4])\n assert_allclose(apex_out._geo2apex(60, -180, 100),\n apex_out._geo2apex(60, 180, 100))\n for i in range(-5, 5):\n for lat in [0, 30, 60, 90]:\n assert_allclose(apex_out._geo2apex(lat, 15+i*360, 100),\n fa.apxg2all(lat, 15, 100, 300, 0)[2:4])\n\n\ndef test__geo2apexall_scalar():\n apex_out = Apex(date=2000, refh=300)\n for lat in [0, 30, 60, 89]:\n for lon in [-179, -90, 0, 90, 180]:\n ret1 = apex_out._geo2apexall(lat, lon, 100)\n ret2 = fa.apxg2all(lat, lon, 100, 300, 1)\n for r1, r2 in zip(ret1, ret2):\n assert_allclose(r1, r2)\n\n\ndef test__geo2apexall_array():\n apex_out = Apex(date=2000, refh=300)\n ret = apex_out._geo2apexall([[0, 30], [60, 90]], 15,\n [[100, 200], [300, 400]])\n ret1 = fa.apxg2all(0, 15, 100, 300, 1)\n ret2 = fa.apxg2all(30, 15, 200, 300, 1)\n ret3 = fa.apxg2all(60, 15, 300, 300, 1)\n ret4 = fa.apxg2all(90, 15, 400, 300, 1)\n for i in range(len(ret)):\n try:\n # ret[i] is array of floats\n assert_allclose(ret[i].astype(float),\n np.array([[ret1[i], ret2[i]], [ret3[i], ret4[i]]],\n dtype=float))\n except:\n # ret[i] is array of arrays\n assert_allclose(ret[i][0, 0], ret1[i])\n assert_allclose(ret[i][0, 1], ret2[i])\n assert_allclose(ret[i][1, 0], ret3[i])\n assert_allclose(ret[i][1, 1], ret4[i])\n\n\ndef test__qd2geo_scalar():\n apex_out = Apex(date=2000, refh=300)\n for lat in [0, 30, 60, 89]:\n for lon in [-179, -90, 0, 90, 180]:\n for prec in [-1, 1e-2, 1e-10]:\n assert_allclose(apex_out._qd2geo(lat, lon, 100, prec),\n fa.apxq2g(lat, lon, 100, prec))\n\n\ndef test__qd2geo_array():\n apex_out = Apex(date=2000, refh=300)\n lats, lons, errs = apex_out._qd2geo([[0, 30], [60, 90]], 15,\n [[100, 200], [300, 400]], 1e-2)\n lat1, lon1, err1 = fa.apxq2g(0, 15, 100, 1e-2)\n lat2, lon2, err2 = fa.apxq2g(30, 15, 200, 1e-2)\n lat3, lon3, err3 = fa.apxq2g(60, 15, 300, 1e-2)\n lat4, lon4, err4 = fa.apxq2g(90, 15, 400, 1e-2)\n assert_allclose(lats.astype(float), np.array([[lat1, lat2], [lat3, lat4]],\n dtype=float))\n assert_allclose(lons.astype(float), np.array([[lon1, lon2], [lon3, lon4]],\n dtype=float))\n assert_allclose(errs.astype(float), np.array([[err1, err2], [err3, err4]],\n dtype=float))\n\n\ndef test__qd2geo_longitude():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out._qd2geo(60, 180, 100, 1e-2),\n fa.apxq2g(60, 180, 100, 1e-2))\n assert_allclose(apex_out._qd2geo(60, -180, 100, 1e-2),\n fa.apxq2g(60, -180, 100, 1e-2))\n assert_allclose(apex_out._qd2geo(60, -180, 100, 1e-2),\n apex_out._qd2geo(60, 180, 100, 1e-2))\n for i in range(-5, 5):\n for lat in [0, 30, 60, 90]:\n assert_allclose(apex_out._qd2geo(lat, 15+i*360, 100, 1e-2),\n fa.apxq2g(lat, 15, 100, 1e-2))\n\n\ndef test__basevec_scalar():\n apex_out = Apex(date=2000, refh=300)\n for lat in [0, 30, 60, 89]:\n for lon in [-179, -90, 0, 90, 180]:\n assert_allclose(apex_out._basevec(lat, lon, 100),\n fa.apxg2q(lat, lon, 100, 1)[2:4])\n\n\ndef test__basevec_array():\n apex_out = Apex(date=2000, refh=300)\n f1s, f2s 
= apex_out._basevec([[0, 30], [60, 90]], 15,\n [[100, 200], [300, 400]])\n f11, f21 = fa.apxg2q(0, 15, 100, 1)[2:4]\n f12, f22 = fa.apxg2q(30, 15, 200, 1)[2:4]\n f13, f23 = fa.apxg2q(60, 15, 300, 1)[2:4]\n f14, f24 = fa.apxg2q(90, 15, 400, 1)[2:4]\n assert_allclose(f1s[0, 0], f11)\n assert_allclose(f1s[0, 1], f12)\n assert_allclose(f1s[1, 0], f13)\n assert_allclose(f1s[1, 1], f14)\n assert_allclose(f2s[0, 0], f21)\n assert_allclose(f2s[0, 1], f22)\n assert_allclose(f2s[1, 0], f23)\n assert_allclose(f2s[1, 1], f24)\n\n\ndef test__basevec_longitude():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out._basevec(60, 180, 100),\n fa.apxg2q(60, 180, 100, 1)[2:4])\n assert_allclose(apex_out._basevec(60, -180, 100),\n fa.apxg2q(60, -180, 100, 1)[2:4])\n assert_allclose(apex_out._basevec(60, -180, 100),\n apex_out._basevec(60, 180, 100))\n for i in range(-5, 5):\n for lat in [0, 30, 60, 90]:\n assert_allclose(apex_out._basevec(lat, 15+i*360, 100),\n fa.apxg2q(lat, 15, 100, 1)[2:4])\n\n\n###============================================================================\n### Test the convert() method\n###============================================================================\n\n\ndef test_convert_geo2apex():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'geo', 'apex', height=100),\n apex_out.geo2apex(60, 15, 100))\n\n\ndef test_convert_geo2qd():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'geo', 'qd', height=100),\n apex_out.geo2qd(60, 15, 100))\n\n\ndef test_convert_geo2mlt_nodate():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.convert(60, 15, 'geo', 'mlt')\n\n\ndef test_convert_geo2mlt():\n datetime = dt.datetime(2000, 3, 9, 14, 25, 58)\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'geo', 'mlt', height=100,\n ssheight=2e5, datetime=datetime)[1],\n apex_out.mlon2mlt(apex_out.geo2apex(60, 15, 100)[1],\n datetime, ssheight=2e5))\n\n\ndef test_convert_apex2geo():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'apex', 'geo', height=100,\n precision=1e-2),\n apex_out.apex2geo(60, 15, 100, precision=1e-2)[:-1])\n\n\ndef test_convert_apex2qd():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'apex', 'qd', height=100),\n apex_out.apex2qd(60, 15, height=100))\n\n\ndef test_convert_apex2mlt():\n datetime = dt.datetime(2000, 3, 9, 14, 25, 58)\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'apex', 'mlt', height=100,\n datetime=datetime, ssheight=2e5)[1],\n apex_out.mlon2mlt(15, datetime, ssheight=2e5))\n\n\ndef test_convert_qd2geo():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'qd', 'geo', height=100,\n precision=1e-2),\n apex_out.qd2geo(60, 15, 100, precision=1e-2)[:-1])\n\n\ndef test_convert_qd2apex():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'qd', 'apex', height=100),\n apex_out.qd2apex(60, 15, height=100))\n\n\ndef test_convert_qd2mlt():\n datetime = dt.datetime(2000, 3, 9, 14, 25, 58)\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'qd', 'mlt', height=100,\n datetime=datetime, ssheight=2e5)[1],\n apex_out.mlon2mlt(15, datetime, ssheight=2e5))\n\n\ndef test_convert_mlt2geo():\n datetime = dt.datetime(2000, 3, 9, 14, 25, 58)\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'mlt', 'geo', height=100,\n 
datetime=datetime, precision=1e-2,\n ssheight=2e5),\n apex_out.apex2geo(60, apex_out.mlt2mlon(15, datetime,\n ssheight=2e5), 100,\n precision=1e-2)[:-1])\n\n\ndef test_convert_mlt2apex():\n datetime = dt.datetime(2000, 3, 9, 14, 25, 58)\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'mlt', 'apex', height=100,\n datetime=datetime, ssheight=2e5),\n (60, apex_out.mlt2mlon(15, datetime, ssheight=2e5)))\n\n\ndef test_convert_mlt2qd():\n datetime = dt.datetime(2000, 3, 9, 14, 25, 58)\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.convert(60, 15, 'mlt', 'qd', height=100,\n datetime=datetime, ssheight=2e5),\n apex_out.apex2qd(60, apex_out.mlt2mlon(15, datetime,\n ssheight=2e5),\n height=100))\n\n\ndef test_convert_invalid_lat():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.convert(91, 0, 'geo', 'geo')\n with pytest.raises(ValueError):\n apex_out.convert(-91, 0, 'geo', 'geo')\n apex_out.convert(90, 0, 'geo', 'geo')\n apex_out.convert(-90, 0, 'geo', 'geo')\n\n assert_allclose(apex_out.convert(90+1e-5, 0, 'geo', 'apex'),\n apex_out.convert(90, 0, 'geo', 'apex'), rtol=0, atol=1e-8)\n\n\ndef test_convert_invalid_transformation():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(NotImplementedError):\n apex_out.convert(0, 0, 'foobar', 'geo')\n with pytest.raises(NotImplementedError):\n apex_out.convert(0, 0, 'geo', 'foobar')\n\n\n###============================================================================\n### Test the geo2apex() method\n###============================================================================\n\n\ndef test_geo2apex():\n apex_out = Apex(date=2000, refh=300)\n lat, lon = apex_out.geo2apex(60, 15, 100)\n assert_allclose((lat, lon), apex_out._geo2apex(60, 15, 100))\n assert type(lat) != np.ndarray\n assert type(lon) != np.ndarray\n\n\ndef test_geo2apex_vectorization():\n apex_out = Apex(date=2000, refh=300)\n assert apex_out.geo2apex([60, 60], 15, 100)[0].shape == (2,)\n assert apex_out.geo2apex(60, [15, 15], 100)[0].shape == (2,)\n assert apex_out.geo2apex(60, 15, [100, 100])[0].shape == (2,)\n\n\ndef test_geo2apex_invalid_lat():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.geo2apex(91, 0, 0)\n with pytest.raises(ValueError):\n apex_out.geo2apex(-91, 0, 0)\n apex_out.geo2apex(90, 0, 0)\n apex_out.geo2apex(-90, 0, 0)\n\n assert_allclose(apex_out.geo2apex(90+1e-5, 0, 0),\n apex_out.geo2apex(90, 0, 0), rtol=0, atol=1e-8)\n\n\ndef test_geo2apex_undefined_warning():\n apex_out = Apex(date=2000, refh=10000)\n with warnings.catch_warnings(record=True) as w:\n ret = apex_out.geo2apex(0, 0, 0)\n assert ret[0] == -9999\n assert issubclass(w[-1].category, UserWarning)\n assert 'set to -9999 where' in str(w[-1].message)\n\n\n###============================================================================\n### Test the apex2geo() method\n###============================================================================\n\n\ndef test_apex2geo():\n apex_out = Apex(date=2000, refh=300)\n lat, lon, error = apex_out.apex2geo(60, 15, 100, precision=1e-2)\n assert_allclose((lat, lon, error),\n apex_out.qd2geo(*apex_out.apex2qd(60, 15, 100), height=100,\n precision=1e-2))\n assert type(lat) != np.ndarray\n assert type(lon) != np.ndarray\n assert type(error) != np.ndarray\n\n\ndef test_apex2geo_vectorization():\n apex_out = Apex(date=2000, refh=300)\n assert apex_out.apex2geo([60, 60], 15, 100)[0].shape == (2,)\n assert apex_out.apex2geo(60, [15, 15], 100)[0].shape 
== (2,)\n assert apex_out.apex2geo(60, 15, [100, 100])[0].shape == (2,)\n\n\ndef test_apex2geo_invalid_lat():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.apex2geo(91, 0, 0, 1e-2)\n with pytest.raises(ValueError):\n apex_out.apex2geo(-91, 0, 0, 1e-2)\n apex_out.apex2geo(90, 0, 0, 1e-2)\n apex_out.apex2geo(-90, 0, 0, 1e-2)\n\n assert_allclose(apex_out.apex2geo(90+1e-5, 0, 0, 1e-2),\n apex_out.apex2geo(90, 0, 0, 1e-2), rtol=0, atol=1e-8)\n\n\n###============================================================================\n### Test the geo2qd() method\n###============================================================================\n\n\ndef test_geo2qd():\n apex_out = Apex(date=2000, refh=300)\n lat, lon = apex_out.geo2qd(60, 15, 100)\n assert_allclose((lat, lon), apex_out._geo2qd(60, 15, 100))\n assert type(lat) != np.ndarray\n assert type(lon) != np.ndarray\n\n\ndef test_geo2qd_vectorization():\n apex_out = Apex(date=2000, refh=300)\n assert apex_out.geo2qd([60, 60], 15, 100)[0].shape == (2,)\n assert apex_out.geo2qd(60, [15, 15], 100)[0].shape == (2,)\n assert apex_out.geo2qd(60, 15, [100, 100])[0].shape == (2,)\n\n\ndef test_geo2qd_invalid_lat():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.geo2qd(91, 0, 0)\n with pytest.raises(ValueError):\n apex_out.geo2qd(-91, 0, 0)\n apex_out.geo2qd(90, 0, 0)\n apex_out.geo2qd(-90, 0, 0)\n\n assert_allclose(apex_out.geo2qd(90+1e-5, 0, 0), apex_out.geo2qd(90, 0, 0),\n rtol=0, atol=1e-8)\n\n\n###============================================================================\n### Test the qd2geo() method\n###============================================================================\n\n\ndef test_qd2geo():\n apex_out = Apex(date=2000, refh=300)\n lat, lon, error = apex_out.qd2geo(60, 15, 100, precision=1e-2)\n assert_allclose((lat, lon, error), apex_out._qd2geo(60, 15, 100, 1e-2))\n assert type(lat) != np.ndarray\n assert type(lon) != np.ndarray\n assert type(error) != np.ndarray\n\n\ndef test_qd2geo_vectorization():\n apex_out = Apex(date=2000, refh=300)\n assert apex_out.qd2geo([60, 60], 15, 100)[0].shape == (2,)\n assert apex_out.qd2geo(60, [15, 15], 100)[0].shape == (2,)\n assert apex_out.qd2geo(60, 15, [100, 100])[0].shape == (2,)\n\n\ndef test_qd2geo_invalid_lat():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.qd2geo(91, 0, 0, precision=1e-2)\n with pytest.raises(ValueError):\n apex_out.qd2geo(-91, 0, 0, precision=1e-2)\n apex_out.qd2geo(90, 0, 0, precision=1e-2)\n apex_out.qd2geo(-90, 0, 0, precision=1e-2)\n\n assert_allclose(apex_out.qd2geo(90+1e-5, 0, 0, 1e-2),\n apex_out.qd2geo(90, 0, 0, 1e-2), rtol=0, atol=1e-8)\n\n\n###============================================================================\n### Test the apex2qd() method\n###============================================================================\n\n\ndef test_apex2qd():\n apex_out = Apex(date=2000, refh=300)\n lat, lon = apex_out.apex2qd(60, 15, 100)\n assert_allclose((lat, lon),\n [60.498401, 15])\n assert type(lat) != np.ndarray\n assert type(lon) != np.ndarray\n\n\ndef test_apex2qd_vectorization():\n apex_out = Apex(date=2000, refh=300)\n assert apex_out.apex2qd([60, 60], 15, 100)[0].shape == (2,)\n assert apex_out.apex2qd(60, [15, 15], 100)[0].shape == (2,)\n assert apex_out.apex2qd(60, 15, [100, 100])[0].shape == (2,)\n\n\ndef test_apex2qd_invalid_lat():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.apex2qd(91, 0, 0)\n with 
pytest.raises(ValueError):\n apex_out.apex2qd(-91, 0, 0)\n apex_out.apex2qd(90, 0, 0)\n apex_out.apex2qd(-90, 0, 0)\n\n assert_allclose(apex_out.apex2qd(90+1e-5, 0, 0), apex_out.apex2qd(90, 0, 0),\n rtol=0, atol=1e-8)\n\n\ndef test_apex2qd_apexheight_close():\n apex_out = Apex(date=2000, refh=300)\n apex_out.apex2qd(0, 15, 300+1e-6)\n\n\ndef test_apex2qd_apexheight_over():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ApexHeightError):\n apex_out.apex2qd(0, 15, 301)\n\n\n###============================================================================\n### Test the qd2apex() method\n###============================================================================\n\n\ndef test_qd2apex():\n apex_out = Apex(date=2000, refh=300)\n lat, lon = apex_out.qd2apex(60, 15, 100)\n assert_allclose((lat, lon),\n [59.491381, 15])\n assert type(lat) != np.ndarray\n assert type(lon) != np.ndarray\n\n\ndef test_qd2apex_vectorization():\n apex_out = Apex(date=2000, refh=300)\n assert apex_out.qd2apex([60, 60], 15, 100)[0].shape == (2,)\n assert apex_out.qd2apex(60, [15, 15], 100)[0].shape == (2,)\n assert apex_out.qd2apex(60, 15, [100, 100])[0].shape == (2,)\n\n\ndef test_qd2apex_invalid_lat():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.qd2apex(91, 0, 0)\n with pytest.raises(ValueError):\n apex_out.qd2apex(-91, 0, 0)\n apex_out.qd2apex(90, 0, 0)\n apex_out.qd2apex(-90, 0, 0)\n\n assert_allclose(apex_out.qd2apex(90+1e-5, 0, 0), apex_out.qd2apex(90, 0, 0),\n rtol=0, atol=1e-8)\n\n\ndef test_qd2apex_apexheight_close():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.qd2apex(0, 15, 300-1e-5),\n apex_out.qd2apex(0, 15, 300))\n\n\ndef test_qd2apex_apexheight_over():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ApexHeightError):\n apex_out.qd2apex(0, 15, 299)\n\n\n###============================================================================\n### Test mlon2mlt()\n###============================================================================\n\n\ndef test_mlon2mlt_scalar():\n apex_out = Apex(date=2000, refh=300)\n mlon = apex_out.mlon2mlt(0, dt.datetime(2000, 2, 3, 4, 5, 6))\n assert_allclose(mlon, 23.019629923502603)\n assert type(mlon) != np.ndarray\n\n\ndef test_mlon2mlt_ssheight():\n apex_out = Apex(date=2000, refh=300)\n mlt = apex_out.mlon2mlt(0, dt.datetime(2000, 2, 3, 4, 5, 6),\n ssheight=50*2000)\n assert_allclose(mlt, 23.026712036132814)\n\n\ndef test_mlon2mlt_1Darray():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.mlon2mlt([0, 180],\n dt.datetime(2000, 2, 3, 4, 5, 6)),\n [23.019261, 11.019261], rtol=1e-4)\n\n\ndef test_mlon2mlt_2Darray():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.mlon2mlt([[0, 180], [0, 180]],\n dt.datetime(2000, 2, 3, 4, 5, 6)),\n [[23.019261, 11.019261], [23.019261, 11.019261]], rtol=1e-4)\n\n\ndef test_mlon2mlt_diffdates():\n apex_out = Apex(date=2000, refh=300)\n dtime1 = dt.datetime(2000, 2, 3, 4, 5, 6)\n dtime2 = dt.datetime(2000, 2, 3, 5, 5, 6)\n assert apex_out.mlon2mlt(0, dtime1) != apex_out.mlon2mlt(0, dtime2)\n\n\ndef test_mlon2mlt_offset():\n apex_out = Apex(date=2000, refh=300)\n date = dt.datetime(2000, 2, 3, 4, 5, 6)\n assert_allclose(apex_out.mlon2mlt(0, date),\n apex_out.mlon2mlt(-15, date) + 1)\n assert_allclose(apex_out.mlon2mlt(0, date),\n apex_out.mlon2mlt(-10*15, date) + 10)\n\n\ndef test_mlon2mlt_range():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.mlon2mlt(range(0, 361, 30),\n dt.datetime(2000, 2, 3, 4, 5, 6)),\n 
[23.01963, 1.01963, 3.01963, 5.01963, 7.01963,\n 9.01963, 11.01963, 13.01963, 15.01963, 17.01963,\n 19.01963, 21.01963, 23.01963],\n rtol=1e-4)\n\n\n###============================================================================\n### Test mlt2mlon()\n###============================================================================\n\n\ndef test_mlt2mlon_scalar():\n apex_out = Apex(date=2000, refh=300)\n mlt = apex_out.mlt2mlon(0, dt.datetime(2000, 2, 3, 4, 5, 6))\n assert_allclose(mlt, 14.705551147460938)\n assert type(mlt) != np.ndarray\n\n\ndef test_mlt2mlon_ssheight():\n apex_out = Apex(date=2000, refh=300)\n mlt = apex_out.mlt2mlon(0, dt.datetime(2000, 2, 3, 4, 5, 6),\n ssheight=50*2000)\n assert_allclose(mlt, 14.599319458007812)\n\n\ndef test_mlt2mlon_1Darray():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.mlt2mlon([0, 12],\n dt.datetime(2000, 2, 3, 4, 5, 6)),\n [14.705551, 194.705551], rtol=1e-4)\n\n\ndef test_mlt2mlon_2Darray():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.mlt2mlon([[0, 12], [0, 12]],\n dt.datetime(2000, 2, 3, 4, 5, 6)),\n [[14.705551, 194.705551], [14.705551, 194.705551]],\n rtol=1e-4)\n\n\ndef test_mlt2mlon_diffdates():\n apex_out = Apex(date=2000, refh=300)\n dtime1 = dt.datetime(2000, 2, 3, 4, 5, 6)\n dtime2 = dt.datetime(2000, 2, 3, 5, 5, 6)\n assert apex_out.mlt2mlon(0, dtime1) != apex_out.mlt2mlon(0, dtime2)\n\n\ndef test_mlt2mlon_offset():\n apex_out = Apex(date=2000, refh=300)\n date = dt.datetime(2000, 2, 3, 4, 5, 6)\n assert_allclose(apex_out.mlt2mlon(0, date), apex_out.mlt2mlon(1, date) - 15)\n assert_allclose(apex_out.mlt2mlon(0, date),\n apex_out.mlt2mlon(10, date) - 150)\n\n\ndef test_mlt2mlon_range():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.mlt2mlon(range(0, 25, 2),\n dt.datetime(2000, 2, 3, 4, 5, 6)),\n [14.705551, 44.705551, 74.705551, 104.705551, 134.705551,\n 164.705551, 194.705551, 224.705551, 254.705551, 284.705551,\n 314.705551, 344.705551, 14.705551],\n rtol=1e-4)\n\n\n###============================================================================\n### Test mlt/mlon back and forth\n###============================================================================\n\n\ndef test_mlon2mlt2mlon():\n apex_out = Apex(date=2000, refh=300)\n date = dt.datetime(2000, 2, 3, 4, 5, 6)\n assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(0, date), date), 0)\n assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(6, date), date), 6)\n assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(12, date), date), 12)\n assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(18, date), date), 18)\n assert_allclose(apex_out.mlon2mlt(apex_out.mlt2mlon(24, date), date), 0)\n\n\ndef test_mlt2mlon2mlt():\n apex_out = Apex(date=2000, refh=300)\n date = dt.datetime(2000, 2, 3, 4, 5, 6)\n assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(0, date), date), 0)\n assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(90, date), date), 90)\n assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(180, date), date), 180)\n assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(270, date), date), 270)\n assert_allclose(apex_out.mlt2mlon(apex_out.mlon2mlt(360, date), date), 0)\n\n\n###============================================================================\n### Test the map_to_height() method\n###============================================================================\n\n\ndef test_map_to_height():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.map_to_height(60, 15, 100, 10000, conjugate=False,\n 
precision=1e-10),\n (31.841459274291992, 17.916629791259766, 0))\n assert_allclose(apex_out.map_to_height(30, 170, 100, 500, conjugate=False,\n precision=1e-2),\n (25.727252960205078, 169.60546875, 0.00017655163537710905))\n\n\ndef test_map_to_height_same_height():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.map_to_height(60, 15, 100, 100, conjugate=False,\n precision=1e-10),\n (60, 15, 3.4150946248701075e-6), rtol=1e-5)\n\n\ndef test_map_to_height_conjugate():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.map_to_height(60, 15, 100, 10000, conjugate=True,\n precision=1e-10),\n (-25.424892425537109, 27.310417175292969,\n 1.2074182222931995e-6))\n assert_allclose(apex_out.map_to_height(30, 170, 100, 500, conjugate=True,\n precision=1e-2),\n (-13.76642894744873, 164.24259948730469,\n 0.00056820799363777041))\n\n\ndef test_map_to_height_vectorization():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.map_to_height([60, 60], 15, 100, 100),\n ([60]*2, [15]*2, [3.4150946248701075e-6]*2), rtol=1e-5)\n assert_allclose(apex_out.map_to_height(60, [15, 15], 100, 100),\n ([60]*2, [15]*2, [3.4150946248701075e-6]*2), rtol=1e-5)\n assert_allclose(apex_out.map_to_height(60, 15, [100, 100], 100),\n ([60]*2, [15]*2, [3.4150946248701075e-6]*2), rtol=1e-5)\n assert_allclose(apex_out.map_to_height(60, 15, 100, [100, 100]),\n ([60]*2, [15]*2, [3.4150946248701075e-6]*2), rtol=1e-5)\n\n\ndef test_map_to_height_ApexHeightError():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ApexHeightError):\n apex_out.map_to_height(0, 15, 100, 10000)\n\n\n###============================================================================\n### Test the map_E_to_height() method\n###============================================================================\n\n\ndef test_map_E_to_height():\n apex_out = Apex(date=2000, refh=300)\n out_60_15_100_500 = [0.7115211, 2.3562392, 0.57259707]\n out_60_15_100_500_234 = [1.560284, 3.439154, 0.782339]\n out_60_15_100_1000 = [0.677964, 2.089811, 0.558601]\n out_60_15_200_500 = [0.723773, 2.427366, 0.590826]\n out_60_30_100_500 = [0.686265, 2.375296, 0.600594]\n out_70_15_100_500 = [0.727605, 2.180817, 0.291414]\n\n # scalar\n assert_allclose(apex_out.map_E_to_height(60, 15, 100, 500, [1, 2, 3]),\n out_60_15_100_500, rtol=1e-5)\n assert_allclose(apex_out.map_E_to_height(60, 15, 100, 500, [2, 3, 4]),\n out_60_15_100_500_234, rtol=1e-5)\n assert_allclose(apex_out.map_E_to_height(60, 15, 100, 1000, [1, 2, 3]),\n out_60_15_100_1000, rtol=1e-5)\n assert_allclose(apex_out.map_E_to_height(60, 15, 200, 500, [1, 2, 3]),\n out_60_15_200_500, rtol=1e-5)\n assert_allclose(apex_out.map_E_to_height(60, 30, 100, 500, [1, 2, 3]),\n out_60_30_100_500, rtol=1e-5)\n assert_allclose(apex_out.map_E_to_height(70, 15, 100, 500, [1, 2, 3]),\n out_70_15_100_500, rtol=1e-5)\n\n # vectorize lat\n assert_allclose(apex_out.map_E_to_height([60, 70], 15, 100, 500,\n np.array([[1, 2, 3]]*2).T),\n np.array([out_60_15_100_500, out_70_15_100_500]).T,\n rtol=1e-5)\n\n # vectorize lon\n assert_allclose(apex_out.map_E_to_height(60, [15, 30], 100, 500,\n np.array([[1, 2, 3]]*2).T),\n np.array([out_60_15_100_500, out_60_30_100_500]).T,\n rtol=1e-5)\n\n # vectorize height\n assert_allclose(apex_out.map_E_to_height(60, 15, [100, 200], 500,\n np.array([[1, 2, 3]]*2).T),\n np.array([out_60_15_100_500, out_60_15_200_500]).T,\n rtol=1e-5)\n\n # vectorize newheight\n assert_allclose(apex_out.map_E_to_height(60, 15, 100, [500, 1000],\n np.array([[1, 2, 3]]*2).T),\n 
np.array([out_60_15_100_500, out_60_15_100_1000]).T,\n rtol=1e-5)\n\n # vectorize E\n assert_allclose(apex_out.map_E_to_height(60, 15, 100, 500,\n np.array([[1, 2, 3], [2, 3, 4]]).T),\n np.array([out_60_15_100_500, out_60_15_100_500_234]).T,\n rtol=1e-5)\n\n\n###============================================================================\n### Test the map_V_to_height() method\n###============================================================================\n\n\ndef test_map_V_to_height():\n apex_out = Apex(date=2000, refh=300)\n out_60_15_100_500 = [0.819719, 2.845114, 0.695437]\n out_60_15_100_500_234 = [1.830277, 4.14345, 0.947624]\n out_60_15_100_1000 = [0.924577, 3.149964, 0.851343]\n out_60_15_200_500 = [0.803882, 2.793206, 0.682839]\n out_60_30_100_500 = [0.761412, 2.878837, 0.736549]\n out_70_15_100_500 = [0.846819, 2.592572, 0.347919]\n\n # scalar\n assert_allclose(apex_out.map_V_to_height(60, 15, 100, 500, [1, 2, 3]),\n out_60_15_100_500, rtol=1e-5)\n assert_allclose(apex_out.map_V_to_height(60, 15, 100, 500, [2, 3, 4]),\n out_60_15_100_500_234, rtol=1e-5)\n assert_allclose(apex_out.map_V_to_height(60, 15, 100, 1000, [1, 2, 3]),\n out_60_15_100_1000, rtol=1e-5)\n assert_allclose(apex_out.map_V_to_height(60, 15, 200, 500, [1, 2, 3]),\n out_60_15_200_500, rtol=1e-5)\n assert_allclose(apex_out.map_V_to_height(60, 30, 100, 500, [1, 2, 3]),\n out_60_30_100_500, rtol=1e-5)\n assert_allclose(apex_out.map_V_to_height(70, 15, 100, 500, [1, 2, 3]),\n out_70_15_100_500, rtol=1e-5)\n\n # vectorize lat\n assert_allclose(apex_out.map_V_to_height([60, 70], 15, 100, 500,\n np.array([[1, 2, 3]]*2).T),\n np.array([out_60_15_100_500, out_70_15_100_500]).T,\n rtol=1e-5)\n\n # vectorize lon\n assert_allclose(apex_out.map_V_to_height(60, [15, 30], 100, 500,\n np.array([[1, 2, 3]]*2).T),\n np.array([out_60_15_100_500, out_60_30_100_500]).T,\n rtol=1e-5)\n\n # vectorize height\n assert_allclose(apex_out.map_V_to_height(60, 15, [100, 200], 500,\n np.array([[1, 2, 3]]*2).T),\n np.array([out_60_15_100_500, out_60_15_200_500]).T,\n rtol=1e-5)\n\n # vectorize newheight\n assert_allclose(apex_out.map_V_to_height(60, 15, 100, [500, 1000],\n np.array([[1, 2, 3]]*2).T),\n np.array([out_60_15_100_500, out_60_15_100_1000]).T,\n rtol=1e-5)\n\n # vectorize E\n assert_allclose(apex_out.map_V_to_height(60, 15, 100, 500,\n np.array([[1, 2, 3],\n [2, 3, 4]]).T),\n np.array([out_60_15_100_500, out_60_15_100_500_234]).T,\n rtol=1e-5)\n\n\n###============================================================================\n### Test basevectors_qd()\n###============================================================================\n\n\n# test coords\n\ndef test_basevectors_qd_scalar_geo():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.basevectors_qd(60, 15, 100, coords='geo'),\n apex_out._basevec(60, 15, 100))\n\n\ndef test_basevectors_qd_scalar_apex():\n apex_out = Apex(date=2000, refh=300)\n glat, glon, _ = apex_out.apex2geo(60, 15, 100, precision=1e-2)\n assert_allclose(apex_out.basevectors_qd(60, 15, 100, coords='apex',\n precision=1e-2),\n apex_out._basevec(glat, glon, 100))\n\n\ndef test_basevectors_qd_scalar_qd():\n apex_out = Apex(date=2000, refh=300)\n glat, glon, _ = apex_out.qd2geo(60, 15, 100, precision=1e-2)\n assert_allclose(apex_out.basevectors_qd(60, 15, 100, coords='qd',\n precision=1e-2),\n apex_out._basevec(glat, glon, 100))\n\n# test shapes and vectorization of arguments\n\ndef test_basevectors_qd_scalar_shape():\n apex_out = Apex(date=2000, refh=300)\n ret = apex_out.basevectors_qd(60, 15, 
100)\n for r in ret:\n assert r.shape == (2,)\n\n\ndef test_basevectors_qd_vectorization():\n apex_out = Apex(date=2000, refh=300)\n ret = apex_out.basevectors_qd([60, 60, 60, 60], 15, 100, coords='geo')\n for r in ret:\n assert r.shape == (2, 4)\n ret = apex_out.basevectors_qd(60, [15, 15, 15, 15], 100, coords='geo')\n for r in ret:\n assert r.shape == (2, 4)\n ret = apex_out.basevectors_qd(60, 15, [100, 100, 100, 100], coords='geo')\n for r in ret:\n assert r.shape == (2, 4)\n\n\n# test array return values\n\ndef test_basevectors_qd_array():\n apex_out = Apex(date=2000, refh=300)\n f1, f2 = apex_out.basevectors_qd([0, 30], 15, 100, coords='geo')\n f1_lat0, f2_lat0 = apex_out._basevec(0, 15, 100)\n f1_lat30, f2_lat30 = apex_out._basevec(30, 15, 100)\n assert_allclose(f1[:, 0], f1_lat0)\n assert_allclose(f2[:, 0], f2_lat0)\n assert_allclose(f1[:, 1], f1_lat30)\n assert_allclose(f2[:, 1], f2_lat30)\n\n\n###============================================================================\n### Test basevectors_apex()\n###============================================================================\n\n\n# test against return from _geo2apexall for different coords\n\ndef test_basevectors_apex_scalar_geo():\n apex_out = Apex(date=2000, refh=300)\n\n (f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,\n e3) = apex_out.basevectors_apex(60, 15, 100, coords='geo')\n\n (_, _, _, _, f1_, f2_, _, d1_, d2_, d3_, _, e1_, e2_,\n e3_) = apex_out._geo2apexall(60, 15, 100)\n\n assert_allclose(f1, f1_)\n assert_allclose(f2, f2_)\n assert_allclose(d1, d1_)\n assert_allclose(d2, d2_)\n assert_allclose(d3, d3_)\n assert_allclose(e1, e1_)\n assert_allclose(e2, e2_)\n assert_allclose(e3, e3_)\n\n\ndef test_basevectors_apex_scalar_apex():\n apex_out = Apex(date=2000, refh=300)\n\n (f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,\n e3) = apex_out.basevectors_apex(60, 15, 100, coords='apex', precision=1e-2)\n\n glat, glon, _ = apex_out.apex2geo(60, 15, 100, precision=1e-2)\n (_, _, _, _, f1_, f2_, _, d1_, d2_, d3_, _, e1_, e2_,\n e3_) = apex_out._geo2apexall(glat, glon, 100)\n\n assert_allclose(f1, f1_)\n assert_allclose(f2, f2_)\n assert_allclose(d1, d1_)\n assert_allclose(d2, d2_)\n assert_allclose(d3, d3_)\n assert_allclose(e1, e1_)\n assert_allclose(e2, e2_)\n assert_allclose(e3, e3_)\n\n\ndef test_basevectors_apex_scalar_qd():\n apex_out = Apex(date=2000, refh=300)\n\n (f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,\n e3) = apex_out.basevectors_apex(60, 15, 100, coords='qd', precision=1e-2)\n\n glat, glon, _ = apex_out.qd2geo(60, 15, 100, precision=1e-2)\n (_, _, _, _, f1_, f2_, _, d1_, d2_, d3_, _, e1_, e2_,\n e3_) = apex_out._geo2apexall(glat, glon, 100)\n\n assert_allclose(f1, f1_)\n assert_allclose(f2, f2_)\n assert_allclose(d1, d1_)\n assert_allclose(d2, d2_)\n assert_allclose(d3, d3_)\n assert_allclose(e1, e1_)\n assert_allclose(e2, e2_)\n assert_allclose(e3, e3_)\n\n\n# test shapes and vectorization of arguments\n\ndef test_basevectors_apex_scalar_shape():\n apex_out = Apex(date=2000, refh=300)\n ret = apex_out.basevectors_apex(60, 15, 100, precision=1e-2)\n for r in ret[:2]:\n assert r.shape == (2,)\n for r in ret[2:]:\n assert r.shape == (3,)\n\n\ndef test_basevectors_apex_vectorization():\n apex_out = Apex(date=2000, refh=300)\n ret = apex_out.basevectors_apex([60, 60, 60, 60], 15, 100)\n for r in ret[:2]:\n assert r.shape == (2, 4)\n for r in ret[2:]:\n assert r.shape == (3, 4)\n ret = apex_out.basevectors_apex(60, [15, 15, 15, 15], 100)\n for r in ret[:2]:\n assert r.shape == (2, 4)\n for r in ret[2:]:\n assert r.shape 
== (3, 4)\n ret = apex_out.basevectors_apex(60, 15, [100, 100, 100, 100])\n for r in ret[:2]:\n assert r.shape == (2, 4)\n for r in ret[2:]:\n assert r.shape == (3, 4)\n\n\n# test correct vectorization of height\ndef test_basevectors_apex_vectorization_height():\n apex_out = Apex(date=2000, refh=0)\n (f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,\n e3) = apex_out.basevectors_apex(60, 15, [200, 400], coords='geo')\n (_, _, _, _, f1_1, f2_1, _, d1_1, d2_1, d3_1, _, e1_1, e2_1,\n e3_1) = apex_out._geo2apexall(60, 15, 200)\n (_, _, _, _, f1_2, f2_2, _, d1_2, d2_2, d3_2, _, e1_2, e2_2,\n e3_2) = apex_out._geo2apexall(60, 15, 400)\n\n assert_allclose(f1[:, 0], f1_1)\n assert_allclose(f2[:, 0], f2_1)\n assert_allclose(d1[:, 0], d1_1)\n assert_allclose(d2[:, 0], d2_1)\n assert_allclose(d3[:, 0], d3_1)\n assert_allclose(e1[:, 0], e1_1)\n assert_allclose(e2[:, 0], e2_1)\n assert_allclose(e3[:, 0], e3_1)\n\n assert_allclose(f3[:, 0], np.array([-0.088671, -0.018272, 0.993576]),\n rtol=1e-4)\n assert_allclose(g1[:, 0], np.array([0.903098, 0.245273, 0.085107]),\n rtol=1e-4)\n assert_allclose(g2[:, 0], np.array([-0.103495, 1.072078, 0.01048]),\n rtol=1e-4)\n assert_allclose(g3[:, 0], np.array([0, 0, 1.006465]), rtol=1e-4)\n\n assert_allclose(f1[:, 1], f1_2)\n assert_allclose(f2[:, 1], f2_2)\n assert_allclose(d1[:, 1], d1_2)\n assert_allclose(d2[:, 1], d2_2)\n assert_allclose(d3[:, 1], d3_2)\n assert_allclose(e1[:, 1], e1_2)\n assert_allclose(e2[:, 1], e2_2)\n assert_allclose(e3[:, 1], e3_2)\n\n assert_allclose(f3[:, 1], np.array([-0.085415, -0.021176, 0.989645]),\n rtol=1e-4)\n assert_allclose(g1[:, 1], np.array([0.902695, 0.246919, 0.083194]),\n rtol=1e-4)\n assert_allclose(g2[:, 1], np.array([-0.11051, 1.066094, 0.013274]),\n rtol=1e-4)\n assert_allclose(g3[:, 1], np.array([0, 0, 1.010463]), rtol=1e-4)\n\n\n# test scalar return values\n\ndef test_basevectors_apex_scalar():\n apex_out = Apex(date=2000, refh=300)\n\n (f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,\n e3) = apex_out.basevectors_apex(0, 15, 100, coords='geo')\n (_, _, _, _, f1_1, f2_1, _, d1_1, d2_1, d3_1, _, e1_1, e2_1,\n e3_1) = apex_out._geo2apexall(0, 15, 100)\n\n assert_allclose(f1, f1_1)\n assert_allclose(f2, f2_1)\n assert_allclose(d1, d1_1)\n assert_allclose(d2, d2_1)\n assert_allclose(d3, d3_1)\n assert_allclose(e1, e1_1)\n assert_allclose(e2, e2_1)\n assert_allclose(e3, e3_1)\n\n assert_allclose(f3, np.array([0.092637, -0.245951, 0.938848]), rtol=1e-4)\n assert_allclose(g1, np.array([0.939012, 0.073416, -0.07342]), rtol=1e-4)\n assert_allclose(g2, np.array([0.055389, 1.004155, 0.257594]), rtol=1e-4)\n assert_allclose(g3, np.array([0, 0, 1.065135]), rtol=1e-4)\n\n\n# test 1D array return values\n\ndef test_basevectors_apex_array():\n apex_out = Apex(date=2000, refh=300)\n (f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,\n e3) = apex_out.basevectors_apex([0, 30], 15, 100, coords='geo')\n (_, _, _, _, f1_1, f2_1, _, d1_1, d2_1, d3_1, _, e1_1, e2_1,\n e3_1) = apex_out._geo2apexall(0, 15, 100)\n (_, _, _, _, f1_2, f2_2, _, d1_2, d2_2, d3_2, _, e1_2, e2_2,\n e3_2) = apex_out._geo2apexall(30, 15, 100)\n\n assert_allclose(f1[:, 0], f1_1)\n assert_allclose(f2[:, 0], f2_1)\n assert_allclose(d1[:, 0], d1_1)\n assert_allclose(d2[:, 0], d2_1)\n assert_allclose(d3[:, 0], d3_1)\n assert_allclose(e1[:, 0], e1_1)\n assert_allclose(e2[:, 0], e2_1)\n assert_allclose(e3[:, 0], e3_1)\n\n assert_allclose(f3[:, 0], np.array([0.092637, -0.245951, 0.938848]),\n rtol=1e-4)\n assert_allclose(g1[:, 0], np.array([0.939012, 0.073416, -0.07342]),\n rtol=1e-4)\n 
assert_allclose(g2[:, 0], np.array([0.055389, 1.004155, 0.257594]),\n rtol=1e-4)\n assert_allclose(g3[:, 0], np.array([0, 0, 1.065135]), rtol=1e-4)\n\n assert_allclose(f1[:, 1], f1_2)\n assert_allclose(f2[:, 1], f2_2)\n assert_allclose(d1[:, 1], d1_2)\n assert_allclose(d2[:, 1], d2_2)\n assert_allclose(d3[:, 1], d3_2)\n assert_allclose(e1[:, 1], e1_2)\n assert_allclose(e2[:, 1], e2_2)\n assert_allclose(e3[:, 1], e3_2)\n\n assert_allclose(f3[:, 1], np.array([-0.036618, -0.071019, 0.861604]),\n rtol=1e-4)\n assert_allclose(g1[:, 1], np.array([0.844391, 0.015353, 0.037152]),\n rtol=1e-4)\n assert_allclose(g2[:, 1], np.array([0.050808, 1.02131, 0.086342]),\n rtol=1e-4)\n assert_allclose(g3[:, 1], np.array([0, 0, 1.160625]), rtol=1e-4)\n\n\n# test that vectors are calculated correctly\n\ndef test_basevectors_apex_delta():\n apex_out = Apex(date=2000, refh=300)\n for lat in range(0, 90, 10):\n for lon in range(0, 360, 15):\n (f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,\n e3) = apex_out.basevectors_apex(lat, lon, 500)\n f = [np.append(f1, 0), np.append(f2, 0), f3]\n g = [g1, g2, g3]\n d = [d1, d2, d3]\n e = [e1, e2, e3]\n for i, j in [(i, j) for i in range(3) for j in range(3)]:\n delta = 1 if i == j else 0\n assert_allclose(np.sum(f[i]*g[j]), delta, rtol=0, atol=1e-5)\n assert_allclose(np.sum(d[i]*e[j]), delta, rtol=0, atol=1e-5)\n\n\ndef test_basevectors_apex_invalid_scalar():\n apex_out = Apex(date=2000, refh=10000)\n with warnings.catch_warnings(record=True) as w:\n (f1, f2, f3, g1, g2, g3, d1, d2, d3, e1, e2,\n e3) = apex_out.basevectors_apex(0, 0, 0)\n assert issubclass(w[-1].category, UserWarning)\n assert 'set to -9999 where' in str(w[-1].message)\n\n invalid = [-9999, -9999, -9999]\n assert not np.allclose(f1, invalid[:2])\n assert not np.allclose(f2, invalid[:2])\n assert_allclose(f3, invalid)\n assert_allclose(g1, invalid)\n assert_allclose(g2, invalid)\n assert_allclose(g3, invalid)\n assert_allclose(d1, invalid)\n assert_allclose(d2, invalid)\n assert_allclose(d3, invalid)\n assert_allclose(e1, invalid)\n assert_allclose(e2, invalid)\n assert_allclose(e3, invalid)\n\n\n###============================================================================\n### Test the get_apex() method\n###============================================================================\n\n\ndef test_get_apex():\n apex_out = Apex(date=2000, refh=300)\n assert_allclose(apex_out.get_apex(10), 507.409702543805)\n assert_allclose(apex_out.get_apex(60), 20313.026999999987)\n\n\ndef test_get_apex_invalid_lat():\n apex_out = Apex(date=2000, refh=300)\n with pytest.raises(ValueError):\n apex_out.get_apex(91)\n with pytest.raises(ValueError):\n apex_out.get_apex(-91)\n apex_out.get_apex(90)\n apex_out.get_apex(-90)\n\n assert_allclose(apex_out.get_apex(90+1e-5), apex_out.get_apex(90),\n rtol=0, atol=1e-8)\n\n\n###============================================================================\n### Test the set_epoch() method\n###============================================================================\n\n\ndef test_set_epoch():\n apex_out = Apex(date=2000.2, refh=300)\n assert_allclose(apex_out.year, 2000.2)\n ret_2000_2_py = apex_out._geo2apex(60, 15, 100)\n apex_out.set_epoch(2000.8)\n assert_allclose(apex_out.year, 2000.8)\n ret_2000_8_py = apex_out._geo2apex(60, 15, 100)\n\n assert ret_2000_2_py != ret_2000_8_py\n\n fa.loadapxsh(apex_out.datafile, 2000.2)\n ret_2000_2_apex = fa.apxg2all(60, 15, 100, 300, 0)[2:4]\n fa.loadapxsh(apex_out.datafile, 2000.8)\n ret_2000_8_apex = fa.apxg2all(60, 15, 100, 300, 0)[2:4]\n\n 
assert ret_2000_2_apex != ret_2000_8_apex\n\n assert_allclose(ret_2000_2_py, ret_2000_2_apex)\n assert_allclose(ret_2000_8_py, ret_2000_8_apex)\n\n\n###============================================================================\n### Test the set_refh() method\n###============================================================================\n\n\ndef test_set_refh():\n apex_out = Apex(date=2000, refh=300)\n assert apex_out.refh == 300\n ret_300 = apex_out._geo2apex(60, 15, 100)\n apex_out.set_refh(500)\n assert apex_out.refh == 500\n ret_500 = apex_out._geo2apex(60, 15, 100)\n\n assert_allclose(ret_300, fa.apxg2all(60, 15, 100, 300, 0)[2:4])\n assert_allclose(ret_500, fa.apxg2all(60, 15, 100, 500, 0)[2:4])\n\n\nif __name__ == '__main__':\n pytest.main()\n"
] | [
[
"numpy.allclose",
"numpy.append",
"numpy.testing.assert_allclose",
"numpy.array",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
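The `apis` field above inventories the numpy entry points used by the quoted apexpy test module. As a self-contained cross-check of that inventory (illustrative only, not part of the record), the same five calls in isolation:

import numpy as np
from numpy.testing import assert_allclose

# numpy.array / numpy.append: extend a 2-vector to a 3-vector, as the
# basis-vector delta test does with f1 and f2 before pairing them with g3.
f1 = np.array([0.9, 0.1])
f = np.append(f1, 0.0)

# numpy.sum: reduce an elementwise product to a scalar "dot product".
g = np.array([1.0, 0.0, 0.0])
dot = np.sum(f * g)

# numpy.allclose returns a bool, while numpy.testing.assert_allclose raises
# AssertionError with a diff on mismatch; the tests above rely on both.
assert np.allclose(dot, 0.9, rtol=0, atol=1e-8)
assert_allclose(dot, 0.9, rtol=0, atol=1e-8)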
markvilar/Trajectory | [
"1879bec9c0383576464d92772f2b802cd2cbd725"
] | [
"Python/georeference.py"
] | [
"import copy\n\nfrom typing import Dict, List, Tuple\n\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nimport matplotlib.pyplot as plt\nplt.style.use(\"./Styles/Scientific.mplstyle\")\nimport matplotlib.patches as patches\nimport msgpack\nimport numpy as np\nimport quaternion as quat\n\nimport optimization\n\nfrom configuration import Configuration\nfrom data_structures import Map, MapUnpacker, Trajectory\nfrom plotting import plot_3D_line, plot_3D_scatter\nfrom utilities import closest_point, clamp_signal, quat_from_axis_angle, \\\n quat_array_to_vec3_array, vec4_array_to_quat_array\n\ndef get_extremas_array_2D(arrays: List):\n m, n = arrays[0].shape\n\n extremas = []\n for array in arrays:\n extremas.append([ np.min(array, axis=0), np.max(array, axis=0) ])\n\n extremas = np.array(extremas)\n\n mins = np.min(np.min(extremas, axis=0), axis=0)\n maxs= np.max(np.max(extremas, axis=0), axis=0)\n\n return np.stack([mins, maxs], axis=1)\n\ndef visualize_alignment_results(config: Configuration, trajectories: Dict, \\\n key_est, key_gt, label_est: str=\"Keyframes\", label_gt: str=\"Ground Truth\"):\n # Plot parameters.\n margins_pos = np.array([ -2, 2 ])\n margins_ang = np.array([ -10, 10 ])\n pad = 2.0\n w_pad = 2.0\n h_pad = 2.0\n patch_color = \"y\"\n patch_alpha = 0.5\n\n # Assign to variables.\n ground_truth = trajectories[key_gt]\n estimate = trajectories[key_est]\n\n # Truncate ground truth.\n start, _ = closest_point(estimate.timestamps[0], ground_truth.timestamps)\n end, _ = closest_point(estimate.timestamps[-1], ground_truth.timestamps)\n ground_truth.timestamps = ground_truth.timestamps[start:end+1]\n ground_truth.positions= ground_truth.positions[start:end+1]\n ground_truth.attitudes= ground_truth.attitudes[start:end+1]\n\n # Get ground truth attitude.\n q_ground_truth = vec4_array_to_quat_array(ground_truth.attitudes)\n q_estimate = vec4_array_to_quat_array(estimate.attitudes)\n angles_ground_truth = quat.as_euler_angles(q_ground_truth) * 180 / np.pi\n angles_estimate = quat.as_euler_angles(q_estimate) * 180 / np.pi\n angles_ground_truth = clamp_signal(angles_ground_truth, 0, 360)\n angles_estimate = clamp_signal(angles_estimate, 0, 360)\n\n # Calculate limits.\n lims_time = [ estimate.timestamps[0], estimate.timestamps[-1] ]\n lims_pos = get_extremas_array_2D( \\\n [ estimate.positions, ground_truth.positions ])\n lims_ang = get_extremas_array_2D( \\\n [ angles_estimate, angles_ground_truth ])\n lims_pos += margins_pos\n lims_ang += margins_ang\n\n # Visualize trajectory - Figure.\n fig1, ax1 = plt.subplots(nrows=3, ncols=1, figsize=(7, 4.5))\n fig1.tight_layout(pad=pad, w_pad=w_pad, h_pad=h_pad)\n\n # Position figure - Northing.\n ax1[0].plot(estimate.timestamps, estimate[:, 0])\n ax1[0].plot(ground_truth.timestamps, ground_truth[:, 0])\n ax1[0].set_xlim(lims_time)\n ax1[0].set_ylim(lims_pos[0])\n ax1[0].set_xlabel(r\"Time, $t$ $[s]$\")\n ax1[0].set_ylabel(r\"Northing, $N$ $[m]$\")\n \n # Position figure - Easting.\n ax1[1].plot(estimate.timestamps, estimate[:, 1])\n ax1[1].plot(ground_truth.timestamps, ground_truth[:, 1])\n ax1[1].set_xlim(lims_time)\n ax1[1].set_ylim(lims_pos[1])\n ax1[1].set_xlabel(r\"Time, $t$ $[s]$\")\n ax1[1].set_ylabel(r\"Easting, $E$ $[m]$\")\n\n # Position figure - Depth.\n ax1[2].plot(estimate.timestamps, estimate[:, 2], label=label_est)\n ax1[2].plot(ground_truth.timestamps, ground_truth[:, 2], \\\n label=label_gt)\n ax1[2].set_xlim(lims_time)\n ax1[2].set_ylim(lims_pos[2])\n ax1[2].set_xlabel(r\"Time, $t$ $[s]$\")\n ax1[2].set_ylabel(r\"Depth, $D$ 
$[m]$\")\n\n # Position figure - legend.\n lg1 = fig1.legend(bbox_to_anchor=(1, 1), loc=\"upper right\", frameon=True, \\\n fancybox=False)\n fr1 = lg1.get_frame()\n fr1.set_facecolor(\"white\")\n fr1.set_edgecolor(\"black\")\n\n # Visualize attitudes - Figure.\n fig2, ax2 = plt.subplots(nrows=3, ncols=1, figsize=(7, 4.5))\n fig2.tight_layout(pad=2.0, w_pad=2.0, h_pad=2.0)\n\n # Rotation 1.\n ax2[0].plot(estimate.timestamps, angles_estimate[:, 0])\n ax2[0].plot(ground_truth.timestamps, angles_ground_truth[:, 0])\n ax2[0].set_xlim(lims_time)\n ax2[0].set_ylim([ 80, 220 ])\n #ax2[0].set_ylim(lims_ang[0])\n ax2[0].set_xlabel(r\"Time, $t$ $[s]$\")\n ax2[0].set_ylabel(r\"Euler X, $r_{x}$ $[\\text{deg}]$\")\n\n # Rotation 2.\n ax2[1].plot(estimate.timestamps, angles_estimate[:, 1])\n ax2[1].plot(ground_truth.timestamps, angles_ground_truth[:, 1])\n ax2[1].set_xlim(lims_time)\n ax2[1].set_ylim(lims_ang[1])\n ax2[1].set_xlabel(r\"Time, $t$ $[s]$\")\n ax2[1].set_ylabel(r\"Euler Y, $r_{y}$ $[\\text{deg}]$\")\n\n # Rotation 3.\n ax2[2].plot(estimate.timestamps, angles_estimate[:, 2], label=label_est)\n ax2[2].plot(ground_truth.timestamps, angles_ground_truth[:, 2], \\\n label=label_gt)\n ax2[2].set_xlim(lims_time)\n ax2[2].set_ylim(lims_ang[2])\n ax2[2].set_xlabel(r\"Time, $t$ $[s]$\")\n ax2[2].set_ylabel(r\"Euler Z, $r_{z}$ $[\\text{deg}]$\")\n\n lg2 = fig2.legend(bbox_to_anchor=(1, 1), loc=\"upper right\", frameon=True, \\\n fancybox=False)\n fr2 = lg2.get_frame()\n fr2.set_facecolor(\"white\")\n fr2.set_edgecolor(\"black\")\n\n if config.save_figures:\n fig1.savefig(config.output_dir + config.name + \"-\" + \"Positions.pdf\", \\\n dpi=300)\n fig2.savefig(config.output_dir + config.name + \"-\" + \"Attitudes.pdf\", \\\n dpi=300)\n\n if config.show_figures:\n plt.show()\n\ndef georeference(config: Configuration, trajectories: Dict, map: Map):\n \"\"\"\n \"\"\"\n # Get trajectories.\n ground_truth = trajectories[\"Ground-Truth\"]\n keyframes = trajectories[\"Keyframes\"]\n frames = trajectories[\"Frames\"]\n\n # Get map landmarks.\n landmarks = map.get_landmarks()\n\n # Perform temporal and spatial optimization.\n results = optimization.optimize(config.optim, keyframes, ground_truth)\n rotation = results.rotation\n translation = results.translation\n matched_keyframes = results.matched_frames\n matched_ground_truth = results.matched_ground_truth\n\n # Add matched trajectories.\n trajectories[\"Matched-Keyframes\"] = matched_keyframes\n trajectories[\"Matched-Ground-Truth\"] = matched_ground_truth\n\n # Add bias and apply rotation and translation.\n keyframes.add_time_bias(config.optim.bias)\n frames.add_time_bias(config.optim.bias)\n keyframes.apply_SE3_transform(rotation, translation)\n frames.apply_SE3_transform(rotation, translation)\n landmarks.apply_SE3_transform(rotation, translation)\n\n trajectories[\"Keyframes\"] = keyframes\n trajectories[\"Frames\"] = frames\n\n visualize_alignment_results(config, trajectories, \"Keyframes\", \\\n \"Ground-Truth\")\n\n if config.save_output:\n keyframes.save_as_csv(config.output_dir + config.name + \"-\" \\\n + \"Keyframes.csv\")\n frames.save_as_csv(config.output_dir + config.name + \"-\" \\\n + \"Frames.csv\")\n landmarks.save_as_csv(config.output_dir + config.name + \"-\" \\\n + \"Landmarks.csv\")\n matched_keyframes.save_as_csv(config.output_dir + config.name + \"-\" \\\n + \"Matched-Keyframes.csv\")\n matched_ground_truth.save_as_csv(config.output_dir + config.name + \"-\" \\\n + \"Matched-Ground-Truth.csv\")\n"
] | [
[
"numpy.min",
"matplotlib.use",
"matplotlib.pyplot.subplots",
"numpy.stack",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
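The quoted `georeference.py` computes shared axis limits with `get_extremas_array_2D`: per-array column extremas first, then a global reduction. A standalone re-derivation of that helper under the record's listed calls (`numpy.min`, `numpy.max`, `numpy.stack`, `numpy.array`); the function name and sample arrays here are illustrative:

import numpy as np

def column_extremas(arrays):
    # Per-array [min, max] along axis 0, stacked into shape (k, 2, n).
    extremas = np.array([[np.min(a, axis=0), np.max(a, axis=0)] for a in arrays])
    # Global column-wise minimum and maximum across all k arrays.
    mins = np.min(np.min(extremas, axis=0), axis=0)
    maxs = np.max(np.max(extremas, axis=0), axis=0)
    # One (min, max) row per column, matching the original's return shape.
    return np.stack([mins, maxs], axis=1)

a = np.array([[0.0, 5.0], [2.0, 1.0]])
b = np.array([[-1.0, 3.0], [4.0, 2.0]])
print(column_extremas([a, b]))  # [[-1. 4.] [ 1. 5.]]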
WagnerLabPapers/Waskom_PNAS_2017 | [
"ef7ec8513c61ef031e09f9e67a3d061b038f8db0"
] | [
"sup_figure_4.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom plotutils import set_style, savefig, get_colormap, get_subject_order\n\nfrom figure_2 import plot_brains, plot_hists\n\n\ndef setup_figure():\n\n f = plt.figure(figsize=(7, 2.8))\n\n brain_gs = plt.GridSpec(3, 4, .13, .13, .87, .99, .05, .05)\n brain_axes = [f.add_subplot(gs) for gs in brain_gs]\n brain_axes = np.vstack(np.array_split(brain_axes, 6))\n\n hist_gs = plt.GridSpec(3, 2, .01, .15, .99, .99, 7, .08)\n hist_axes = [f.add_subplot(gs) for gs in hist_gs]\n\n cbar_ax = f.add_axes([.35, .07, .3, .04])\n\n return f, brain_axes, hist_axes, cbar_ax\n\n\ndef plot_colorbar(f, ax):\n\n cmap = get_colormap(\"sticks\")\n\n xx = np.arange(200).reshape(1, 200)\n\n ax.imshow(xx, rasterized=True, aspect=\"auto\", cmap=cmap)\n\n kws = dict(size=7, ha=\"center\")\n f.text(.35, .03, \"Orientation\", **kws)\n f.text(.65, .03, \"Color\", **kws)\n\n ax.set(xticks=[], yticks=[])\n\n\nif __name__ == \"__main__\":\n\n set_style()\n f, brain_axes, hist_axes, cbar_ax = setup_figure()\n\n subjects = get_subject_order(\"sticks\")\n\n plot_brains(subjects, brain_axes)\n plot_hists(subjects, hist_axes, 2, 450)\n plot_colorbar(f, cbar_ax)\n\n savefig(f, __file__)\n"
] | [
[
"numpy.arange",
"matplotlib.pyplot.GridSpec",
"numpy.array_split",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
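The quoted `sup_figure_4.py` lays out its brain panels by iterating a `plt.GridSpec` and regrouping the resulting axes with `np.array_split`. A hedged sketch of the same idiom follows; it passes the grid geometry as keyword arguments (`left=`, `bottom=`, ...) rather than positionally, since recent matplotlib releases put a `figure` parameter ahead of those slots, and the backend and output filename are assumptions:

import numpy as np
import matplotlib
matplotlib.use("Agg")  # headless backend, assumed for this sketch
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(7, 2.8))
grid = plt.GridSpec(3, 4, left=0.13, bottom=0.13, right=0.87, top=0.99,
                    wspace=0.05, hspace=0.05)
# Iterating a GridSpec yields one SubplotSpec per cell, row-major.
axes = [fig.add_subplot(cell) for cell in grid]
# Regroup the 12 axes into 6 pairs, as the original does per subject.
pairs = np.vstack(np.array_split(axes, 6))  # object array, shape (6, 2)

# np.arange as in the original's colorbar helper: a 1x200 ramp for imshow.
ramp = np.arange(200).reshape(1, 200)
pairs[0, 0].imshow(ramp, rasterized=True, aspect="auto")
fig.savefig("layout_demo.png", dpi=150)  # hypothetical output path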
hanaecarrie/pisap | [
"958f53dbc28afc6fb84c7f3d678c8549307ef9f5"
] | [
"pisap/base/image.py"
] | [
"##########################################################################\n# XXX - Copyright (C) XXX, 2017\n# Distributed under the terms of the CeCILL-B license, as published by\n# the CEA-CNRS-INRIA. Refer to the LICENSE file or to\n# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html\n# for details.\n##########################################################################\n\n# System import\nimport numpy\n\n# Package import\nfrom .observable import Observable\nfrom pisap.base.exceptions import Exception\nfrom pisap.plotting import plot_data\n\n\nclass Image(Observable):\n \"\"\" Class that defines an image.\n\n An image contains:\n * data: an array of data stored in a numpy.ndarray\n * data_type: whether the data is scalar, vector or matrix.\n * a dictionary of metadata\n\n If data_type is 'vector' or 'matrix', an array of dimension N will have a\n spacing of size N-1, respectivelly N-2.\n\n The following event is allowed:\n * modified\n \"\"\"\n def __init__(self, shape=None, spacing=None, data_type=\"scalar\",\n metadata=None, **kwargs):\n \"\"\" Create an image that can be modified as a numpy arrray.\n\n Parameters\n ----------\n shape: uplet (optional, default None)\n set this parameter to created an empty image.\n spacing: uplet (optional, default None)\n the image spacing, if not set consider a default isotropic spacing.\n data_type: str (optional, default 'scalar')\n the image data type: 'scalar', 'vector' or 'matrix'.\n metadata: dict (optional, default None)\n some metadata attached to this image.\n kwargs: dict (optional)\n extra arguments may contain the image data as 'data', the empty\n image data filled value as 'value' or any argument of numpy.ndarray\n constructor.\n \"\"\"\n # Check input parameters\n if data_type not in [\"scalar\", \"vector\", \"matrix\"]:\n raise Exception(\"Unknown data type '{0}'.\".format(data_type))\n\n # Define class attributes\n self._scroll_axis = 0\n self.data = None\n self.data_type = data_type\n self.metadata = metadata or {}\n self._spacing = None\n\n # Initialize the Image class\n Observable.__init__(self, [\"modified\"])\n\n # Image data initialization\n if \"data\" in kwargs:\n self.data = numpy.asarray(kwargs[\"data\"])\n del kwargs[\"data\"]\n else:\n if shape is None:\n raise Exception(\"Wrong shape '{0}'.\".format(shape))\n if \"value\" in kwargs:\n value = kwargs[\"value\"]\n del kwargs[\"value\"]\n else:\n value = None\n self.data = numpy.ndarray(shape, **kwargs)\n if value is not None:\n self.data.fill(value)\n\n # Image spacing initialization\n if spacing is None:\n self._set_spacing(self._default_spacing())\n else:\n self._set_spacing(spacing)\n\n def show(self):\n \"\"\" Display the image data.\n \"\"\"\n if numpy.iscomplex(self.data).any():\n plot_data(numpy.abs(self.data), scroll_axis=self._scroll_axis)\n else:\n plot_data(self.data, scroll_axis=self._scroll_axis)\n\n def modified(self):\n \"\"\" Send a modified signal to the observers.\n \"\"\"\n self.notify_observers(\"modified\")\n\n def __getitem__(self, where):\n \"\"\" Get an items of the image data.\n \"\"\"\n return self.data[where]\n\n def __setitem__(self, where, value):\n \"\"\" Set an item to the image data.\n \"\"\"\n self.data[where] = value\n\n def __array__(self):\n \"\"\" Return image data as a numpy array.\n \"\"\"\n return numpy.asarray(self.data)\n\n ######################################################################\n # Properties\n ######################################################################\n\n def _get_spacing(self):\n \"\"\" 
Get the image spacing.\n \"\"\"\n return self._spacing\n\n def _set_spacing(self, spacing):\n \"\"\" Set the image spacing.\n\n Parameters\n ----------\n spacing: uplet\n the image spacing.\n \"\"\"\n self._spacing = numpy.asarray(spacing, dtype=numpy.single)\n\n def _get_shape(self):\n \"\"\" Get the shape of the image.\n This function accounts for non-scalar data, i.e. 'vector' or 'matrix'\n vs 'scalar' data types.\n \"\"\"\n if self.data_type == \"scalar\":\n return self.data.shape\n elif self.data_type == \"vector\":\n return self.data.shape[:-1]\n elif self.data_type == \"matrix\":\n return self.data.shape[:-2]\n\n def _get_dtype(self):\n \"\"\" Get the image data type.\n \"\"\"\n return self.data.dtype\n\n def _get_ndim(self):\n \"\"\" Get the image dimension.\n This function accounts for non-scalar data, i.e. 'vector' or 'matrix'\n vs 'scalar' data types.\n \"\"\"\n if self.data_type == \"scalar\":\n return self.data.ndim\n elif self.data_type == \"vector\":\n return self.data.ndim - 1\n elif self.data_type == \"matrix\":\n return self.data.ndim - 2\n\n def _get_scroll_axis(self):\n \"\"\" Get the scroll axis.\n\n Returns\n ----------\n scroll_axis: int\n the scroll axis for 3d data.\n \"\"\"\n return self._scroll_axis\n\n def _set_scroll_axis(self, scroll_axis):\n \"\"\" Modify the scroll axis.\n\n Parameters\n ----------\n scroll_axis: int\n the scroll axis for 3d data.\n \"\"\"\n self._scroll_axis = scroll_axis\n\n scroll_axis = property(_get_scroll_axis, _set_scroll_axis)\n spacing = property(_get_spacing, _set_spacing)\n shape = property(_get_shape)\n dtype = property(_get_dtype)\n ndim = property(_get_ndim)\n\n ######################################################################\n # Private interface\n ######################################################################\n\n def _default_spacing(self):\n \"\"\" Return the default image spacing.\n \"\"\"\n dim = self._get_ndim()\n return numpy.ones(dim, dtype=numpy.single)\n"
] | [
[
"numpy.abs",
"numpy.asarray",
"numpy.ndarray",
"numpy.ones",
"numpy.iscomplex"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
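Two patterns in the quoted `Image` class are worth isolating: the complex-aware display guard in `show()` (`numpy.iscomplex` is elementwise, hence the `.any()` reduction) and the uninitialized-buffer allocation in the constructor (`numpy.ndarray(shape)` does not zero memory, which is why `fill()` follows). A minimal sketch with illustrative data:

import numpy

# Display guard: plot |data| only if any entry is genuinely complex
# (1 + 0j has zero imaginary part, so iscomplex reports False for it).
data = numpy.asarray([1 + 0j, 2 + 3j])
to_plot = numpy.abs(data) if numpy.iscomplex(data).any() else data

# 'value' branch of the constructor: allocate uninitialized, then fill.
buf = numpy.ndarray((2, 2), dtype=numpy.single)
buf.fill(0.0)

# _default_spacing(): one unit per image dimension, single precision.
spacing = numpy.ones(buf.ndim, dtype=numpy.single)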
maryamghr/whatshap | [
"61f64612751af3c0882582e6e879c6e6a90a3556"
] | [
"whatshap/cli/compare.py"
] | [
"\"\"\"\nCompare two or more phasings\n\"\"\"\nimport logging\nimport math\nfrom collections import defaultdict\nfrom contextlib import ExitStack\nimport dataclasses\nfrom itertools import chain, permutations\nfrom typing import Set, List, Optional, DefaultDict, Dict\n\nfrom whatshap.vcf import VcfReader, VcfVariant, VariantTable, PloidyError\nfrom whatshap.core import Genotype, SwitchFlipCalculator\nfrom whatshap.cli import CommandLineError\n\n\nlogger = logging.getLogger(__name__)\n\ncount_width = 9\n\n\n# fmt: off\ndef add_arguments(parser):\n add = parser.add_argument\n add('--sample', metavar='SAMPLE', default=None, help='Name of the sample '\n 'to process. If not given, use first sample found in VCF.')\n add('--names', metavar='NAMES', default=None, help='Comma-separated list '\n 'of data set names to be used in the report (in same order as VCFs).')\n add('--tsv-pairwise', metavar='TSVPAIRWISE', default=None, help='Filename to write '\n 'comparison results from pair-wise comparison to (tab-separated).')\n add('--tsv-multiway', metavar='TSVMULTIWAY', default=None, help='Filename to write '\n 'comparison results from multiway comparison to (tab-separated). Only for diploid vcfs.')\n add('--only-snvs', default=False, action=\"store_true\", help='Only process SNVs '\n 'and ignore all other variants.')\n add('--switch-error-bed', default=None, help='Write BED file with switch error positions '\n 'to given filename. Only for diploid vcfs.')\n add('--plot-blocksizes', default=None, help='Write PDF file with a block length histogram '\n 'to given filename (requires matplotlib).')\n add('--plot-sum-of-blocksizes', default=None, help='Write PDF file with a block length histogram in which the height of each bar corresponds to the sum of lengths.')\n add('--longest-block-tsv', default=None, help='Write position-wise agreement of longest '\n 'joint blocks in each chromosome to tab-separated file. 
Only for diploid vcfs.')\n add('--ploidy', '-p', metavar='PLOIDY', type=int, default=2, help='The ploidy of the sample(s) (default: %(default)s).')\n # TODO: what's the best way to request \"two or more\" VCFs?\n add('vcf', nargs='+', metavar='VCF', help='At least two phased VCF files to be compared.')\n# fmt: on\n\n\ndef validate(args, parser):\n if len(args.vcf) < 2:\n parser.error(\"At least two VCFs need to be given.\")\n if args.ploidy < 2:\n parser.error(\"Ploidy must be > 1.\")\n if args.ploidy > 2 and args.tsv_multiway:\n parser.error(\"Option --tsv-multiway can only be used if ploidy=2.\")\n if args.ploidy > 2 and args.switch_error_bed:\n parser.error(\"Option --switch-error-bed can only be used if ploidy=2.\")\n if args.ploidy > 2 and args.longest_block_tsv:\n parser.error(\"Option --longest-block-tsv can only be used if ploidy=2.\")\n\n\nclass SwitchFlips:\n def __init__(self, switches: int = 0, flips: int = 0):\n self.switches: int = switches\n self.flips: int = flips\n\n def __iadd__(self, other):\n self.switches += other.switches\n self.flips += other.flips\n return self\n\n def __repr__(self):\n return \"SwitchFlips(switches={}, flips={})\".format(self.switches, self.flips)\n\n def __str__(self):\n return \"{}/{}\".format(self.switches, self.flips)\n\n\nclass PhasingErrors:\n def __init__(\n self,\n switches: int = 0,\n hamming: int = 0,\n switch_flips: Optional[SwitchFlips] = None,\n diff_genotypes: int = 0,\n ):\n self.switches = switches\n self.hamming = hamming\n self.switch_flips = SwitchFlips() if switch_flips is None else switch_flips\n self.diff_genotypes = diff_genotypes\n\n def __iadd__(self, other: object) -> \"PhasingErrors\":\n if not isinstance(other, PhasingErrors):\n raise TypeError(\"Can only add to PhasingErrors\")\n self.switches += other.switches\n self.hamming += other.hamming\n self.switch_flips += other.switch_flips\n self.diff_genotypes += other.diff_genotypes\n return self\n\n def __repr__(self):\n return \"PhasingErrors(switches={}, hamming={}, switch_flips={}, diff_genotypes={})\".format(\n self.switches, self.hamming, self.switch_flips, self.diff_genotypes\n )\n\n\ndef complement(s):\n \"\"\"\n >>> complement('01100')\n '10011'\n \"\"\"\n t = {\"0\": \"1\", \"1\": \"0\"}\n return \"\".join(t[c] for c in s)\n\n\ndef hamming(s0, s1):\n \"\"\"\n >>> hamming('ABCD', 'AXCY')\n 2\n \"\"\"\n assert len(s0) == len(s1)\n return sum(c0 != c1 for c0, c1 in zip(s0, s1))\n\n\ndef switch_encoding(phasing):\n \"\"\"\n >>> switch_encoding('0001011')\n '001110'\n \"\"\"\n assert isinstance(phasing, str)\n return \"\".join((\"0\" if phasing[i - 1] == phasing[i] else \"1\") for i in range(1, len(phasing)))\n\n\ndef compute_switch_flips(phasing0, phasing1) -> SwitchFlips:\n assert len(phasing0) == len(phasing1)\n s0 = switch_encoding(phasing0)\n s1 = switch_encoding(phasing1)\n result = SwitchFlips()\n switches_in_a_row = 0\n for i, (p0, p1) in enumerate(zip(s0, s1)):\n if p0 != p1:\n switches_in_a_row += 1\n if (i + 1 == len(s0)) or (p0 == p1):\n result.flips += switches_in_a_row // 2\n result.switches += switches_in_a_row % 2\n switches_in_a_row = 0\n\n return result\n\n\ndef compute_matching_genotype_pos(phasing0, phasing1):\n \"\"\"\n Computes the positions on which both phasings agree on the genotype.\n \"\"\"\n assert len(phasing0) == len(phasing1)\n assert len(phasing0) >= 2\n assert len(phasing0[0]) == len(phasing1[0])\n assert all(len(phasing0[i]) == len(phasing0[0]) for i in range(1, len(phasing0)))\n num_vars = len(phasing0[0])\n matching_pos = [\n i\n for 
i in range(num_vars)\n if Genotype([int(hap[i]) for hap in phasing0])\n == Genotype([int(hap[i]) for hap in phasing1])\n ]\n return matching_pos\n\n\ndef compute_switch_errors_poly(phasing0, phasing1, matching_pos=None):\n \"\"\"\n Computes the number of necessary switches to transform phasing 0 into phasing 1 or vice versa.\n Positions with non-matching genotypes are omitted.\n \"\"\"\n assert len(phasing0) == len(phasing1)\n assert len(phasing0) >= 2\n assert len(phasing0[0]) == len(phasing1[0])\n assert all(len(phasing0[i]) == len(phasing0[0]) for i in range(1, len(phasing0)))\n num_vars = len(phasing0[0])\n\n # If positions with matching genotypes are not precomputed, do it here!\n if matching_pos is None:\n matching_pos = compute_matching_genotype_pos(phasing0, phasing1)\n\n phasing0_matched = [\"\".join([hap[i] for i in matching_pos]) for hap in phasing0]\n phasing1_matched = [\"\".join([hap[i] for i in matching_pos]) for hap in phasing1]\n\n vector_error = compute_switch_flips_poly(\n phasing0_matched,\n phasing1_matched,\n switch_cost=1,\n flip_cost=2 * num_vars * len(phasing0) + 1,\n )\n assert vector_error.flips == 0\n\n return vector_error.switches\n\n\ndef compute_switch_flips_poly(phasing0, phasing1, switch_cost=1, flip_cost=1):\n \"\"\"\n Computes the combined number of switches and flips, which are needed to transform phasing 0 into\n phasing 1 or vice versa.\n \"\"\"\n (result, switches_in_column, flips_in_column, poswise_config) = compute_switch_flips_poly_bt(\n phasing0, phasing1, switch_cost=switch_cost, flip_cost=flip_cost\n )\n return result\n\n\ndef compute_switch_flips_poly_bt(\n phasing0, phasing1, report_error_positions=False, switch_cost=1, flip_cost=1\n):\n # Check input\n if len(phasing0) != len(phasing1):\n logger.error(\n \"Incompatible phasings. Number of haplotypes is not equal (\"\n + str(len(phasing0))\n + \" != \"\n + str(len(phasing1))\n + \").\"\n )\n assert len(phasing0) == len(phasing1)\n\n num_pos = len(phasing0[0])\n if num_pos == 0:\n return SwitchFlips(), None, None, None\n ploidy = len(phasing0)\n if ploidy == 0:\n return SwitchFlips(), None, None, None\n for i in range(0, len(phasing1)):\n if len(phasing1[i]) != num_pos:\n logger.error(\n \"Inconsistent input for phasing. Haplotypes have different lengths ( len(phasing1[0]=\"\n + str(num_pos)\n + \" != len(phasing1[\"\n + str(i)\n + \"]=\"\n + str(len(phasing1[i]))\n + \".\"\n )\n assert len(phasing1[i]) == num_pos\n if len(phasing0[i]) != num_pos:\n logger.error(\n \"Inconsistent input for phasing. Haplotypes have different lengths ( len(phasing1[0]=\"\n + str(num_pos)\n + \" != len(phasing0[\"\n + str(i)\n + \"]=\"\n + str(len(phasing0[i]))\n + \".\"\n )\n assert len(phasing1[i]) == num_pos\n if ploidy > 6:\n logger.warning(\n \"Computing vector error with more than 6 haplotypes. This may take very long ...\"\n )\n\n # Compute comparison\n calc = SwitchFlipCalculator(ploidy, switch_cost, flip_cost)\n result = SwitchFlips()\n (\n switches,\n flips,\n switches_in_column,\n flips_in_column,\n positionwise_config,\n ) = calc.compute_switch_flips_poly(phasing0, phasing1)\n\n # Aggregate results\n result.switches = switches / ploidy\n result.flips = flips / ploidy\n return result, switches_in_column, flips_in_column, positionwise_config\n\n\ndef poly_num_switches(perm0, perm1):\n cost = 0\n for i in range(len(perm0)):\n if perm0[i] != perm1[i]:\n cost += 1\n return cost\n\n\ndef compare_block(phasing0, phasing1):\n \"\"\" Input are two lists of haplotype sequences over {0,1}. 
\"\"\"\n assert len(phasing0) == len(phasing1)\n ploidy = len(phasing0)\n\n minimum_hamming_distance = float(\"inf\")\n # compute minimum hamming distance\n for permutation in permutations(phasing0):\n # compute sum of hamming distances\n total_hamming = 0\n for i in range(ploidy):\n total_hamming += hamming(phasing1[i], permutation[i])\n total_hamming /= float(ploidy)\n minimum_hamming_distance = min(minimum_hamming_distance, total_hamming)\n\n switches = float(\"inf\")\n switch_flips = SwitchFlips(float(\"inf\"), float(\"inf\"))\n matching_pos = compute_matching_genotype_pos(phasing0, phasing1)\n\n if ploidy == 2:\n # conversion to int is allowed, as there should be no fractional error counts for diploid comparisons\n switches = int(hamming(switch_encoding(phasing0[0]), switch_encoding(phasing1[0])))\n switch_flips = compute_switch_flips(phasing0[0], phasing1[0])\n minimum_hamming_distance = int(minimum_hamming_distance)\n else:\n switches = compute_switch_errors_poly(phasing0, phasing1, matching_pos)\n switch_flips = compute_switch_flips_poly(phasing0, phasing1)\n\n return PhasingErrors(\n switches=switches,\n hamming=minimum_hamming_distance,\n switch_flips=switch_flips,\n diff_genotypes=len(phasing0[0]) - len(matching_pos),\n )\n\n\ndef fraction2percentstr(nominator, denominator):\n if denominator == 0:\n return \"--\"\n else:\n return \"{:.2f}%\".format(nominator * 100.0 / denominator)\n\n\ndef safefraction(nominator, denominator):\n if denominator == 0:\n return float(\"nan\")\n else:\n return nominator / denominator\n\n\ndef create_bed_records(chromosome, phasing0, phasing1, positions, annotation_string):\n \"\"\"Determines positions of switch errors between two phasings\n and yields one BED record per switch error (encoded as a tuple).\n The annotation_string is added to each record.\"\"\"\n assert len(phasing0) == len(phasing1) == len(positions)\n switch_encoding0 = switch_encoding(phasing0)\n switch_encoding1 = switch_encoding(phasing1)\n for i, (sw0, sw1) in enumerate(zip(switch_encoding0, switch_encoding1)):\n if sw0 != sw1:\n yield (chromosome, positions[i] + 1, positions[i + 1] + 1, annotation_string)\n\n\ndef print_stat(text: str, value=None, value2=None, text_width=37):\n \"\"\"\n Print a line like this:\n\n text: value\n \"\"\"\n text = text.rjust(text_width)\n if value is None:\n assert value2 is None\n print(text)\n else:\n if value == \"-\":\n value = \"-\" * count_width\n else:\n value = str(value).rjust(count_width)\n if value2 is None:\n print(text + \":\", value)\n else:\n print(text + \":\", value, str(value2).rjust(count_width))\n\n\ndef print_errors(errors, phased_pairs):\n print_stat(\"phased pairs of variants assessed\", phased_pairs)\n print_stat(\"switch errors\", errors.switches)\n print_stat(\"switch error rate\", fraction2percentstr(errors.switches, phased_pairs))\n print_stat(\"switch/flip decomposition\", errors.switch_flips)\n print_stat(\n \"switch/flip rate\",\n fraction2percentstr(errors.switch_flips.switches + errors.switch_flips.flips, phased_pairs),\n )\n\n\[email protected]\nclass PairwiseComparisonResults:\n intersection_blocks: int\n covered_variants: int\n all_assessed_pairs: int\n all_switches: int\n all_switch_rate: float\n all_switchflips: SwitchFlips\n all_switchflip_rate: float\n blockwise_hamming: int\n blockwise_hamming_rate: int\n blockwise_diff_genotypes: int\n blockwise_diff_genotypes_rate: int\n largestblock_assessed_pairs: int\n largestblock_switches: int\n largestblock_switch_rate: float\n largestblock_switchflips: SwitchFlips\n 
largestblock_switchflip_rate: float\n largestblock_hamming: int\n largestblock_hamming_rate: float\n largestblock_diff_genotypes: int\n largestblock_diff_genotypes_rate: float\n\n\[email protected]\nclass BlockStats:\n variant_count: int\n span: int\n\n\ndef collect_common_variants(variant_tables: List[VariantTable], sample) -> Set[VcfVariant]:\n common_variants = None\n for variant_table in variant_tables:\n het_variants = [\n v\n for v, gt in zip(variant_table.variants, variant_table.genotypes_of(sample))\n if not gt.is_homozygous()\n ]\n if common_variants is None:\n common_variants = set(het_variants)\n else:\n common_variants.intersection_update(het_variants)\n assert common_variants is not None\n return common_variants\n\n\ndef compare(variant_tables: List[VariantTable], sample: str, dataset_names: List[str], ploidy: int):\n \"\"\"\n Return a PairwiseComparisonResults object if the variant_tables has a length of 2.\n \"\"\"\n assert len(variant_tables) > 1\n\n common_variants = collect_common_variants(variant_tables, sample)\n assert common_variants is not None\n\n print_stat(\"common heterozygous variants\", len(common_variants))\n print_stat(\"(restricting to these below)\")\n phases = []\n sorted_variants = sorted(common_variants, key=lambda v: v.position)\n for variant_table in variant_tables:\n p = [\n phase\n for variant, phase in zip(variant_table.variants, variant_table.phases_of(sample))\n if variant in common_variants\n ]\n assert [v for v in variant_table.variants if v in common_variants] == sorted_variants\n assert len(p) == len(common_variants)\n phases.append(p)\n\n # blocks[variant_table_index][block_id] is a list of indices into common_variants\n blocks: List[DefaultDict[int, List[int]]] = [defaultdict(list) for _ in variant_tables]\n block_intersection = defaultdict(list)\n for variant_index in range(len(common_variants)):\n any_none = False\n for i in range(len(phases)):\n phase = phases[i][variant_index]\n if phase is None:\n any_none = True\n else:\n blocks[i][phase.block_id].append(variant_index)\n if not any_none:\n joint_block_id = tuple(\n phase[variant_index].block_id for phase in phases # type: ignore\n )\n block_intersection[joint_block_id].append(variant_index)\n\n # create statistics on each block in each data set\n block_stats = compute_block_stats(blocks, sorted_variants)\n\n for dataset_name, blck in zip(dataset_names, blocks):\n print_stat(\n \"non-singleton blocks in {}\".format(dataset_name),\n len([b for b in blck.values() if len(b) > 1]),\n )\n print_stat(\"--> covered variants\", sum(len(b) for b in blck.values() if len(b) > 1))\n\n intersection_block_count = sum(1 for b in block_intersection.values() if len(b) > 1)\n intersection_block_variants = sum(len(b) for b in block_intersection.values() if len(b) > 1)\n print_stat(\"non-singleton intersection blocks\", intersection_block_count)\n print_stat(\"--> covered variants\", intersection_block_variants)\n if len(variant_tables) == 2:\n (\n bed_records,\n longest_block_agreement,\n longest_block_positions,\n pairwise_comparison,\n ) = compare_pair(\n block_intersection,\n dataset_names,\n intersection_block_count,\n intersection_block_variants,\n phases,\n ploidy,\n sorted_variants,\n variant_tables,\n )\n\n return (\n pairwise_comparison,\n bed_records,\n block_stats,\n longest_block_positions,\n longest_block_agreement,\n None,\n )\n else:\n assert ploidy == 2\n multiway_results = compare_multiway(block_intersection, dataset_names, phases)\n return None, None, block_stats, None, None, 
multiway_results\n\n\ndef compare_pair(\n block_intersection,\n dataset_names,\n intersection_block_count,\n intersection_block_variants,\n phases,\n ploidy,\n sorted_variants,\n variant_tables,\n):\n longest_block = 0\n longest_block_errors = PhasingErrors()\n longest_block_positions = []\n longest_block_agreement = []\n phased_pairs = 0\n bed_records = []\n total_errors = PhasingErrors()\n total_compared_variants = 0\n for block in block_intersection.values():\n if len(block) < 2:\n continue\n phasing0 = []\n phasing1 = []\n for j in range(ploidy):\n p0 = \"\".join(str(phases[0][i].phase[j]) for i in block)\n p1 = \"\".join(str(phases[1][i].phase[j]) for i in block)\n phasing0.append(p0)\n phasing1.append(p1)\n block_positions = [sorted_variants[i].position for i in block]\n errors = compare_block(phasing0, phasing1)\n\n # TODO: extend to polyploid\n if ploidy == 2:\n bed_records.extend(\n create_bed_records(\n variant_tables[0].chromosome,\n phasing0[0],\n phasing1[0],\n block_positions,\n \"{}<-->{}\".format(*dataset_names),\n )\n )\n total_errors += errors\n phased_pairs += len(block) - 1\n total_compared_variants += len(block)\n if len(block) > longest_block:\n longest_block = len(block)\n longest_block_errors = errors\n longest_block_positions = block_positions\n # TODO: extend to polyploid\n if ploidy == 2:\n if hamming(phasing0, phasing1) < hamming(phasing0[0], complement(phasing1[0])):\n longest_block_agreement = [\n 1 * (p0 == p1) for p0, p1 in zip(phasing0[0], phasing1[0])\n ]\n else:\n longest_block_agreement = [\n 1 * (p0 != p1) for p0, p1 in zip(phasing0[0], phasing1[0])\n ]\n longest_block_assessed_pairs = max(longest_block - 1, 0)\n print_stat(\"ALL INTERSECTION BLOCKS\", \"-\")\n print_errors(total_errors, phased_pairs)\n print_stat(\"Block-wise Hamming distance\", total_errors.hamming)\n print_stat(\n \"Block-wise Hamming distance [%]\",\n fraction2percentstr(total_errors.hamming, total_compared_variants),\n )\n print_stat(\"Different genotypes\", total_errors.diff_genotypes)\n print_stat(\n \"Different genotypes [%]\",\n fraction2percentstr(total_errors.diff_genotypes, total_compared_variants),\n )\n print_stat(\"LARGEST INTERSECTION BLOCK\", \"-\")\n print_errors(longest_block_errors, longest_block_assessed_pairs)\n print_stat(\"Hamming distance\", longest_block_errors.hamming)\n print_stat(\n \"Hamming distance [%]\", fraction2percentstr(longest_block_errors.hamming, longest_block)\n )\n print_stat(\"Different genotypes\", longest_block_errors.diff_genotypes)\n print_stat(\n \"Different genotypes [%]\",\n fraction2percentstr(longest_block_errors.diff_genotypes, longest_block),\n )\n pcr = PairwiseComparisonResults(\n intersection_blocks=intersection_block_count,\n covered_variants=intersection_block_variants,\n all_assessed_pairs=phased_pairs,\n all_switches=total_errors.switches,\n all_switch_rate=safefraction(total_errors.switches, phased_pairs),\n all_switchflips=total_errors.switch_flips,\n all_switchflip_rate=safefraction(\n total_errors.switch_flips.switches + total_errors.switch_flips.flips, phased_pairs\n ),\n blockwise_hamming=total_errors.hamming,\n blockwise_hamming_rate=safefraction(total_errors.hamming, total_compared_variants),\n blockwise_diff_genotypes=total_errors.diff_genotypes,\n blockwise_diff_genotypes_rate=safefraction(\n total_errors.diff_genotypes, total_compared_variants\n ),\n largestblock_assessed_pairs=longest_block_assessed_pairs,\n largestblock_switches=longest_block_errors.switches,\n largestblock_switch_rate=safefraction(\n 
longest_block_errors.switches, longest_block_assessed_pairs\n ),\n largestblock_switchflips=longest_block_errors.switch_flips,\n largestblock_switchflip_rate=safefraction(\n longest_block_errors.switch_flips.switches + longest_block_errors.switch_flips.flips,\n longest_block_assessed_pairs,\n ),\n largestblock_hamming=longest_block_errors.hamming,\n largestblock_hamming_rate=safefraction(longest_block_errors.hamming, longest_block),\n largestblock_diff_genotypes=longest_block_errors.diff_genotypes,\n largestblock_diff_genotypes_rate=safefraction(\n longest_block_errors.diff_genotypes, longest_block\n ),\n )\n return bed_records, longest_block_agreement, longest_block_positions, pcr\n\n\ndef compare_multiway(block_intersection, dataset_names, phases):\n histogram = defaultdict(int)\n total_compared = 0\n for block in block_intersection.values():\n if len(block) < 2:\n continue\n total_compared += len(block) - 1\n phasings = [\"\".join(str(phases[j][i].phase[0]) for i in block) for j in range(len(phases))]\n switch_encodings = [switch_encoding(p) for p in phasings]\n for i in range(len(block) - 1):\n s = \"\".join(switch_encodings[j][i] for j in range(len(switch_encodings)))\n s = min(s, complement(s))\n histogram[s] += 1\n print_stat(\"Compared pairs of variants\", total_compared)\n bipartitions = list(histogram.keys())\n bipartitions.sort()\n multiway_results = {} # (dataset_list0, dataset_list1) --> count\n for i, s in enumerate(bipartitions):\n count = histogram[s]\n if i == 0:\n assert set(c for c in s) == set(\"0\")\n print(\"ALL AGREE\")\n elif i == 1:\n print(\"DISAGREEMENT\")\n left, right = [], []\n for name, leftright in zip(dataset_names, s):\n if leftright == \"0\":\n left.append(name)\n else:\n right.append(name)\n print_stat(\n (\"{%s} vs. 
{%s}\" % (\",\".join(left), \",\".join(right))),\n count,\n fraction2percentstr(count, total_compared),\n )\n multiway_results[(\",\".join(left), \",\".join(right))] = count\n return multiway_results\n\n\ndef compute_block_stats(\n blocks: List[DefaultDict[int, List[int]]], sorted_variants: List[VcfVariant]\n):\n block_stats = []\n for block in blocks:\n l = []\n for block_id, variant_indices in block.items():\n if len(variant_indices) < 2:\n continue\n span = (\n sorted_variants[variant_indices[-1]].position\n - sorted_variants[variant_indices[0]].position\n )\n l.append(BlockStats(len(variant_indices), span))\n block_stats.append(l)\n return block_stats\n\n\ndef create_blocksize_histogram(filename, block_stats, names, use_weights=False):\n try:\n import matplotlib\n import numpy\n\n matplotlib.use(\"pdf\")\n from matplotlib import pyplot\n from matplotlib.backends.backend_pdf import PdfPages\n except ImportError:\n raise CommandLineError(\n \"To use option --plot-blocksizes, you need to have numpy and matplotlib installed.\"\n )\n\n assert len(block_stats) == len(names)\n\n color_list = [\"#ffa347\", \"#0064c8\", \"#b42222\", \"#22a5b4\", \"#b47c22\", \"#6db6ff\"]\n if len(color_list) < len(block_stats):\n color_count = len(block_stats)\n color_list = pyplot.cm.Set1([n / color_count for n in range(color_count)])\n colors = color_list[: len(block_stats)]\n\n with PdfPages(filename) as pdf:\n for what, xlabel in [\n (lambda stats: stats.variant_count, \"variant count\"),\n (lambda stats: stats.span, \"span [bp]\"),\n ]:\n pyplot.figure(figsize=(10, 8))\n max_value = max(what(stats) for stats in chain(*block_stats))\n common_bins = numpy.logspace(0, math.ceil(math.log10(max_value)), 50)\n for l, name, color in zip(block_stats, names, colors):\n x = [what(stats) for stats in l]\n n, bins, patches = pyplot.hist(\n x,\n bins=common_bins,\n alpha=0.6,\n color=color,\n label=name,\n weights=x if use_weights else None,\n )\n pyplot.xlabel(xlabel)\n pyplot.ylabel(\"Number of blocks\")\n pyplot.gca().set_xscale(\"log\")\n pyplot.gca().set_yscale(\"log\")\n pyplot.grid(True)\n pyplot.legend()\n pdf.savefig()\n pyplot.close()\n\n pyplot.figure(figsize=(10, 8))\n common_bins = numpy.logspace(0, math.ceil(math.log10(max_value)), 25)\n x = [[what(stats) for stats in l] for l in block_stats]\n n, bins, patches = pyplot.hist(\n x,\n bins=common_bins,\n alpha=0.6,\n color=colors,\n label=names,\n weights=x if use_weights else None,\n )\n pyplot.xlabel(xlabel)\n pyplot.ylabel(\"Number of blocks\")\n pyplot.gca().set_xscale(\"log\")\n pyplot.gca().set_yscale(\"log\")\n pyplot.grid(True)\n pyplot.legend()\n pdf.savefig()\n pyplot.close()\n\n\ndef run_compare(\n vcf,\n ploidy,\n names=None,\n sample=None,\n tsv_pairwise=None,\n tsv_multiway=None,\n only_snvs=False,\n switch_error_bed=None,\n plot_blocksizes=None,\n plot_sum_of_blocksizes=None,\n longest_block_tsv=None,\n):\n vcf_readers = [VcfReader(f, indels=not only_snvs, phases=True, ploidy=ploidy) for f in vcf]\n if names:\n dataset_names = names.split(\",\")\n if len(dataset_names) != len(vcf):\n raise CommandLineError(\n \"Number of names given with --names does not equal number of VCFs.\"\n )\n else:\n dataset_names = [\"file{}\".format(i) for i in range(len(vcf))]\n longest_name = max(len(n) for n in dataset_names)\n\n sample = get_sample_to_work_on(vcf_readers, requested_sample=sample)\n\n with ExitStack() as stack:\n tsv_pairwise_file = tsv_multiway_file = longest_block_tsv_file = switch_error_bedfile = None\n if tsv_pairwise:\n tsv_pairwise_file = 
stack.enter_context(open(tsv_pairwise, \"w\"))\n\n if tsv_multiway:\n tsv_multiway_file = stack.enter_context(open(tsv_multiway, \"w\"))\n print(\n \"#sample\",\n \"chromosome\",\n \"dataset_list0\",\n \"dataset_list1\",\n \"count\",\n sep=\"\\t\",\n file=tsv_multiway_file,\n )\n\n if longest_block_tsv:\n longest_block_tsv_file = stack.enter_context(open(longest_block_tsv, \"w\"))\n print(\n \"#dataset_name0\",\n \"dataset_name1\",\n \"#sample\",\n \"chromosome\",\n \"position\",\n \"phase_agreeing\",\n sep=\"\\t\",\n file=longest_block_tsv_file,\n )\n\n print(\"Comparing phasings for sample\", sample)\n\n vcfs = get_variant_tables(vcf_readers, vcf)\n chromosomes = get_common_chromosomes(vcfs)\n if len(chromosomes) == 0:\n raise CommandLineError(\"No chromosome is contained in all VCFs. Aborting.\")\n logger.info(\"Chromosomes present in all VCFs: %s\", \", \".join(chromosomes))\n\n if tsv_pairwise_file:\n fields = [\n \"#sample\",\n \"chromosome\",\n \"dataset_name0\",\n \"dataset_name1\",\n \"file_name0\",\n \"file_name1\",\n ]\n field_names = [f.name for f in dataclasses.fields(PairwiseComparisonResults)]\n fields.extend(field_names)\n fields.extend([\"het_variants0\", \"only_snvs\"])\n print(*fields, sep=\"\\t\", file=tsv_pairwise_file)\n\n if switch_error_bed:\n switch_error_bedfile = stack.enter_context(open(switch_error_bed, \"w\"))\n\n print(\"FILENAMES\")\n for name, filename in zip(dataset_names, vcf):\n print(name.rjust(longest_name + 2), \"=\", filename)\n\n width = max(longest_name, 15) + 5\n\n all_block_stats = [[] for _ in vcfs]\n\n def add_block_stats(block_stats):\n assert len(block_stats) == len(all_block_stats)\n for big_list, new_list in zip(all_block_stats, block_stats):\n big_list.extend(new_list)\n\n for chromosome in sorted(chromosomes):\n print(\"---------------- Chromosome {} ----------------\".format(chromosome))\n all_bed_records = []\n variant_tables = [vcf[chromosome] for vcf in vcfs]\n all_variants_union = set()\n all_variants_intersection = None\n het_variants_union = set()\n het_variants_intersection = None\n het_variant_sets = []\n het_variants0 = None\n print(\"VARIANT COUNTS (heterozygous / all): \")\n for variant_table, name in zip(variant_tables, dataset_names):\n all_variants_union.update(variant_table.variants)\n het_variants = [\n v\n for v, gt in zip(variant_table.variants, variant_table.genotypes_of(sample))\n if not gt.is_homozygous()\n ]\n if het_variants0 is None:\n het_variants0 = len(het_variants)\n het_variants_union.update(het_variants)\n if all_variants_intersection is None:\n all_variants_intersection = set(variant_table.variants)\n het_variants_intersection = set(het_variants)\n else:\n all_variants_intersection.intersection_update(variant_table.variants)\n het_variants_intersection.intersection_update(het_variants)\n het_variant_sets.append(set(het_variants))\n print(\n \"{}:\".format(name).rjust(width),\n str(len(het_variants)).rjust(count_width),\n \"/\",\n str(len(variant_table.variants)).rjust(count_width),\n )\n print(\n \"UNION:\".rjust(width),\n str(len(het_variants_union)).rjust(count_width),\n \"/\",\n str(len(all_variants_union)).rjust(count_width),\n )\n print(\n \"INTERSECTION:\".rjust(width),\n str(len(het_variants_intersection)).rjust(count_width),\n \"/\",\n str(len(all_variants_intersection)).rjust(count_width),\n )\n\n for i in range(len(vcfs)):\n for j in range(i + 1, len(vcfs)):\n print(\n \"PAIRWISE COMPARISON: {} <--> {}:\".format(\n dataset_names[i], dataset_names[j]\n )\n )\n (\n results,\n bed_records,\n 
block_stats,\n longest_block_positions,\n longest_block_agreement,\n multiway_results,\n ) = compare(\n [variant_tables[i], variant_tables[j]],\n sample,\n [dataset_names[i], dataset_names[j]],\n ploidy,\n )\n if len(vcfs) == 2:\n add_block_stats(block_stats)\n all_bed_records.extend(bed_records)\n if tsv_pairwise_file:\n fields = [\n sample,\n chromosome,\n dataset_names[i],\n dataset_names[j],\n vcf[i],\n vcf[j],\n ]\n fields.extend(dataclasses.astuple(results))\n fields.extend([het_variants0, int(only_snvs)])\n print(*fields, sep=\"\\t\", file=tsv_pairwise_file)\n if longest_block_tsv_file:\n assert ploidy == 2\n assert len(longest_block_positions) == len(longest_block_agreement)\n for position, phase_agreeing in zip(\n longest_block_positions, longest_block_agreement\n ):\n print(\n dataset_names[i],\n dataset_names[j],\n sample,\n chromosome,\n position,\n phase_agreeing,\n sep=\"\\t\",\n file=longest_block_tsv_file,\n )\n\n # if requested, write all switch errors found in the current chromosome to the bed file\n if switch_error_bedfile:\n assert ploidy == 2\n all_bed_records.sort()\n for record in all_bed_records:\n print(*record, sep=\"\\t\", file=switch_error_bedfile)\n\n if len(vcfs) > 2:\n assert ploidy == 2\n print(\"MULTIWAY COMPARISON OF ALL PHASINGS:\")\n (\n results,\n bed_records,\n block_stats,\n longest_block_positions,\n longest_block_agreement,\n multiway_results,\n ) = compare(variant_tables, sample, dataset_names, ploidy)\n add_block_stats(block_stats)\n if tsv_multiway_file:\n for ((dataset_list0, dataset_list1), count) in multiway_results.items():\n print(\n sample,\n chromosome,\n \"{\" + dataset_list0 + \"}\",\n \"{\" + dataset_list1 + \"}\",\n count,\n sep=\"\\t\",\n file=tsv_multiway_file,\n )\n\n if plot_blocksizes:\n create_blocksize_histogram(plot_blocksizes, all_block_stats, dataset_names)\n if plot_sum_of_blocksizes:\n create_blocksize_histogram(\n plot_sum_of_blocksizes, all_block_stats, dataset_names, use_weights=True\n )\n\n\ndef get_common_chromosomes(vcfs: List[Dict[str, VariantTable]]) -> List[str]:\n common = None\n for chrom_variant_table_map in vcfs:\n chromosomes = chrom_variant_table_map.keys()\n if common is None:\n common = set(chromosomes)\n else:\n common.intersection_update(chromosomes)\n if common is None:\n return []\n return sorted(common)\n\n\ndef get_variant_tables(\n vcf_readers: List[VcfReader], vcf_filenames: List[str]\n) -> List[Dict[str, VariantTable]]:\n vcfs = []\n for reader, filename in zip(vcf_readers, vcf_filenames):\n # create dict mapping chromosome names to VariantTables\n m = dict()\n logger.info(\"Reading phasing from %r\", filename)\n try:\n for variant_table in reader:\n m[variant_table.chromosome] = variant_table\n except PloidyError as e:\n raise CommandLineError(\"Provided ploidy is invalid: {}. 
Aborting.\".format(e))\n        vcfs.append(m)\n    return vcfs\n\n\ndef get_sample_to_work_on(vcf_readers: List[VcfReader], requested_sample: Optional[str]):\n    all_samples = set()\n    sample_intersection = None\n    for vcf_reader in vcf_readers:\n        if sample_intersection is None:\n            sample_intersection = set(vcf_reader.samples)\n        else:\n            sample_intersection.intersection_update(vcf_reader.samples)\n        all_samples.update(vcf_reader.samples)\n    assert sample_intersection is not None\n    if requested_sample:\n        sample_intersection.intersection_update([requested_sample])\n        if len(sample_intersection) == 0:\n            raise CommandLineError(\n                \"Sample {!r} requested on command-line not found in all VCFs\".format(\n                    requested_sample\n                )\n            )\n    else:\n        if len(sample_intersection) == 0:\n            raise CommandLineError(\"None of the samples is present in all VCFs\")\n        elif len(sample_intersection) == 1:\n            requested_sample = list(sample_intersection)[0]\n        else:\n            raise CommandLineError(\n                \"More than one sample is present in all VCFs, please use\"\n                \" --sample to specify which sample to work on.\"\n            )\n    return requested_sample\n\n\ndef main(args):\n    run_compare(**vars(args))\n"
] | [
[
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"matplotlib.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
snoop2head/Open_stock_liquidity | [
"a5450774c50f14433915c37d057fa20a8196cdb9"
] | [
"app_pandas_to_dataframe_or_to_excel.py"
] | [
"import pandas as pd\nfrom pandas import ExcelWriter\n\n\ncode_df = pd.read_html('http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n\n# 종목코드가 6자리이기 때문에 6자리를 맞춰주기 위해 설정해줌\ncode_df.종목코드 = code_df.종목코드.map('{:06d}'.format)\n\n# 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\ncode_df = code_df[['회사명', '종목코드']]\n\n# 한글로된 컬럼명을 영어로 바꿔준다.\ncode_df = code_df.rename(columns={'회사명': 'name', '종목코드': 'code'})\n#print(code_df.head())\n\n\ndef error_detector(item_name):\n code = code_df.query(\"name=='{}'\".format(item_name))['code'].to_string(index=False)\n #print(code)\n code_no_space = code[-6:] #code value is bit strange. This omits space in front of code integer value\n #print(code_no_space)\n if code_no_space == \"([], )\":\n print('클라가 종목 명을 [' +item_name+ \"]으로 잘못 입력했네!\")\n return 'endgame'\n else:\n print(\"i am iron man\")\n return 'i am iron man'\n\n\n\n# 종목 이름을 입력하면 종목에 해당하는 코드를 불러와\n# 네이버 금융(http://finance.naver.com)에 넣어줌\ndef get_url(item_name, code_df):\n code = code_df.query(\"name=='{}'\".format(item_name))['code'].to_string(index=False)\n #print(code)\n code_no_space = code[-6:] #code value is bit strange. This omits space in front of code integer value\n #print(code_no_space)\n url = 'http://finance.naver.com/item/sise_day.nhn?code={code}'.format(code=code_no_space)\n #print(url)\n print(\"요청 URL = {}\".format(url))\n return url\n\ndef get_table_write_on_excel(item_name):\n #item name\n url = get_url(item_name, code_df)\n # defining data frame\n df = pd.DataFrame()\n\n # limiting the data range 20 pages\n for page in range(1, 31):\n pg_url = '{url}&page={page}'.format(url=url, page=page)\n df = df.append(pd.read_html(pg_url, header=0)[0], ignore_index=True)\n\n # df.dropna() dropping weird ones\n df = df.dropna()\n\n #file name setting\n file_name = item_name + \" 거래량\"\n\n #writing on excel\n writer = ExcelWriter(file_name+'.xlsx')\n df.to_excel(writer,'sheet1',index=False)\n writer.save()\n\ndef get_table(item_name):\n #item name\n url = get_url(item_name, code_df)\n # defining data frame\n df = pd.DataFrame()\n\n # limiting the data range 30 pages\n for page in range(1, 31):\n pg_url = '{url}&page={page}'.format(url=url, page=page)\n df = df.append(pd.read_html(pg_url, header=0)[0], ignore_index=True)\n\n # df.dropna() dropping weird ones\n df = df.dropna()\n return df\n\n\n"
] | [
[
"pandas.ExcelWriter",
"pandas.read_html",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
subhayuroy/vedo | [
"4c4686fe14f42b2d79d4e138afed362813ede4d3"
] | [
"vedo/mesh.py"
] | [
"import numpy as np\nimport os\nimport vtk\nimport vedo\nfrom vedo.colors import printc, getColor, colorMap\nfrom vedo.utils import isSequence, flatten, mag, buildPolyData, numpy2vtk, vtk2numpy\nfrom vedo.pointcloud import Points\nfrom deprecated import deprecated\n\n__doc__ = (\"\"\"Submodule to manage polygonal meshes.\"\"\" + vedo.docs._defs)\n\n__all__ = [\"Mesh\", \"merge\"]\n\n\n####################################################\ndef merge(*meshs, flag=False):\n \"\"\"\n Build a new mesh formed by the fusion of the input polygonal Meshes (or Points).\n\n Similar to Assembly, but in this case the input objects become a single mesh entity.\n\n To keep track of the original identities of the input mesh you can set flag.\n In this case a point array of IDs is added to the merged output mesh.\n\n .. hint:: |warp1.py|_ |value-iteration.py|_\n\n |warp1| |value-iteration|\n \"\"\"\n acts = [a for a in flatten(meshs) if a]\n\n if not acts:\n return None\n\n idarr = []\n polyapp = vtk.vtkAppendPolyData()\n for i, a in enumerate(acts):\n try:\n poly = a.polydata()\n except:\n # so a vtkPolydata can also be passed\n poly = a\n polyapp.AddInputData(poly)\n if flag:\n idarr += [i]*poly.GetNumberOfPoints()\n polyapp.Update()\n mpoly = polyapp.GetOutput()\n\n if flag:\n varr = numpy2vtk(idarr, dtype=np.uint16, name=\"OriginalMeshID\")\n mpoly.GetPointData().AddArray(varr)\n\n msh = Mesh(mpoly)\n if isinstance(acts[0], vtk.vtkActor):\n cprp = vtk.vtkProperty()\n cprp.DeepCopy(acts[0].GetProperty())\n msh.SetProperty(cprp)\n msh.property = cprp\n return msh\n\n\n####################################################\nclass Mesh(Points):\n \"\"\"\n Build an instance of object ``Mesh`` derived from ``PointCloud``.\n\n Finally input can be a list of vertices and their connectivity (faces of the polygonal mesh).\n For point clouds - e.i. no faces - just substitute the `faces` list with ``None``.\n\n E.g.: `Mesh( [ [[x1,y1,z1],[x2,y2,z2], ...], [[0,1,2], [1,2,3], ...] ] )`\n\n :param c: color in RGB format, hex, symbol or name\n :param float alpha: opacity value\n :param bool wire: show surface as wireframe\n :param bc: backface color of internal surface\n :param str texture: jpg file name or surface texture name\n :param bool computeNormals: compute point and cell normals at creation\n\n .. hint:: A mesh can be built from vertices and their connectivity. 
See e.g.:\n\n |buildmesh| |buildmesh.py|_\n \"\"\"\n def __init__(\n self,\n inputobj=None,\n c=None,\n alpha=1,\n computeNormals=False,\n ):\n Points.__init__(self)\n\n self.line_locator = None\n self._current_texture_name = '' # used by plotter._keypress\n\n self._mapper.SetInterpolateScalarsBeforeMapping(vedo.settings.interpolateScalarsBeforeMapping)\n\n if vedo.settings.usePolygonOffset:\n self._mapper.SetResolveCoincidentTopologyToPolygonOffset()\n pof, pou = vedo.settings.polygonOffsetFactor, vedo.settings.polygonOffsetUnits\n self._mapper.SetResolveCoincidentTopologyPolygonOffsetParameters(pof, pou)\n\n inputtype = str(type(inputobj))\n\n if inputobj is None:\n pass\n\n elif isinstance(inputobj, Mesh) or isinstance(inputobj, vtk.vtkActor):\n polyCopy = vtk.vtkPolyData()\n polyCopy.DeepCopy(inputobj.GetMapper().GetInput())\n self._data = polyCopy\n self._mapper.SetInputData(polyCopy)\n self._mapper.SetScalarVisibility(inputobj.GetMapper().GetScalarVisibility())\n pr = vtk.vtkProperty()\n pr.DeepCopy(inputobj.GetProperty())\n self.SetProperty(pr)\n self.property = pr\n\n elif isinstance(inputobj, vtk.vtkPolyData):\n if inputobj.GetNumberOfCells() == 0:\n carr = vtk.vtkCellArray()\n for i in range(inputobj.GetNumberOfPoints()):\n carr.InsertNextCell(1)\n carr.InsertCellPoint(i)\n inputobj.SetVerts(carr)\n self._data = inputobj # cache vtkPolyData and mapper for speed\n\n elif isinstance(inputobj, (vtk.vtkStructuredGrid, vtk.vtkRectilinearGrid)):\n if vedo.settings.visibleGridEdges:\n gf = vtk.vtkExtractEdges()\n gf.SetInputData(inputobj)\n else:\n gf = vtk.vtkGeometryFilter()\n gf.SetInputData(inputobj)\n gf.Update()\n self._data = gf.GetOutput()\n\n elif \"trimesh\" in inputtype:\n tact = vedo.utils.trimesh2vedo(inputobj, alphaPerCell=False)\n self._data = tact.polydata()\n\n elif \"meshio\" in inputtype: # meshio-4.0.11\n if len(inputobj.cells):\n mcells = []\n for cellblock in inputobj.cells:\n if cellblock.type in (\"triangle\", \"quad\"):\n mcells += cellblock.data.tolist()\n self._data = buildPolyData(inputobj.points, mcells)\n else:\n self._data = buildPolyData(inputobj.points, None)\n # add arrays:\n try:\n if len(inputobj.point_data):\n for k in inputobj.point_data.keys():\n vdata = numpy2vtk(inputobj.point_data[k])\n vdata.SetName(str(k))\n self._data.GetPointData().AddArray(vdata)\n except AssertionError:\n print(\"Could not add meshio point data, skip.\")\n try:\n if len(inputobj.cell_data):\n for k in inputobj.cell_data.keys():\n vdata = numpy2vtk(inputobj.cell_data[k])\n vdata.SetName(str(k))\n self._data.GetCellData().AddArray(vdata)\n except AssertionError:\n print(\"Could not add meshio cell data, skip.\")\n\n elif \"meshlab\" in inputtype:\n self._data = vedo.utils._meshlab2vedo(inputobj)\n\n elif isSequence(inputobj):\n ninp = len(inputobj)\n if ninp == 0:\n self._data = vtk.vtkPolyData()\n elif ninp == 2: # assume [vertices, faces]\n self._data = buildPolyData(inputobj[0], inputobj[1])\n else: # assume [vertices] or vertices\n self._data = buildPolyData(inputobj, None)\n\n elif hasattr(inputobj, \"GetOutput\"): # passing vtk object\n if hasattr(inputobj, \"Update\"): inputobj.Update()\n if isinstance(inputobj.GetOutput(), vtk.vtkPolyData):\n self._data = inputobj.GetOutput()\n else:\n gf = vtk.vtkGeometryFilter()\n gf.SetInputData(inputobj.GetOutput())\n gf.Update()\n self._data = gf.GetOutput()\n\n elif isinstance(inputobj, str):\n dataset = vedo.io.load(inputobj)\n self.filename = inputobj\n if \"TetMesh\" in str(type(dataset)):\n self._data = 
dataset.tomesh().polydata()\n            else:\n                self._data = dataset.polydata()\n\n        else:\n            try:\n                gf = vtk.vtkGeometryFilter()\n                gf.SetInputData(inputobj)\n                gf.Update()\n                self._data = gf.GetOutput()\n            except:\n                printc(\"Error: cannot build mesh from type:\\n\", inputtype, c='r')\n                raise RuntimeError()\n\n\n        if vedo.settings.computeNormals is not None:\n            computeNormals = vedo.settings.computeNormals\n\n        if self._data:\n            if computeNormals:\n                pdnorm = vtk.vtkPolyDataNormals()\n                pdnorm.SetInputData(self._data)\n                pdnorm.ComputePointNormalsOn()\n                pdnorm.ComputeCellNormalsOn()\n                pdnorm.FlipNormalsOff()\n                pdnorm.ConsistencyOn()\n                pdnorm.Update()\n                self._data = pdnorm.GetOutput()\n\n            self._mapper.SetInputData(self._data)\n\n        self._bfprop = None # backface property holder\n\n        self.property = self.GetProperty()\n        self.property.SetInterpolationToPhong()\n\n        # set the color by c or by scalar\n        if self._data:\n\n            arrexists = False\n\n            if c is None:\n                ptdata = self._data.GetPointData()\n                cldata = self._data.GetCellData()\n                exclude = ['normals', 'tcoord']\n\n                if cldata.GetNumberOfArrays():\n                    for i in range(cldata.GetNumberOfArrays()):\n                        iarr = cldata.GetArray(i)\n                        if iarr:\n                            icname = iarr.GetName()\n                            if icname and all(s not in icname.lower() for s in exclude):\n                                cldata.SetActiveScalars(icname)\n                                self._mapper.ScalarVisibilityOn()\n                                self._mapper.SetScalarModeToUseCellData()\n                                self._mapper.SetScalarRange(iarr.GetRange())\n                                arrexists = True\n                                break # stop at first good one\n\n                # point data comes after so it has priority\n                if ptdata.GetNumberOfArrays():\n                    for i in range(ptdata.GetNumberOfArrays()):\n                        iarr = ptdata.GetArray(i)\n                        if iarr:\n                            ipname = iarr.GetName()\n                            if ipname and all(s not in ipname.lower() for s in exclude):\n                                ptdata.SetActiveScalars(ipname)\n                                self._mapper.ScalarVisibilityOn()\n                                self._mapper.SetScalarModeToUsePointData()\n                                self._mapper.SetScalarRange(iarr.GetRange())\n                                arrexists = True\n                                break # stop at first good one\n\n            if not arrexists:\n                if c is None:\n                    c = \"gold\"\n                    c = getColor(c)\n                elif isinstance(c, float) and c<=1:\n                    c = colorMap(c, \"rainbow\", 0,1)\n                else:\n                    c = getColor(c)\n                self.property.SetColor(c)\n                self.property.SetAmbient(0.1)\n                self.property.SetDiffuse(1)\n                self.property.SetSpecular(.05)\n                self.property.SetSpecularPower(5)\n                self._mapper.ScalarVisibilityOff()\n\n        if alpha is not None:\n            self.property.SetOpacity(alpha)\n        return\n\n\n    def faces(self):\n        \"\"\"\n        Get cell polygonal connectivity ids as a python ``list``.\n        The output format is: [[id0 ... idn], [id0 ... idm], etc].\n        \"\"\"\n        arr1d = vtk2numpy(self._data.GetPolys().GetData())\n        if arr1d is None:\n            return []\n\n        #Get cell connectivity ids as a 1D array. vtk format is:\n        #[nids1, id0 ... idn, niids2, id0 ... idm, etc].\n        if len(arr1d) == 0:\n            arr1d = vtk2numpy(self._data.GetStrips().GetData())\n            if arr1d is None:\n                return []\n\n        i = 0\n        conn = []\n        n = len(arr1d)\n        if n:\n            while True:\n                cell = [arr1d[i+k] for k in range(1, arr1d[i]+1)]\n                conn.append(cell)\n                i += arr1d[i]+1\n                if i >= n:\n                    break\n        return conn # cannot always make a numpy array of it!\n\n    def cells(self):\n        \"\"\"Alias for ``faces()``.\"\"\"\n        return self.faces()\n\n\n    def lines(self, flat=False):\n        \"\"\"Get lines connectivity ids as a numpy array.\n        Default format is [[id0,id1], [id3,id4], ...]\n\n        :param bool flat: 1D numpy array as [2, 10,20, 3, 10,11,12, 2, 70,80, ...]\n        \"\"\"\n        #Get cell connectivity ids as a 1D array. The vtk format is:\n        # [nids1, id0 ... 
idm, etc].\n arr1d = vtk2numpy(self.polydata(False).GetLines().GetData())\n\n if arr1d is None:\n return []\n\n if flat:\n return arr1d\n\n i = 0\n conn = []\n n = len(arr1d)\n for idummy in range(n):\n cell = [arr1d[i+k+1] for k in range(arr1d[i])]\n conn.append(cell)\n i += arr1d[i]+1\n if i >= n:\n break\n\n return conn # cannot always make a numpy array of it!\n\n def texture(self, tname='',\n tcoords=None,\n interpolate=True,\n repeat=True,\n edgeClamp=False,\n scale=None,\n ushift=None,\n vshift=None,\n seamThreshold=None,\n ):\n \"\"\"\n Assign a texture to mesh from image file or predefined texture `tname`.\n If tname is set to ``None`` texture is disabled.\n If tname is set to '' then a png or jpg file is looked for with same name and path.\n Input tname can also be an array of shape (n,m,3).\n\n :param bool interpolate: turn on/off linear interpolation of the texture map when rendering.\n :param bool repeat: repeat of the texture when tcoords extend beyond the [0,1] range.\n :param bool edgeClamp: turn on/off the clamping of the texture map when\n the texture coords extend beyond the [0,1] range.\n Only used when repeat is False, and edge clamping is supported by the graphics card.\n\n :param bool scale: scale the texture image by this factor\n :param bool ushift: shift u-coordinates of texture by this amaount\n :param bool vshift: shift v-coordinates of texture by this amaount\n :param float seamThreshold: try to seal seams in texture by collapsing triangles\n (test values around 1.0, lower values = stronger collapse)\n \"\"\"\n pd = self.polydata(False)\n if tname is None:\n pd.GetPointData().SetTCoords(None)\n pd.GetPointData().Modified()\n return self\n ###########\n\n if isinstance(tname, str) and 'https' in tname:\n tname = vedo.io.download(tname, verbose=False)\n\n if isSequence(tname):\n from PIL import Image\n from tempfile import NamedTemporaryFile\n tmp_file = NamedTemporaryFile()\n im = Image.fromarray(tname)\n im.save(tmp_file.name+\".bmp\")\n tname = tmp_file.name+\".bmp\"\n\n if tname == '':\n ext = os.path.basename(str(self.filename)).split('.')[-1]\n tname = str(self.filename).replace('.'+ext, '.png')\n if not os.path.isfile(tname):\n tname = str(self.filename).replace('.'+ext, '.jpg')\n if not os.path.isfile(tname):\n printc(\"Error in texture(): default texture file must be png or jpg\",\n \"\\n e.g.\", tname, c='r')\n raise RuntimeError()\n\n if isinstance(tname, vtk.vtkTexture):\n tu = tname\n else:\n if tcoords is not None:\n if not isinstance(tcoords, np.ndarray):\n tcoords = np.array(tcoords)\n if tcoords.ndim != 2:\n printc('tcoords must be a 2-dimensional array', c='r')\n return self\n if tcoords.shape[0] != pd.GetNumberOfPoints():\n printc('Error in texture(): nr of texture coords must match nr of points', c='r')\n return self\n if tcoords.shape[1] != 2:\n printc('Error in texture(): vector must have 2 components', c='r')\n tarr = numpy2vtk(tcoords)\n tarr.SetName('TCoordinates')\n pd.GetPointData().SetTCoords(tarr)\n pd.GetPointData().Modified()\n else:\n if not pd.GetPointData().GetTCoords():\n tmapper = vtk.vtkTextureMapToPlane()\n tmapper.AutomaticPlaneGenerationOn()\n tmapper.SetInputData(pd)\n tmapper.Update()\n tc = tmapper.GetOutput().GetPointData().GetTCoords()\n if scale or ushift or vshift:\n ntc = vtk2numpy(tc)\n if scale: ntc *= scale\n if ushift: ntc[:,0] += ushift\n if vshift: ntc[:,1] += vshift\n tc = numpy2vtk(tc)\n pd.GetPointData().SetTCoords(tc)\n pd.GetPointData().Modified()\n\n fn = vedo.settings.textures_path + tname + \".jpg\"\n 
if os.path.exists(tname):\n                fn = tname\n            elif not os.path.exists(fn):\n                printc(\"File does not exist or texture\", tname,\n                       \"not found in\", vedo.settings.textures_path, c=\"r\")\n                printc(\"\\tin Available built-in textures:\", c=\"m\", end=\" \")\n                for ff in os.listdir(vedo.settings.textures_path):\n                    printc(ff.split(\".\")[0], end=\" \", c=\"m\")\n                print()\n                return self\n\n            fnl = fn.lower()\n            if \".jpg\" in fnl or \".jpeg\" in fnl:\n                reader = vtk.vtkJPEGReader()\n            elif \".png\" in fnl:\n                reader = vtk.vtkPNGReader()\n            elif \".bmp\" in fnl:\n                reader = vtk.vtkBMPReader()\n            else:\n                printc(\"Error in texture(): supported files, PNG, BMP or JPG\", c=\"r\")\n                return self\n            reader.SetFileName(fn)\n            reader.Update()\n\n            tu = vtk.vtkTexture()\n            tu.SetInputData(reader.GetOutput())\n            tu.SetInterpolate(interpolate)\n            tu.SetRepeat(repeat)\n            tu.SetEdgeClamp(edgeClamp)\n\n        self.property.SetColor(1, 1, 1)\n        self._mapper.ScalarVisibilityOff()\n        self.SetTexture(tu)\n\n        if seamThreshold is not None:\n            tname = self._data.GetPointData().GetTCoords().GetName()\n            grad = self.gradient(tname)\n            ugrad, vgrad = np.split(grad, 2, axis=1)\n            ugradm, vgradm = vedo.utils.mag2(ugrad), vedo.utils.mag2(vgrad)\n            gradm = np.log(ugradm + vgradm)\n            largegrad_ids = np.arange(len(grad))[gradm>seamThreshold*4]\n            uvmap = self.pointdata[tname]\n            # collapse triangles that have large gradient\n            new_points = self.points(transformed=False)\n            for f in self.faces():\n                if np.isin(f, largegrad_ids).all():\n                    id1, id2, id3 = f\n                    uv1, uv2, uv3 = uvmap[f]\n                    d12 = vedo.mag2(uv1-uv2)\n                    d23 = vedo.mag2(uv2-uv3)\n                    d31 = vedo.mag2(uv3-uv1)\n                    idm = np.argmin([d12, d23, d31])\n                    if idm == 0:\n                        new_points[id1] = new_points[id3]\n                        new_points[id2] = new_points[id3]\n                    elif idm == 1:\n                        new_points[id2] = new_points[id1]\n                        new_points[id3] = new_points[id1]\n            self.points(new_points)\n\n        self.Modified()\n        return self\n\n\n    def computeNormals(self, points=True, cells=True, featureAngle=None, consistency=True):\n        \"\"\"Compute cell and vertex normals for the mesh.\n\n        :param bool points: do the computation for the vertices\n        :param bool cells: do the computation for the cells\n\n        :param float featureAngle: specify the angle that defines a sharp edge.\n            If the difference in angle across neighboring polygons is greater than this value,\n            the shared edge is considered \"sharp\" and it is split.\n\n        :param bool consistency: turn on/off the enforcement of consistent polygon ordering.\n\n        .. warning:: if featureAngle is set to a float the Mesh can be modified, and it\n            can have a different nr. 
of vertices from the original.\n \"\"\"\n poly = self.polydata(False)\n pdnorm = vtk.vtkPolyDataNormals()\n pdnorm.SetInputData(poly)\n pdnorm.SetComputePointNormals(points)\n pdnorm.SetComputeCellNormals(cells)\n pdnorm.SetConsistency(consistency)\n pdnorm.FlipNormalsOff()\n if featureAngle:\n pdnorm.SetSplitting(True)\n pdnorm.SetFeatureAngle(featureAngle)\n else:\n pdnorm.SetSplitting(False)\n # print(pdnorm.GetNonManifoldTraversal())\n pdnorm.Update()\n return self._update(pdnorm.GetOutput())\n\n\n def reverse(self, cells=True, normals=False):\n \"\"\"\n Reverse the order of polygonal cells\n and/or reverse the direction of point and cell normals.\n Two flags are used to control these operations:\n\n - `cells=True` reverses the order of the indices in the cell connectivity list.\n\n - `normals=True` reverses the normals by multiplying the normal vector by -1\n (both point and cell normals, if present).\n \"\"\"\n poly = self.polydata(False)\n rev = vtk.vtkReverseSense()\n if cells:\n rev.ReverseCellsOn()\n else:\n rev.ReverseCellsOff()\n if normals:\n rev.ReverseNormalsOn()\n else:\n rev.ReverseNormalsOff()\n rev.SetInputData(poly)\n rev.Update()\n return self._update(rev.GetOutput())\n\n def wireframe(self, value=True):\n \"\"\"Set mesh's representation as wireframe or solid surface.\n Same as `mesh.wireframe()`.\"\"\"\n if value:\n self.property.SetRepresentationToWireframe()\n else:\n self.property.SetRepresentationToSurface()\n return self\n\n def flat(self):\n \"\"\"Set surface interpolation to Flat.\n\n |wikiphong|\n \"\"\"\n self.property.SetInterpolationToFlat()\n return self\n\n def phong(self):\n \"\"\"Set surface interpolation to Phong.\"\"\"\n self.property.SetInterpolationToPhong()\n return self\n\n def backFaceCulling(self, value=True):\n \"\"\"Set culling of polygons based on orientation\n of normal with respect to camera.\"\"\"\n self.property.SetBackfaceCulling(value)\n return self\n\n def renderLinesAsTubes(self, value=True):\n self.property.SetRenderLinesAsTubes(value)\n return self\n\n def frontFaceCulling(self, value=True):\n \"\"\"Set culling of polygons based on orientation of normal with respect to camera.\"\"\"\n self.property.SetFrontfaceCulling(value)\n return self\n\n def backColor(self, bc=None):\n \"\"\"\n Set/get mesh's backface color.\n \"\"\"\n backProp = self.GetBackfaceProperty()\n\n if bc is None:\n if backProp:\n return backProp.GetDiffuseColor()\n return self\n\n if self.property.GetOpacity() < 1:\n # printc(\"In backColor(): only active for alpha=1\", c=\"y\")\n return self\n\n if not backProp:\n backProp = vtk.vtkProperty()\n\n backProp.SetDiffuseColor(getColor(bc))\n backProp.SetOpacity(self.property.GetOpacity())\n self.SetBackfaceProperty(backProp)\n self._mapper.ScalarVisibilityOff()\n return self\n\n def bc(self, backColor=False):\n \"\"\"Shortcut for `mesh.backColor()`. \"\"\"\n return self.backColor(backColor)\n\n def lineWidth(self, lw=None):\n \"\"\"Set/get width of mesh edges. Same as `lw()`.\"\"\"\n if lw is not None:\n if lw == 0:\n self.property.EdgeVisibilityOff()\n self.property.SetRepresentationToSurface()\n return self\n self.property.EdgeVisibilityOn()\n self.property.SetLineWidth(lw)\n else:\n return self.property.GetLineWidth()\n return self\n\n def lw(self, lineWidth=None):\n \"\"\"Set/get width of mesh edges. Same as `lineWidth()`.\"\"\"\n return self.lineWidth(lineWidth)\n\n def lineColor(self, lc=None):\n \"\"\"Set/get color of mesh edges. 
Same as `lc()`.\"\"\"\n if lc is not None:\n# if \"ireframe\" in self.property.GetRepresentationAsString():\n# self.property.EdgeVisibilityOff()\n# self.color(lc)\n# return self\n self.property.EdgeVisibilityOn()\n self.property.SetEdgeColor(getColor(lc))\n else:\n return self.property.GetEdgeColor()\n return self\n\n def lc(self, lineColor=None):\n \"\"\"Set/get color of mesh edges. Same as `lineColor()`.\"\"\"\n return self.lineColor(lineColor)\n\n def volume(self):\n \"\"\"Get/set the volume occupied by mesh.\"\"\"\n mass = vtk.vtkMassProperties()\n mass.SetGlobalWarningDisplay(0)\n mass.SetInputData(self.polydata())\n mass.Update()\n return mass.GetVolume()\n\n def area(self):\n \"\"\"Get/set the surface area of mesh.\n\n .. hint:: |largestregion.py|_\n \"\"\"\n mass = vtk.vtkMassProperties()\n mass.SetGlobalWarningDisplay(0)\n mass.SetInputData(self.polydata())\n mass.Update()\n return mass.GetSurfaceArea()\n\n def isClosed(self):\n \"\"\"Return ``True`` if mesh is watertight.\"\"\"\n featureEdges = vtk.vtkFeatureEdges()\n featureEdges.BoundaryEdgesOn()\n featureEdges.FeatureEdgesOff()\n featureEdges.NonManifoldEdgesOn()\n featureEdges.SetInputData(self.polydata(False))\n featureEdges.Update()\n ne = featureEdges.GetOutput().GetNumberOfCells()\n return not bool(ne)\n\n\n def shrink(self, fraction=0.85):\n \"\"\"Shrink the triangle polydata in the representation of the input mesh.\n\n Example:\n .. code-block:: python\n\n from vedo import *\n pot = load(dataurl+'teapot.vtk').shrink(0.75)\n s = Sphere(r=0.2).pos(0,0,-0.5)\n show(pot, s)\n\n |shrink| |shrink.py|_\n \"\"\"\n shrink = vtk.vtkShrinkPolyData()\n shrink.SetInputData(self._data)\n shrink.SetShrinkFactor(fraction)\n shrink.Update()\n return self._update(shrink.GetOutput())\n\n\n def stretch(self, q1, q2):\n \"\"\"Stretch mesh between points `q1` and `q2`. Mesh is not affected.\n\n |aspring| |aspring.py|_\n\n .. note:: for ``Mesh`` objects like helices, Line, cylinders, cones etc.,\n two attributes ``mesh.base``, and ``mesh.top`` are already defined.\n \"\"\"\n if self.base is None:\n printc('Error in stretch(): Please define vectors', c='r')\n printc(' mesh.base and mesh.top at creation.', c='r')\n raise RuntimeError()\n\n p1, p2 = self.base, self.top\n q1, q2, z = np.array(q1), np.array(q2), np.array([0, 0, 1])\n plength = np.linalg.norm(p2 - p1)\n qlength = np.linalg.norm(q2 - q1)\n T = vtk.vtkTransform()\n T.PostMultiply()\n T.Translate(-p1)\n cosa = np.dot(p2 - p1, z) / plength\n n = np.cross(p2 - p1, z)\n T.RotateWXYZ(np.rad2deg(np.arccos(cosa)), n)\n T.Scale(1, 1, qlength / plength)\n\n cosa = np.dot(q2 - q1, z) / qlength\n n = np.cross(q2 - q1, z)\n T.RotateWXYZ(-np.rad2deg(np.arccos(cosa)), n)\n T.Translate(q1)\n\n self.SetUserMatrix(T.GetMatrix())\n if self.trail:\n self.updateTrail()\n self.addShadows()\n return self\n\n def crop(self,\n top=None, bottom=None, right=None, left=None, front=None, back=None,\n bounds=None,\n ):\n \"\"\"Crop an ``Mesh`` object.\n\n :param float top: fraction to crop from the top plane (positive z)\n :param float bottom: fraction to crop from the bottom plane (negative z)\n :param float front: fraction to crop from the front plane (positive y)\n :param float back: fraction to crop from the back plane (negative y)\n :param float right: fraction to crop from the right plane (positive x)\n :param float left: fraction to crop from the left plane (negative x)\n :param list bounds: direct list of bounds passed as [x0,x1, y0,y1, z0,z1]\n\n Example:\n .. 
code-block:: python\n\n from vedo import Sphere\n Sphere().crop(right=0.3, left=0.1).show()\n\n |cropped|\n \"\"\"\n cu = vtk.vtkBox()\n x0, x1, y0, y1, z0, z1 = self.GetBounds()\n pos = np.array(self.GetPosition())\n x0, y0, z0 = [x0, y0, z0] - pos\n x1, y1, z1 = [x1, y1, z1] - pos\n\n if bounds is None:\n dx, dy, dz = x1-x0, y1-y0, z1-z0\n if top: z1 = z1 - top*dz\n if bottom: z0 = z0 + bottom*dz\n if front: y1 = y1 - front*dy\n if back: y0 = y0 + back*dy\n if right: x1 = x1 - right*dx\n if left: x0 = x0 + left*dx\n bounds = (x0, x1, y0, y1, z0, z1)\n else:\n if bounds[0] is None: bounds[0] = x0\n if bounds[1] is None: bounds[1] = x1\n if bounds[2] is None: bounds[2] = y0\n if bounds[3] is None: bounds[3] = y1\n if bounds[4] is None: bounds[4] = z0\n if bounds[5] is None: bounds[5] = z1\n cu.SetBounds(bounds)\n\n clipper = vtk.vtkClipPolyData()\n clipper.SetInputData(self._data)\n clipper.SetClipFunction(cu)\n clipper.InsideOutOn()\n clipper.GenerateClippedOutputOff()\n clipper.GenerateClipScalarsOff()\n clipper.SetValue(0)\n clipper.Update()\n self._update(clipper.GetOutput())\n return self\n\n def cutWithPointLoop(self,\n points,\n invert=False,\n on='points',\n includeBoundary=False,\n ):\n \"\"\"\n Cut an ``Mesh`` object with a set of points forming a closed loop.\n\n :param bool invert: invert selection (inside-out)\n :param str on: if 'cells' will extract the whole cells lying inside\n (or outside) the point loop\n\n :param bool includeBoundary: include cells lying exactly on the\n boundary line. Only relevant on 'cells' mode.\n \"\"\"\n if isinstance(points, Points):\n vpts = points.polydata().GetPoints()\n points = points.points()\n else:\n vpts = vtk.vtkPoints()\n if len(points[0])==2: # make it 3d\n points = np.asarray(points)\n points = np.c_[points, np.zeros(len(points))]\n for p in points:\n vpts.InsertNextPoint(p)\n\n if 'cell' in on:\n ippd = vtk.vtkImplicitSelectionLoop()\n ippd.SetLoop(vpts)\n ippd.AutomaticNormalGenerationOn()\n clipper = vtk.vtkExtractPolyDataGeometry()\n clipper.SetInputData(self.polydata())\n clipper.SetImplicitFunction(ippd)\n clipper.SetExtractInside(not invert)\n clipper.SetExtractBoundaryCells(includeBoundary)\n else:\n spol = vtk.vtkSelectPolyData()\n spol.SetLoop(vpts)\n spol.GenerateSelectionScalarsOn()\n spol.GenerateUnselectedOutputOff()\n spol.SetInputData(self.polydata())\n spol.Update()\n clipper = vtk.vtkClipPolyData()\n clipper.SetInputData(spol.GetOutput())\n clipper.SetInsideOut(not invert)\n clipper.SetValue(0.0)\n clipper.Update()\n cpoly = clipper.GetOutput()\n\n if self.GetIsIdentity() or cpoly.GetNumberOfPoints() == 0:\n self._update(cpoly)\n else:\n # bring the underlying polydata to where _data is\n M = vtk.vtkMatrix4x4()\n M.DeepCopy(self.GetMatrix())\n M.Invert()\n tr = vtk.vtkTransform()\n tr.SetMatrix(M)\n tf = vtk.vtkTransformPolyDataFilter()\n tf.SetTransform(tr)\n tf.SetInputData(clipper.GetOutput())\n tf.Update()\n self._update(tf.GetOutput())\n return self\n\n\n def cap(self, returnCap=False):\n \"\"\"\n Generate a \"cap\" on a clipped mesh, or caps sharp edges.\n\n |cutAndCap| |cutAndCap.py|_\n \"\"\"\n poly = self._data\n\n fe = vtk.vtkFeatureEdges()\n fe.SetInputData(poly)\n fe.BoundaryEdgesOn()\n fe.FeatureEdgesOff()\n fe.NonManifoldEdgesOff()\n fe.ManifoldEdgesOff()\n fe.Update()\n\n stripper = vtk.vtkStripper()\n stripper.SetInputData(fe.GetOutput())\n stripper.Update()\n\n boundaryPoly = vtk.vtkPolyData()\n boundaryPoly.SetPoints(stripper.GetOutput().GetPoints())\n 
boundaryPoly.SetPolys(stripper.GetOutput().GetLines())\n\n rev = vtk.vtkReverseSense()\n rev.ReverseCellsOn()\n rev.SetInputData(boundaryPoly)\n rev.Update()\n\n tf = vtk.vtkTriangleFilter()\n tf.SetInputData(rev.GetOutput())\n tf.Update()\n\n if returnCap:\n m = Mesh(tf.GetOutput())\n # assign the same transformation to the copy\n m.SetOrigin(self.GetOrigin())\n m.SetScale(self.GetScale())\n m.SetOrientation(self.GetOrientation())\n m.SetPosition(self.GetPosition())\n return m\n else:\n polyapp = vtk.vtkAppendPolyData()\n polyapp.AddInputData(poly)\n polyapp.AddInputData(tf.GetOutput())\n polyapp.Update()\n return self._update(polyapp.GetOutput()).clean()\n\n\n def join(self, polys=True, reset=False):\n \"\"\"\n Generate triangle strips and/or polylines from\n input polygons, triangle strips, and lines.\n\n Input polygons are assembled into triangle strips only if they are triangles;\n other types of polygons are passed through to the output and not stripped.\n Use mesh.triangulate() to triangulate non-triangular polygons prior to running\n this filter if you need to strip all the data.\n\n Also note that if triangle strips or polylines are present in the input\n they are passed through and not joined nor extended.\n If you wish to strip these use mesh.triangulate() to fragment the input\n into triangles and lines prior to applying join().\n\n :param bool polys: polygonal segments will be joined if they are contiguous\n :param bool reset: reset points ordering\n\n :Warning:\n\n If triangle strips or polylines exist in the input data\n they will be passed through to the output data.\n This filter will only construct triangle strips if triangle polygons\n are available; and will only construct polylines if lines are available.\n\n :Example:\n .. code-block:: python\n\n from vedo import *\n c1 = Cylinder(pos=(0,0,0), r=2, height=3, axis=(1,.0,0), alpha=.1).triangulate()\n c2 = Cylinder(pos=(0,0,2), r=1, height=2, axis=(0,.3,1), alpha=.1).triangulate()\n intersect = c1.intersectWith(c2).join(reset=True)\n spline = Spline(intersect).c('blue').lw(5)\n show(c1, c2, spline, intersect.labels('id'), axes=1)\n \"\"\"\n sf = vtk.vtkStripper()\n sf.SetPassThroughCellIds(True)\n sf.SetPassThroughPointIds(True)\n sf.SetJoinContiguousSegments(polys)\n sf.SetInputData(self.polydata(False))\n sf.Update()\n if reset:\n poly = sf.GetOutput()\n cpd = vtk.vtkCleanPolyData()\n cpd.PointMergingOn()\n cpd.ConvertLinesToPointsOn()\n cpd.ConvertPolysToLinesOn()\n cpd.ConvertStripsToPolysOn()\n cpd.SetInputData(poly)\n cpd.Update()\n poly = cpd.GetOutput()\n vpts = poly.GetCell(0).GetPoints().GetData()\n poly.GetPoints().SetData(vpts)\n return self._update(poly)\n else:\n return self._update(sf.GetOutput())\n\n\n def triangulate(self, verts=True, lines=True):\n \"\"\"\n Converts mesh polygons into triangles.\n\n If the input mesh is only made of 2D lines (no faces) the output will be a triangulation\n that fills the internal area. The contours may be concave, and may even contain holes,\n i.e. a contour may contain an internal contour winding in the opposite\n direction to indicate that it is a hole.\n\n :param bool verts: if True, break input vertex cells into individual vertex cells\n (one point per cell). 
If False, the input vertex cells will be ignored.\n :param bool lines: if True, break input polylines into line segments.\n If False, input lines will be ignored and the output will have no lines.\n \"\"\"\n if self._data.GetNumberOfPolys() or self._data.GetNumberOfStrips():\n tf = vtk.vtkTriangleFilter()\n tf.SetPassLines(lines)\n tf.SetPassVerts(verts)\n tf.SetInputData(self._data)\n tf.Update()\n return self._update(tf.GetOutput())\n\n elif self._data.GetNumberOfLines():\n vct = vtk.vtkContourTriangulator()\n vct.SetInputData(self._data)\n vct.Update()\n return self._update(vct.GetOutput())\n\n else:\n #printc(\"Error in triangulate()\")\n return self\n\n @deprecated(reason=vedo.colors.red+\"Please use distanceTo()\"+vedo.colors.reset)\n def distanceToMesh(self, mesh, signed=False, negate=False):\n return self.distanceTo(mesh, signed=signed, negate=negate)\n\n def distanceTo(self, mesh, signed=False, negate=False):\n '''\n Computes the (signed) distance from one mesh to another.\n\n |distance2mesh| |distance2mesh.py|_\n '''\n # overrides pointcloud.distanceToMesh()\n poly1 = self.polydata()\n poly2 = mesh.polydata()\n df = vtk.vtkDistancePolyDataFilter()\n df.ComputeSecondDistanceOff()\n df.SetInputData(0, poly1)\n df.SetInputData(1, poly2)\n if signed:\n df.SignedDistanceOn()\n else:\n df.SignedDistanceOff()\n if negate:\n df.NegateDistanceOn()\n df.Update()\n\n scals = df.GetOutput().GetPointData().GetScalars()\n poly1.GetPointData().AddArray(scals)\n\n poly1.GetPointData().SetActiveScalars(scals.GetName())\n rng = scals.GetRange()\n self._mapper.SetScalarRange(rng[0], rng[1])\n self._mapper.ScalarVisibilityOn()\n return self\n\n def addCellArea(self, name=\"Area\"):\n \"\"\"Add to this mesh a cell data array containing the areas of the polygonal faces\"\"\"\n csf = vtk.vtkCellSizeFilter()\n csf.SetInputData(self.polydata(False))\n csf.SetComputeArea(True)\n csf.SetComputeVolume(False)\n csf.SetComputeLength(False)\n csf.SetComputeVertexCount(False)\n csf.SetAreaArrayName(name)\n csf.Update()\n return self._update(csf.GetOutput())\n\n\n def addCellVertexCount(self, name=\"VertexCount\"):\n \"\"\"Add to this mesh a cell data array containing the nr of vertices that a polygonal face has.\"\"\"\n csf = vtk.vtkCellSizeFilter()\n csf.SetInputData(self.polydata(False))\n csf.SetComputeArea(False)\n csf.SetComputeVolume(False)\n csf.SetComputeLength(False)\n csf.SetComputeVertexCount(True)\n csf.SetVertexCountArrayName(name)\n csf.Update()\n return self._update(csf.GetOutput())\n\n\n def addArcLength(self, mesh, name=\"ArcLength\"):\n \"\"\"Given a mesh, add the length of the arc intersecting each point of the line.\"\"\"\n arcl = vtk.vtkAppendArcLength()\n arcl.SetInputData(mesh.polydata())\n arcl.Update()\n return self._update(arcl.GetOutput())\n\n\n def addQuality(self, measure=6):\n \"\"\"\n Calculate functions of quality for the elements of a triangular mesh.\n This method adds to the mesh a cell array named \"Quality\".\n See class `vtkMeshQuality <https://vtk.org/doc/nightly/html/classvtkMeshQuality.html>`_\n for explanation.\n\n :param int measure: type of estimator\n\n - EDGE RATIO, 0\n - ASPECT RATIO, 1\n - RADIUS RATIO, 2\n - ASPECT FROBENIUS, 3\n - MED ASPECT FROBENIUS, 4\n - MAX ASPECT FROBENIUS, 5\n - MIN_ANGLE, 6\n - COLLAPSE RATIO, 7\n - MAX ANGLE, 8\n - CONDITION, 9\n - SCALED JACOBIAN, 10\n - SHEAR, 11\n - RELATIVE SIZE SQUARED, 12\n - SHAPE, 13\n - SHAPE AND SIZE, 14\n - DISTORTION, 15\n - MAX EDGE RATIO, 16\n - SKEW, 17\n - TAPER, 18\n - VOLUME, 19\n - STRETCH, 20\n - 
DIAGONAL, 21\n - DIMENSION, 22\n - ODDY, 23\n - SHEAR AND SIZE, 24\n - JACOBIAN, 25\n - WARPAGE, 26\n - ASPECT GAMMA, 27\n - AREA, 28\n - ASPECT BETA, 29\n\n |meshquality| |meshquality.py|_\n \"\"\"\n qf = vtk.vtkMeshQuality()\n qf.SetInputData(self.polydata(False))\n qf.SetTriangleQualityMeasure(measure)\n qf.SaveCellQualityOn()\n qf.Update()\n pd = qf.GetOutput()\n self._update(pd)\n return self\n\n\n def addCurvatureScalars(self, method=0):\n \"\"\"\n Add scalars to ``Mesh`` that contains the\n curvature calculated in three different ways.\n\n :param int method: 0-gaussian, 1-mean, 2-max, 3-min curvature.\n :param lut: optional vtkLookUpTable up table.\n\n :Example:\n .. code-block:: python\n\n from vedo import Torus\n Torus().addCurvatureScalars().addScalarBar().show(axes=1)\n\n |curvature|\n \"\"\"\n curve = vtk.vtkCurvatures()\n curve.SetInputData(self._data)\n curve.SetCurvatureType(method)\n curve.Update()\n self._update(curve.GetOutput())\n self._mapper.ScalarVisibilityOn()\n return self\n\n def addConnectivity(self):\n \"\"\"\n Flag a mesh by connectivity: each disconnected region will receive a different Id.\n You can access the array of ids through ``mesh.pointdata[\"RegionId\"]``.\n \"\"\"\n cf = vtk.vtkConnectivityFilter()\n cf.SetInputData(self.polydata(False))\n cf.SetExtractionModeToAllRegions()\n cf.ColorRegionsOn()\n cf.Update()\n return self._update(cf.GetOutput())\n\n\n def addElevationScalars(self, lowPoint=(0,0,0), highPoint=(0,0,1), vrange=(0,1)):\n \"\"\"\n Add to ``Mesh`` a scalar array that contains distance along a specified direction.\n\n :param list lowPoint: one end of the line (small scalar values). Default (0,0,0).\n :param list highPoint: other end of the line (large scalar values). Default (0,0,1).\n :param list vrange: set the range of the scalar. Default is (0, 1).\n\n :Example:\n .. 
code-block:: python\n\n from vedo import Sphere\n s = Sphere().addElevationScalars(lowPoint=(0,0,0), highPoint=(1,1,1))\n s.addScalarBar().show(axes=1)\n\n |elevation|\n \"\"\"\n ef = vtk.vtkElevationFilter()\n ef.SetInputData(self.polydata())\n ef.SetLowPoint(lowPoint)\n ef.SetHighPoint(highPoint)\n ef.SetScalarRange(vrange)\n ef.Update()\n self._update(ef.GetOutput())\n self._mapper.ScalarVisibilityOn()\n return self\n\n\n def addShadow(self, plane=None, point=None, direction=None, clip=False, c=(0.6,0.6,0.6), alpha=1, culling=1):\n \"\"\"\n Generate a shadow out of an ``Mesh`` on one of the three Cartesian planes.\n The output is a new ``Mesh`` representing the shadow.\n This new mesh is accessible through `mesh.shadow`.\n By default the shadow mesh is placed on the bottom wall of the bounding box.\n\n See pointcloud.projectOnPlane.\n\n :param str,Plane plane: if plane is `str`, plane can be one of ['x', 'y', 'z'],\n represents x-plane, y-plane and z-plane, respectively.\n Otherwise, plane should be an instance of `vedo.shapes.Plane`.\n\n :param float,array point: if plane is `str`, point should be a float represents the intercept.\n Otherwise, point is the camera point of perspective projection\n\n :param array direction: direction of oblique projection\n\n # TODO\n :param bool clip: if true, remove the outside projection points\n\n :param int culling: choose between front [1] or backface [-1] culling or None.\n\n |shadow| |shadow.py|_\n\n |airplanes| |airplanes.py|_\n \"\"\"\n if 'x' == plane:\n shad = self.clone().projectOnPlane('x')\n if point is not None:\n shad.x(point)\n elif 'y' == plane:\n shad = self.clone().projectOnPlane('y')\n if point is not None:\n shad.y(point)\n elif 'z' == plane:\n shad = self.clone().projectOnPlane('z')\n if point is not None:\n shad.z(point)\n else:\n shad = self.clone().projectOnPlane(plane, point, direction, clip)\n\n shad.c(c).alpha(alpha).wireframe(False).flat()\n if culling==1:\n shad.frontFaceCulling()\n elif culling==-1:\n shad.backFaceCulling()\n shad.GetProperty().LightingOff()\n shad.SetPickable(False)\n shad.SetUseBounds(True)\n if shad not in self.shadows:\n self.shadows.append(shad)\n self.shadowsArgs.append(dict(plane=plane, point=point, direction=direction))\n return self\n\n def _updateShadow(self):\n p = self.GetPosition()\n for idx, shad in enumerate(self.shadows):\n args = self.shadowsArgs[idx]\n shad.SetPosition(*Points([p]).projectOnPlane(**args).GetPosition())\n return self\n\n\n def subdivide(self, N=1, method=0, mel=None):\n \"\"\"Increase the number of vertices of a surface mesh.\n\n :param int N: number of subdivisions.\n :param int method: Loop(0), Linear(1), Adaptive(2), Butterfly(3)\n :param float mel: Maximum Edge Length for Adaptive method only.\n \"\"\"\n triangles = vtk.vtkTriangleFilter()\n triangles.SetInputData(self._data)\n triangles.Update()\n originalMesh = triangles.GetOutput()\n if method == 0:\n sdf = vtk.vtkLoopSubdivisionFilter()\n elif method == 1:\n sdf = vtk.vtkLinearSubdivisionFilter()\n elif method == 2:\n sdf = vtk.vtkAdaptiveSubdivisionFilter()\n if mel is None:\n mel = self.diagonalSize() / np.sqrt(self._data.GetNumberOfPoints())/N\n sdf.SetMaximumEdgeLength(mel)\n elif method == 3:\n sdf = vtk.vtkButterflySubdivisionFilter()\n else:\n printc(\"Error in subdivide: unknown method.\", c=\"r\")\n raise RuntimeError()\n if method != 2:\n sdf.SetNumberOfSubdivisions(N)\n sdf.SetInputData(originalMesh)\n sdf.Update()\n return self._update(sdf.GetOutput())\n\n def decimate(self, fraction=0.5, N=None, 
method='quadric', boundaries=False):\n        \"\"\"\n        Downsample the number of vertices in a mesh to `fraction`.\n\n        :param float fraction: the desired target of reduction.\n        :param int N: the desired number of final points\n            (**fraction** is recalculated based on it).\n        :param str method: can be either 'quadric' or 'pro'. In the first case triangulation\n            will look more regular, irrespective of the mesh's original curvature.\n            In the second case triangles are more irregular but the mesh is more precise on more\n            curved regions.\n        :param bool boundaries: (True), in `pro` mode decide whether\n            to leave boundaries untouched or not.\n\n        .. note:: Setting ``fraction=0.1`` leaves 10% of the original nr of vertices.\n\n        |skeletonize| |skeletonize.py|_\n        \"\"\"\n        poly = self._data\n        if N: # N = desired number of points\n            Np = poly.GetNumberOfPoints()\n            fraction = float(N) / Np\n            if fraction >= 1:\n                return self\n\n        if 'quad' in method:\n            decimate = vtk.vtkQuadricDecimation()\n            # decimate.SetAttributeErrorMetric(True)\n            # if self.GetTexture():\n            #     decimate.TCoordsAttributeOn()\n            # else:\n            #     pass\n            # decimate.SetVolumePreservation(True)\n        else:\n            decimate = vtk.vtkDecimatePro()\n            decimate.PreserveTopologyOn()\n            if boundaries:\n                decimate.BoundaryVertexDeletionOff()\n            else:\n                decimate.BoundaryVertexDeletionOn()\n        decimate.SetInputData(poly)\n        decimate.SetTargetReduction(1 - fraction)\n        decimate.Update()\n        return self._update(decimate.GetOutput())\n\n    @deprecated(reason=vedo.colors.red+\"Please use smooth()\"+vedo.colors.reset)\n    def smoothLaplacian(self, niter=15, relaxfact=0.1, edgeAngle=15, featureAngle=60, boundary=False):\n        return self.smooth(niter, passBand=0.1, edgeAngle=edgeAngle, boundary=boundary)\n\n    def smooth(self, niter=15, passBand=0.1, edgeAngle=15, featureAngle=60, boundary=False):\n        \"\"\"\n        Adjust mesh point positions using the `Windowed Sinc` function interpolation kernel.\n\n        :param int niter: number of iterations.\n        :param float passBand: set the passband value for the windowed sinc filter.\n        :param float edgeAngle: edge angle to control smoothing along edges\n            (either interior or boundary).\n        :param float featureAngle: specifies the feature angle for sharp edge identification.\n\n        |mesh_smoother1| |mesh_smoother1.py|_\n        \"\"\"\n        poly = self._data\n        cl = vtk.vtkCleanPolyData()\n        cl.SetInputData(poly)\n        cl.Update()\n        smoothFilter = vtk.vtkWindowedSincPolyDataFilter()\n        smoothFilter.SetInputData(cl.GetOutput())\n        smoothFilter.SetNumberOfIterations(niter)\n        smoothFilter.SetEdgeAngle(edgeAngle)\n        smoothFilter.SetFeatureAngle(featureAngle)\n        smoothFilter.SetPassBand(passBand)\n        smoothFilter.NormalizeCoordinatesOn()\n        smoothFilter.NonManifoldSmoothingOn()\n        smoothFilter.FeatureEdgeSmoothingOn()\n        smoothFilter.SetBoundarySmoothing(boundary)\n        smoothFilter.Update()\n        return self._update(smoothFilter.GetOutput())\n\n\n    def fillHoles(self, size=None):\n        \"\"\"Identifies and fills holes in input mesh.\n        Holes are identified by locating boundary edges, linking them together into loops,\n        and then triangulating the resulting loops.\n\n        :param float size: approximate limit to the size of the hole that can be filled.\n\n        Example: |fillholes.py|_\n        \"\"\"\n        fh = vtk.vtkFillHolesFilter()\n        if not size:\n            mb = self.maxBoundSize()\n            size = mb / 10\n        fh.SetHoleSize(size)\n        fh.SetInputData(self._data)\n        fh.Update()\n        return self._update(fh.GetOutput())\n\n\n    def isInside(self, point, tol=0.0001):\n        \"\"\"\n        Return True if point is inside a polydata closed surface.\n        \"\"\"\n        poly = self.polydata()\n        points = 
vtk.vtkPoints()\n points.InsertNextPoint(point)\n pointsPolydata = vtk.vtkPolyData()\n pointsPolydata.SetPoints(points)\n sep = vtk.vtkSelectEnclosedPoints()\n sep.SetTolerance(tol)\n sep.CheckSurfaceOff()\n sep.SetInputData(pointsPolydata)\n sep.SetSurfaceData(poly)\n sep.Update()\n return sep.IsInside(0)\n\n\n def insidePoints(self, pts, invert=False, tol=1e-05, returnIds=False):\n \"\"\"\n Return the point cloud that is inside mesh surface.\n\n |pca| |pca.py|_\n \"\"\"\n if isinstance(pts, Points):\n pointsPolydata = pts.polydata()\n pts = pts.points()\n else:\n vpoints = vtk.vtkPoints()\n pts = np.ascontiguousarray(pts)\n vpoints.SetData(numpy2vtk(pts, dtype=float))\n pointsPolydata = vtk.vtkPolyData()\n pointsPolydata.SetPoints(vpoints)\n\n sep = vtk.vtkSelectEnclosedPoints()\n sep.SetTolerance(tol)\n sep.SetInputData(pointsPolydata)\n sep.SetSurfaceData(self.polydata())\n sep.SetInsideOut(invert)\n sep.Update()\n\n mask = Mesh(sep.GetOutput()).pointdata[0].astype(np.bool)\n ids = np.array(range(len(pts)))[mask]\n\n if returnIds:\n return ids\n else:\n pcl = Points(pts[ids])\n pcl.name = \"insidePoints\"\n return pcl\n\n def boundaries(self,\n boundaryEdges=True,\n nonManifoldEdges=False,\n featureAngle=180,\n returnPointIds=False,\n returnCellIds=False,\n ):\n \"\"\"\n Return a ``Mesh`` that shows the boundary lines of an input mesh.\n\n :param bool boundaryEdges: Turn on/off the extraction of boundary edges.\n :param bool nonManifoldEdges: Turn on/off the extraction of non-manifold edges.\n :param float featureAngle: Specify the min angle btw 2 faces for extracting edges.\n :param bool returnPointIds: return a numpy array of point indices\n :param bool returnCellIds: return a numpy array of cell indices\n \"\"\"\n fe = vtk.vtkFeatureEdges()\n fe.SetBoundaryEdges(boundaryEdges)\n fe.SetFeatureAngle(featureAngle)\n fe.SetNonManifoldEdges(nonManifoldEdges)\n fe.ColoringOff()\n\n if returnPointIds or returnCellIds:\n\n idf = vtk.vtkIdFilter()\n idf.SetInputData(self.polydata())\n idf.SetIdsArrayName(\"BoundaryIds\")\n idf.SetPointIds(returnPointIds)\n idf.SetCellIds(returnCellIds)\n idf.Update()\n fe.SetInputData(idf.GetOutput())\n fe.ManifoldEdgesOff()\n fe.NonManifoldEdgesOff()\n fe.BoundaryEdgesOn()\n fe.FeatureEdgesOff()\n fe.Update()\n if returnPointIds:\n vid = fe.GetOutput().GetPointData().GetArray(\"BoundaryIds\")\n if returnCellIds:\n vid = fe.GetOutput().GetCellData().GetArray(\"BoundaryIds\")\n npid = vtk2numpy(vid).astype(int)\n return npid\n\n else:\n\n fe.SetInputData(self.polydata())\n fe.Update()\n return Mesh(fe.GetOutput(), c=\"p\").lw(5).lighting('off')\n\n\n def imprint(self, loopline, tol=0.01):\n \"\"\"\n Imprint the contact surface of one object onto another surface.\n\n Parameters\n ----------\n loopline : vedo.shapes.Line\n a Line object to be imprinted onto the mesh.\n tol : TYPE, optional\n projection tolerance which controls how close the imprint surface must be to the target.\n The default is 0.01.\n\n :Example:\n\n .. 
code-block:: python\n\n from vedo import *\n grid = Grid()#.triangulate()\n circle = Circle(r=0.3, res=24).pos(0.11,0.12)\n line = Line(circle, closed=True, lw=4, c='r4')\n grid.imprint(line)\n show(grid, line, axes=1)\n \"\"\"\n loop = vtk.vtkContourLoopExtraction()\n loop.SetInputData(loopline.polydata())\n loop.Update()\n\n cleanLoop = vtk.vtkCleanPolyData()\n cleanLoop.SetInputData(loop.GetOutput())\n cleanLoop.Update()\n\n imp = vtk.vtkImprintFilter()\n imp.SetTargetData(self.polydata())\n imp.SetImprintData(cleanLoop.GetOutput())\n imp.SetTolerance(tol)\n imp.BoundaryEdgeInsertionOn()\n imp.TriangulateOutputOn()\n imp.Update()\n return self._update(imp.GetOutput())\n\n\n def connectedVertices(self, index, returnIds=False):\n \"\"\"Find all vertices connected to an input vertex specified by its index.\n\n :param bool returnIds: return vertex IDs instead of vertex coordinates.\n\n |connVtx| |connVtx.py|_\n \"\"\"\n poly = self._data\n\n cellIdList = vtk.vtkIdList()\n poly.GetPointCells(index, cellIdList)\n\n idxs = []\n for i in range(cellIdList.GetNumberOfIds()):\n pointIdList = vtk.vtkIdList()\n poly.GetCellPoints(cellIdList.GetId(i), pointIdList)\n for j in range(pointIdList.GetNumberOfIds()):\n idj = pointIdList.GetId(j)\n if idj == index:\n continue\n if idj in idxs:\n continue\n idxs.append(idj)\n\n if returnIds:\n return idxs\n else:\n trgp = []\n for i in idxs:\n p = [0, 0, 0]\n poly.GetPoints().GetPoint(i, p)\n trgp.append(p)\n return np.array(trgp)\n\n\n def connectedCells(self, index, returnIds=False):\n \"\"\"Find all cellls connected to an input vertex specified by its index.\"\"\"\n\n # Find all cells connected to point index\n dpoly = self._data\n cellPointIds = vtk.vtkIdList()\n dpoly.GetPointCells(index, cellPointIds)\n\n ids = vtk.vtkIdTypeArray()\n ids.SetNumberOfComponents(1)\n rids = []\n for k in range(cellPointIds.GetNumberOfIds()):\n cid = cellPointIds.GetId(k)\n ids.InsertNextValue(cid)\n rids.append(int(cid))\n if returnIds:\n return rids\n\n selectionNode = vtk.vtkSelectionNode()\n selectionNode.SetFieldType(vtk.vtkSelectionNode.CELL)\n selectionNode.SetContentType(vtk.vtkSelectionNode.INDICES)\n selectionNode.SetSelectionList(ids)\n selection = vtk.vtkSelection()\n selection.AddNode(selectionNode)\n extractSelection = vtk.vtkExtractSelection()\n extractSelection.SetInputData(0, dpoly)\n extractSelection.SetInputData(1, selection)\n extractSelection.Update()\n gf = vtk.vtkGeometryFilter()\n gf.SetInputData(extractSelection.GetOutput())\n gf.Update()\n return Mesh(gf.GetOutput()).lw(1)\n\n def intersectWithLine(self, p0, p1=None, returnIds=False, tol=0):\n \"\"\"Return the list of points intersecting the mesh\n along the segment defined by two points `p0` and `p1`.\n\n :param bool returnIds: return the cell ids instead of point coords\n :param float tol: tolerance/precision of the computation (0 = auto).\n\n :Example:\n .. 
code-block:: python\n\n from vedo import *\n s = Spring()\n pts = s.intersectWithLine([0,0,0], [1,0.1,0])\n ln = Line([0,0,0], [1,0.1,0], c='blue')\n ps = Points(pts, r=10, c='r')\n show(s, ln, ps, bg='white')\n\n |intline|\n \"\"\"\n if isinstance(p0, Points):\n p0, p1 = p0.points()\n\n if not self.line_locator:\n self.line_locator = vtk.vtkOBBTree()\n self.line_locator.SetDataSet(self.polydata())\n if not tol:\n tol = mag(np.asarray(p1)-np.asarray(p0))/10000\n self.line_locator.SetTolerance(tol)\n self.line_locator.BuildLocator()\n\n intersectPoints = vtk.vtkPoints()\n idlist = vtk.vtkIdList()\n self.line_locator.IntersectWithLine(p0, p1, intersectPoints, idlist)\n pts = []\n for i in range(intersectPoints.GetNumberOfPoints()):\n intersection = [0, 0, 0]\n intersectPoints.GetPoint(i, intersection)\n pts.append(intersection)\n pts = np.array(pts)\n\n if returnIds:\n pts_ids = []\n for i in range(idlist.GetNumberOfIds()):\n cid = idlist.GetId(i)\n pts_ids.append([pts[i], cid])\n return np.array(pts_ids)\n else:\n return pts\n\n\n def silhouette(self, direction=None, borderEdges=True, featureAngle=False):\n \"\"\"\n Return a new line ``Mesh`` which corresponds to the outer `silhouette`\n of the input as seen along a specified `direction`, this can also be\n a ``vtkCamera`` object.\n\n :param list direction: viewpoint direction vector.\n If *None* this is guessed by looking at the minimum\n of the sides of the bounding box.\n :param bool borderEdges: enable or disable generation of border edges\n :param float featureAngle: minimal angle for sharp edges detection.\n If set to `False` the functionality is disabled.\n\n |silhouette| |silhouette.py|_\n \"\"\"\n sil = vtk.vtkPolyDataSilhouette()\n sil.SetInputData(self.polydata())\n sil.SetBorderEdges(borderEdges)\n if featureAngle is False:\n sil.SetEnableFeatureAngle(0)\n else:\n sil.SetEnableFeatureAngle(1)\n sil.SetFeatureAngle(featureAngle)\n\n if (direction is None\n and vedo.settings.plotter_instance\n and vedo.settings.plotter_instance.camera):\n sil.SetCamera(vedo.settings.plotter_instance.camera)\n m = Mesh()\n m._mapper.SetInputConnection(sil.GetOutputPort())\n\n elif isinstance(direction, vtk.vtkCamera):\n sil.SetCamera(direction)\n m = Mesh()\n m._mapper.SetInputConnection(sil.GetOutputPort())\n\n elif direction == '2d':\n sil.SetVector(3.4,4.5,5.6) # random\n sil.SetDirectionToSpecifiedVector()\n sil.Update()\n m = Mesh(sil.GetOutput())\n\n elif isSequence(direction):\n sil.SetVector(direction)\n sil.SetDirectionToSpecifiedVector()\n sil.Update()\n m = Mesh(sil.GetOutput())\n else:\n printc('Error in silhouette(): direction is', [direction], c='r')\n printc(' render the scene with show() or specify camera/direction', c='r')\n return self\n\n m.lw(2).c((0,0,0)).lighting('off')\n m._mapper.SetResolveCoincidentTopologyToPolygonOffset()\n return m\n\n\n def followCamera(self, cam=None):\n \"\"\"\n Mesh object will follow camera movements and stay locked to it.\n Use ``mesh.followCamera(False)`` to disable it.\n\n :param vtkCamera cam: if `None` the text will auto-orient itself to the active camera.\n A ``vtkCamera`` object can also be passed.\n \"\"\"\n if cam is False:\n self.SetCamera(None)\n return self\n if isinstance(cam, vtk.vtkCamera):\n self.SetCamera(cam)\n else:\n plt = vedo.settings.plotter_instance\n if plt and plt.camera:\n self.SetCamera(plt.camera)\n else:\n # postpone to show() call\n self._set2actcam=True\n return self\n\n\n def isobands(self, n=10, vmin=None, vmax=None):\n \"\"\"\n Return a new ``Mesh`` representing 
the isobands of the active scalars.\n This is a new mesh where the scalar is now associated to cell faces and\n used to colorize the mesh.\n\n :param int n: number of isolines in the range\n :param float vmin: minimum of the range\n :param float vmax: maximum of the range\n\n |isolines| |isolines.py|_\n \"\"\"\n r0, r1 = self._data.GetScalarRange()\n if vmin is None:\n vmin = r0\n if vmax is None:\n vmax = r1\n\n # --------------------------------\n bands = []\n dx = (vmax - vmin)/float(n)\n b = [vmin, vmin + dx / 2.0, vmin + dx]\n i = 0\n while i < n:\n bands.append(b)\n b = [b[0] + dx, b[1] + dx, b[2] + dx]\n i += 1\n\n # annotate, use the midpoint of the band as the label\n lut = self.mapper().GetLookupTable()\n labels = []\n for b in bands:\n labels.append('{:4.2f}'.format(b[1]))\n values = vtk.vtkVariantArray()\n for la in labels:\n values.InsertNextValue(vtk.vtkVariant(la))\n for i in range(values.GetNumberOfTuples()):\n lut.SetAnnotation(i, values.GetValue(i).ToString())\n\n bcf = vtk.vtkBandedPolyDataContourFilter()\n bcf.SetInputData(self.polydata())\n # Use either the minimum or maximum value for each band.\n for i in range(len(bands)):\n bcf.SetValue(i, bands[i][2])\n # We will use an indexed lookup table.\n bcf.SetScalarModeToIndex()\n bcf.GenerateContourEdgesOff()\n bcf.Update()\n bcf.GetOutput().GetCellData().GetScalars().SetName(\"IsoBands\")\n m1 = Mesh(bcf.GetOutput()).computeNormals(cells=True)\n m1.mapper().SetLookupTable(lut)\n return m1\n\n\n def isolines(self, n=10, vmin=None, vmax=None):\n \"\"\"\n Return a new ``Mesh`` representing the isolines of the active scalars.\n\n :param int n: number of isolines in the range\n :param float vmin: minimum of the range\n :param float vmax: maximum of the range\n\n |isolines| |isolines.py|_\n \"\"\"\n bcf = vtk.vtkContourFilter()\n bcf.SetInputData(self.polydata())\n r0, r1 = self._data.GetScalarRange()\n if vmin is None:\n vmin = r0\n if vmax is None:\n vmax = r1\n bcf.GenerateValues(n, vmin, vmax)\n bcf.Update()\n sf = vtk.vtkStripper()\n sf.SetJoinContiguousSegments(True)\n sf.SetInputData(bcf.GetOutput())\n sf.Update()\n cl = vtk.vtkCleanPolyData()\n cl.SetInputData(sf.GetOutput())\n cl.Update()\n msh = Mesh(cl.GetOutput(), c=\"k\").lighting('off')\n msh._mapper.SetResolveCoincidentTopologyToPolygonOffset()\n return msh\n\n\n def extrude(self, zshift=1, rotation=0, dR=0, cap=True, res=1):\n \"\"\"\n Sweep a polygonal data creating a \"skirt\" from free edges and lines, and lines from vertices.\n The input dataset is swept around the z-axis to create new polygonal primitives.\n For example, sweeping a line results in a cylindrical shell, and sweeping a circle creates a torus.\n\n You can control whether the sweep of a 2D object (i.e., polygon or triangle strip)\n is capped with the generating geometry.\n Also, you can control the angle of rotation, and whether translation along the z-axis\n is performed along with the rotation. (Translation is useful for creating \"springs\").\n You also can adjust the radius of the generating geometry using the \"dR\" keyword.\n\n The skirt is generated by locating certain topological features.\n Free edges (edges of polygons or triangle strips only used by one polygon or triangle strips)\n generate surfaces. This is true also of lines or polylines. 
Vertices generate lines.\n\n This filter can be used to model axisymmetric objects like cylinders, bottles, and wine glasses;\n or translational/rotational symmetric objects like springs or corkscrews.\n\n Warning:\n\n Some polygonal objects have no free edges (e.g., sphere). When swept, this will result\n in two separate surfaces if capping is on, or no surface if capping is off.\n\n |extrude| |extrude.py|_\n \"\"\"\n if isSequence(zshift):\n # ms = [] # todo\n # poly0 = self.clone().polydata()\n # for i in range(len(zshift)-1):\n # rf = vtk.vtkRotationalExtrusionFilter()\n # rf.SetInputData(poly0)\n # rf.SetResolution(res)\n # rf.SetCapping(0)\n # rf.SetAngle(rotation)\n # rf.SetTranslation(zshift)\n # rf.SetDeltaRadius(dR)\n # rf.Update()\n # poly1 = rf.GetOutput()\n return self\n else:\n rf = vtk.vtkRotationalExtrusionFilter()\n # rf = vtk.vtkLinearExtrusionFilter()\n rf.SetInputData(self.polydata(False)) #must not be transformed\n rf.SetResolution(res)\n rf.SetCapping(cap)\n rf.SetAngle(rotation)\n rf.SetTranslation(zshift)\n rf.SetDeltaRadius(dR)\n rf.Update()\n m = Mesh(rf.GetOutput(), c=self.c(), alpha=self.alpha())\n prop = vtk.vtkProperty()\n prop.DeepCopy(self.property)\n m.SetProperty(prop)\n m.property = prop\n # assign the same transformation\n m.SetOrigin(self.GetOrigin())\n m.SetScale(self.GetScale())\n m.SetOrientation(self.GetOrientation())\n m.SetPosition(self.GetPosition())\n return m.computeNormals(cells=False).phong()\n\n\n def splitByConnectivity(self, maxdepth=1000):\n \"\"\"\n Split a mesh by connectivity and order the pieces by increasing area.\n\n :param int maxdepth: only consider this number of mesh parts.\n\n :param bool addRegions\n\n |splitmesh| |splitmesh.py|_\n \"\"\"\n pd = self.polydata(False)\n cf = vtk.vtkConnectivityFilter()\n cf.SetInputData(pd)\n cf.SetExtractionModeToAllRegions()\n cf.ColorRegionsOn()\n cf.Update()\n a = Mesh(cf.GetOutput())\n alist = []\n\n for t in range(max(a.pointdata[\"RegionId\"]) + 1):\n if t == maxdepth:\n break\n suba = a.clone().threshold(\"RegionId\", t - 0.1, t + 0.1)\n area = suba.area()\n # print('splitByConnectivity piece:', t, ' area:', area, ' N:',suba.N())\n alist.append([suba, area])\n\n alist.sort(key=lambda x: x[1])\n alist.reverse()\n blist = []\n for i, l in enumerate(alist):\n l[0].color(i + 1).phong()\n l[0].mapper().ScalarVisibilityOff()\n blist.append(l[0])\n return blist\n\n\n def extractLargestRegion(self):\n \"\"\"\n Extract the largest connected part of a mesh and discard all the smaller pieces.\n\n .. 
hint:: |largestregion.py|_\n \"\"\"\n conn = vtk.vtkConnectivityFilter()\n conn.SetExtractionModeToLargestRegion()\n conn.ScalarConnectivityOff()\n conn.SetInputData(self._data)\n conn.Update()\n m = Mesh(conn.GetOutput())\n pr = vtk.vtkProperty()\n pr.DeepCopy(self.property)\n m.SetProperty(pr)\n m.property = pr\n # assign the same transformation\n m.SetOrigin(self.GetOrigin())\n m.SetScale(self.GetScale())\n m.SetOrientation(self.GetOrientation())\n m.SetPosition(self.GetPosition())\n vis = self._mapper.GetScalarVisibility()\n m._mapper.SetScalarVisibility(vis)\n return m\n\n def boolean(self, operation, mesh2):\n \"\"\"Volumetric union, intersection and subtraction of surfaces.\n\n :param str operation: allowed operations: ``'plus'``, ``'intersect'``, ``'minus'``.\n\n |boolean| |boolean.py|_\n \"\"\"\n bf = vtk.vtkBooleanOperationPolyDataFilter()\n poly1 = self.computeNormals().polydata()\n poly2 = mesh2.computeNormals().polydata()\n if operation.lower() == \"plus\" or operation.lower() == \"+\":\n bf.SetOperationToUnion()\n elif operation.lower() == \"intersect\":\n bf.SetOperationToIntersection()\n elif operation.lower() == \"minus\" or operation.lower() == \"-\":\n bf.SetOperationToDifference()\n #bf.ReorientDifferenceCellsOn()\n bf.SetInputData(0, poly1)\n bf.SetInputData(1, poly2)\n bf.Update()\n mesh = Mesh(bf.GetOutput(), c=None)\n mesh.flat()\n mesh.name = self.name+operation+mesh2.name\n return mesh\n\n\n def intersectWith(self, mesh2, tol=1e-06):\n \"\"\"\n Intersect this Mesh with the input surface to return a line.\n\n .. hint:: |surfIntersect.py|_\n \"\"\"\n bf = vtk.vtkIntersectionPolyDataFilter()\n if isinstance(self, Mesh):\n poly1 = self.polydata()\n else:\n poly1 = self.GetMapper().GetInput()\n if isinstance(mesh2, Mesh):\n poly2 = mesh2.polydata()\n else:\n poly2 = mesh2.GetMapper().GetInput()\n bf.SetInputData(0, poly1)\n bf.SetInputData(1, poly2)\n bf.Update()\n mesh = Mesh(bf.GetOutput(), \"k\", 1).lighting('off')\n mesh.GetProperty().SetLineWidth(3)\n mesh.name = \"surfaceIntersection\"\n return mesh\n\n\n def geodesic(self, start, end):\n \"\"\"Dijkstra algorithm to compute the geodesic line.\n Takes as input a polygonal mesh and performs a single source\n shortest path calculation.\n\n :param int,list start: start vertex index or close point `[x,y,z]`\n :param int,list end: end vertex index or close point `[x,y,z]`\n\n |geodesic| |geodesic.py|_\n \"\"\"\n if isSequence(start):\n cc = self.points()\n pa = Points(cc)\n start = pa.closestPoint(start, returnPointId=True)\n end = pa.closestPoint(end, returnPointId=True)\n\n dijkstra = vtk.vtkDijkstraGraphGeodesicPath()\n dijkstra.SetInputData(self.polydata())\n dijkstra.SetStartVertex(end) # inverted in vtk\n dijkstra.SetEndVertex(start)\n dijkstra.Update()\n\n weights = vtk.vtkDoubleArray()\n dijkstra.GetCumulativeWeights(weights)\n\n idlist = dijkstra.GetIdList()\n ids = [idlist.GetId(i) for i in range(idlist.GetNumberOfIds())]\n\n length = weights.GetMaxId() + 1\n arr = np.zeros(length)\n for i in range(length):\n arr[i] = weights.GetTuple(i)[0]\n\n poly = dijkstra.GetOutput()\n\n vdata = numpy2vtk(arr)\n vdata.SetName(\"CumulativeWeights\")\n poly.GetPointData().AddArray(vdata)\n\n vdata2 = numpy2vtk(ids, dtype=np.uint)\n vdata2.SetName(\"VertexIDs\")\n poly.GetPointData().AddArray(vdata2)\n poly.GetPointData().Modified()\n\n dmesh = Mesh(poly, c='k')\n prop = vtk.vtkProperty()\n prop.DeepCopy(self.property)\n prop.SetLineWidth(3)\n prop.SetOpacity(1)\n dmesh.SetProperty(prop)\n dmesh.property = prop\n dmesh.name 
= \"geodesicLine\"\n return dmesh\n\n\n\n"
] | [
[
"numpy.dot",
"numpy.split",
"numpy.log",
"numpy.ascontiguousarray",
"numpy.asarray",
"numpy.linalg.norm",
"numpy.arccos",
"numpy.argmin",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.isin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
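The row above embeds vedo's Mesh methods, which are thin wrappers over VTK filters (vtkQuadricDecimation, vtkWindowedSincPolyDataFilter, vtkSelectEnclosedPoints, ...). Below is a minimal standalone sketch of two of those underlying calls — the decimation behind Mesh.decimate() and the point-in-surface test behind Mesh.isInside() — assuming only a working vtk install; the sphere source is a stand-in test surface, not part of the row.

import vtk

# Stand-in test surface (any closed vtkPolyData would do).
src = vtk.vtkSphereSource()
src.SetThetaResolution(48)
src.SetPhiResolution(48)
src.Update()
poly = src.GetOutput()

# Quadric decimation, as wrapped by Mesh.decimate(fraction=0.1, method='quadric'):
# a target reduction of (1 - fraction) keeps roughly 10% of the vertices.
dec = vtk.vtkQuadricDecimation()
dec.SetInputData(poly)
dec.SetTargetReduction(1.0 - 0.1)
dec.Update()
print(poly.GetNumberOfPoints(), "->", dec.GetOutput().GetNumberOfPoints())

# Point-in-surface test, as wrapped by Mesh.isInside().
pts = vtk.vtkPoints()
pts.InsertNextPoint(0.0, 0.0, 0.0)   # sphere centre: expected inside
probe = vtk.vtkPolyData()
probe.SetPoints(pts)
sep = vtk.vtkSelectEnclosedPoints()
sep.SetTolerance(1e-4)
sep.SetInputData(probe)
sep.SetSurfaceData(poly)
sep.Update()
print(bool(sep.IsInside(0)))         # True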
cuishuhao/HDA | [
"1733ca74eee7839b455e9ffd7a169bc54b272745"
] | [
"scripts/train_ssda.py"
] | [
"import argparse\nimport os\nimport os.path as osp\nimport sys\nsys.path.append(\".\")\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\nimport random\nimport pdb\nimport math\nfrom distutils.version import LooseVersion\n\n\nimport network.network as network\nimport utils.loss as loss\nimport utils.lr_schedule as lr_schedule\nimport dataset.preprocess as prep\nfrom dataset.dataloader import ImageList\n\ndef image_classification_test(loader, model, heuristic=False):\n start_test = True\n with torch.no_grad():\n iter_test = iter(loader[\"test\"])\n for i in range(len(loader['test'])):\n data = iter_test.next()\n inputs = data[0]\n labels = data[1]\n inputs = inputs.cuda()\n labels = labels.cuda()\n _, outputs ,_ = model(inputs,heuristic=heuristic) \n if start_test:\n all_output = outputs.float()\n all_label = labels.float()\n start_test = False\n else:\n all_output = torch.cat((all_output, outputs.float()), 0)\n all_label = torch.cat((all_label, labels.float()), 0)\n _, predict = torch.max(all_output, 1)\n accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])\n return accuracy\n\n##calculate the gaussianity\ndef nogauss(a):\n num = a.shape[1]\n std = torch.std(a, dim=1, keepdim=True).repeat(1,num)\n mean = torch.mean(a, dim=1, keepdim=True).repeat(1,num)\n cal = (a-mean)/std\n y = torch.mean(torch.pow(cal,4),1)-3*torch.pow(torch.mean(torch.pow(cal,2),1),2)\n return torch.mean(torch.abs(y))\n\ndef train_ssda(config):\n ## set pre-process\n prep_dict = {}\n dsets = {}\n dset_loaders = {}\n data_config = config[\"data\"]\n prep_config = config[\"prep\"]\n prep_dict[\"source\"] = prep.image_target(**config[\"prep\"]['params'])\n prep_dict[\"target1\"] = prep.image_target(**config[\"prep\"]['params'])\n prep_dict[\"target2\"] = prep.image_target(**config[\"prep\"]['params'])\n prep_dict[\"test\"] = prep.image_test(**config[\"prep\"]['params'])\n\n ## prepare data\n train_bs = data_config[\"source\"][\"batch_size\"]\n test_bs = data_config[\"test\"][\"batch_size\"]\n dsets[\"source\"] = ImageList(open(data_config[\"source\"][\"list_path\"]).readlines(), \\\n transform=prep_dict[\"source\"])\n dset_loaders[\"source\"] = DataLoader(dsets[\"source\"], batch_size=train_bs, \\\n shuffle=True, num_workers=4, drop_last=True)\n dsets[\"target1\"] = ImageList(open(data_config[\"target1\"][\"list_path\"]).readlines(), \\\n transform=prep_dict[\"target1\"])\n dset_loaders[\"target1\"] = DataLoader(dsets[\"target1\"], batch_size=train_bs, \\\n shuffle=True, num_workers=4, drop_last=True)\n\n dsets[\"target2\"] = ImageList(open(data_config[\"target2\"][\"list_path\"]).readlines(), \\\n transform=prep_dict[\"target2\"])\n dset_loaders[\"target2\"] = DataLoader(dsets[\"target2\"], batch_size=train_bs, \\\n shuffle=True, num_workers=4, drop_last=True)\n\n dsets[\"test\"] = ImageList(open(data_config[\"test\"][\"list_path\"]).readlines(), \\\n transform=prep_dict[\"test\"])\n dset_loaders[\"test\"] = DataLoader(dsets[\"test\"], batch_size=test_bs, \\\n shuffle=False, num_workers=4)\n\n ## set base network\n class_num = config[\"network\"][\"params\"][\"class_num\"]\n net_config = config[\"network\"]\n base_network = net_config[\"name\"](**net_config[\"params\"])\n base_network = base_network.cuda()\n\n ## add additional network for some methods\n ad_net = network.AdversarialNetwork( class_num, 1024,multi=3)\n ad_net = ad_net.cuda()\n \n ## set 
optimizer\n parameter_list = base_network.get_parameters() + ad_net.get_parameters()\n optimizer_config = config[\"optimizer\"]\n optimizer = optimizer_config[\"type\"](parameter_list, \\\n **(optimizer_config[\"optim_params\"]))\n param_lr = []\n for param_group in optimizer.param_groups:\n param_lr.append(param_group[\"lr\"])\n schedule_param = optimizer_config[\"lr_param\"]\n lr_scheduler = lr_schedule.schedule_dict[optimizer_config[\"lr_type\"]]\n\n #multi gpu\n gpus = config['gpu'].split(',')\n if len(gpus) > 1:\n ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i,k in enumerate(gpus)])\n base_network = nn.DataParallel(base_network, device_ids=[int(i) for i,k in enumerate(gpus)])\n \n ## train \n len_train_source = len(dset_loaders[\"source\"])\n len_train_target1 = len(dset_loaders[\"target1\"])\n len_train_target2 = len(dset_loaders[\"target2\"])\n transfer_loss_value = classifier_loss_value = total_loss_value = 0.0\n for i in range(config[\"num_iterations\"]):\n #test\n if (i % config[\"test_interval\"] == config[\"test_interval\"] - 1) or (i>int(config[\"num_iterations\"]*0.9) and i % (config[\"test_interval\"]/2) == (config[\"test_interval\"]/2) - 1):\n base_network.train(False)\n temp_acc = image_classification_test(dset_loaders, base_network, heuristic=config[\"heuristic\"])\n temp_model = nn.Sequential(base_network)\n log_str = \"iter: {:05d}, precision: {:.5f}\".format(i, temp_acc)\n config[\"out_file\"].write(log_str+\"\\n\")\n config[\"out_file\"].flush()\n print(log_str)\n #save model\n if i % config[\"snapshot_interval\"] == 0 and i:\n torch.save(base_network.state_dict(), osp.join(config[\"output_path\"], \\\n \"iter_{:05d}_model.pth.tar\".format(i)))\n \n ## train one iter\n base_network.train(True)\n ad_net.train(True)\n loss_params = config[\"loss\"] \n optimizer = lr_scheduler(optimizer, i, **schedule_param)\n optimizer.zero_grad()\n\n #dataloader\n if i % len_train_source == 0:\n iter_source = iter(dset_loaders[\"source\"])\n if i % len_train_target1 == 0:\n iter_target1 = iter(dset_loaders[\"target1\"])\n if i % len_train_target2 == 0:\n iter_target2 = iter(dset_loaders[\"target2\"])\n\n #data \n inputs_source, labels_source = iter_source.next()\n inputs_target1, labels_target1 = iter_target1.next()\n inputs_target2, _ = iter_target2.next()\n inputs_source, inputs_target1, labels_source = inputs_source.cuda(), inputs_target1.cuda(), labels_source.cuda()\n inputs_target2, labels_target1 = inputs_target2.cuda(), labels_target1.cuda()\n inputs_st = torch.cat((inputs_source,inputs_target1),0)\n labels_st = torch.cat((labels_source,labels_target1),0)\n\n #network\n features_st, outputs_st, focal_st = base_network(inputs_st,heuristic=config[\"heuristic\"])\n features_t2, outputs_t2, focal_t2 = base_network(inputs_target2,heuristic=config[\"heuristic\"])\n focals = torch.cat((focal_st, focal_t2),dim=0)\n outputs = torch.cat((outputs_st, outputs_t2), dim=0)\n softmax_out = nn.Softmax(dim=1)(outputs)\n\n #loss calculation\n transfer_loss, mean_entropy, heuristic = loss.HDA_SSDA([softmax_out,focals], ad_net, network.calc_coeff(i))\n \n #similarity\n sim_st = torch.sum(outputs_st *focal_st,1)/torch.sqrt(torch.sum(torch.pow(outputs_st,2),1))/torch.sqrt(torch.sum(torch.pow(focal_st,2),1))\n sim_t2 = torch.sum(outputs_t2 *focal_t2,1)/torch.sqrt(torch.sum(torch.pow(outputs_t2,2),1))/torch.sqrt(torch.sum(torch.pow(focal_t2,2),1))\n relate_source = torch.mean(torch.abs(sim_st))\n relate_target = torch.mean(torch.abs(sim_t2))\n relate_all = relate_source + relate_target\n\n 
#calculate theta\n #theta = torch.acos(torch.cat((sim_st,sim_t2)))\n #m_theta = torch.mean(theta)\n #s_theta = torch.std(theta)\n\n #gaussianity\n gaussian = torch.abs(nogauss(outputs) - nogauss(outputs+focals))\n\n classifier_loss = nn.CrossEntropyLoss()(outputs_st, labels_st)\n total_loss = loss_params[\"trade_off\"] * transfer_loss + classifier_loss + config[\"heuristic\"] * heuristic #+ gaussian *config[\"gauss\"]\n\n total_loss.backward()\n optimizer.step()\n\n #if i % (5*config[\"print_num\"]) == 0 or (i %(config[\"print_num\"])==0 and i<4*config[\"print_num\"]):\n if i % config[\"print_num\"] == 0 :\n log_str = \"iter:{:05d},transfer:{:.5f},classifier:{:.5f},heuristic:{:.5f},relate:{:.5f},gaussian:{:.5f}\".format(i, transfer_loss, classifier_loss, heuristic, relate_all, gaussian)\n config[\"out_file\"].write(log_str+\"\\n\")\n config[\"out_file\"].flush()\n print(log_str)\n\n"
] | [
[
"torch.abs",
"torch.mean",
"torch.nn.Sequential",
"torch.nn.Softmax",
"torch.max",
"torch.nn.CrossEntropyLoss",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.std",
"torch.no_grad",
"torch.pow",
"torch.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
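The row above trains a semi-supervised domain-adaptation model whose nogauss() helper scores non-Gaussianity as the mean absolute excess kurtosis of each standardized row, |E[z^4] - 3 E[z^2]^2|. A self-contained sketch of that measure with a quick sanity check follows (assumes PyTorch; the sample sizes are arbitrary, and broadcasting with keepdim=True replaces the original's explicit .repeat(), which is equivalent).

import torch

def nogauss(a):
    # Standardize each row, then average |E[z^4] - 3*E[z^2]^2| over rows;
    # this excess-kurtosis statistic is ~0 when the rows are Gaussian.
    std = torch.std(a, dim=1, keepdim=True)
    mean = torch.mean(a, dim=1, keepdim=True)
    z = (a - mean) / std
    y = torch.mean(torch.pow(z, 4), 1) - 3 * torch.pow(torch.mean(torch.pow(z, 2), 1), 2)
    return torch.mean(torch.abs(y))

torch.manual_seed(0)
print(nogauss(torch.randn(64, 10000)))  # ~0 for Gaussian samples
print(nogauss(torch.rand(64, 10000)))   # ~1.2 (uniform excess kurtosis is -6/5)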
chrinide/pyscf | [
"8ea26f650566faac6621af0101441becaf7fe399"
] | [
"pyscf/hessian/rks.py"
] | [
"#!/usr/bin/env python\n# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Author: Qiming Sun <[email protected]>\n#\n\n'''\nNon-relativistic RKS analytical Hessian\n'''\n\nimport time\nimport numpy\nfrom pyscf import lib\nfrom pyscf.lib import logger\nfrom pyscf.hessian import rhf as rhf_hess\nfrom pyscf.grad import rks as rks_grad\nfrom pyscf.dft import numint\n\n\n# import pyscf.grad.rks to activate nuc_grad_method method\nfrom pyscf.grad import rks\n\n\ndef partial_hess_elec(hessobj, mo_energy=None, mo_coeff=None, mo_occ=None,\n atmlst=None, max_memory=4000, verbose=None):\n log = logger.new_logger(hessobj, verbose)\n time0 = t1 = (time.clock(), time.time())\n\n mol = hessobj.mol\n mf = hessobj.base\n if mo_energy is None: mo_energy = mf.mo_energy\n if mo_occ is None: mo_occ = mf.mo_occ\n if mo_coeff is None: mo_coeff = mf.mo_coeff\n if atmlst is None: atmlst = range(mol.natm)\n\n nao, nmo = mo_coeff.shape\n mocc = mo_coeff[:,mo_occ>0]\n nocc = mocc.shape[1]\n dm0 = numpy.dot(mocc, mocc.T) * 2\n # Energy weighted density matrix\n dme0 = numpy.einsum('pi,qi,i->pq', mocc, mocc, mo_energy[mo_occ>0]) * 2\n\n hcore_deriv = hessobj.hcore_generator(mol)\n s1aa, s1ab, s1a = rhf_hess.get_ovlp(mol)\n\n if mf.nlc != '':\n raise NotImplementedError\n #enabling range-separated hybrids\n omega, alpha, beta = mf._numint.rsh_coeff(mf.xc)\n if abs(omega) > 1e-10:\n hyb = alpha + beta\n else:\n hyb = mf._numint.hybrid_coeff(mf.xc, spin=mol.spin)\n\n mem_now = lib.current_memory()[0]\n max_memory = max(2000, mf.max_memory*.9-mem_now)\n veff_diag = _get_vxc_diag(hessobj, mo_coeff, mo_occ, max_memory)\n if abs(hyb) > 1e-10:\n vj1, vk1 = rhf_hess._get_jk(mol, 'int2e_ipip1', 9, 's2kl',\n ['lk->s1ij', dm0, # vj1\n 'jk->s1il', dm0]) # vk1\n veff_diag += (vj1 - hyb * .5 * vk1).reshape(3,3,nao,nao)\n if abs(omega) > 1e-10:\n with mol.with_range_coulomb(omega):\n vk1 = rhf_hess._get_jk(mol, 'int2e_ipip1', 9, 's2kl',\n ['jk->s1il', dm0])[0]\n veff_diag -= (alpha-hyb)*.5 * vk1.reshape(3,3,nao,nao)\n else:\n vj1 = rhf_hess._get_jk(mol, 'int2e_ipip1', 9, 's2kl',\n ['lk->s1ij', dm0])[0]\n veff_diag += vj1.reshape(3,3,nao,nao)\n vj1 = vk1 = None\n t1 = log.timer_debug1('contracting int2e_ipip1', *t1)\n\n aoslices = mol.aoslice_by_atom()\n de2 = numpy.zeros((mol.natm,mol.natm,3,3)) # (A,B,dR_A,dR_B)\n vxc = _get_vxc_deriv2(hessobj, mo_coeff, mo_occ, max_memory)\n for i0, ia in enumerate(atmlst):\n shl0, shl1, p0, p1 = aoslices[ia]\n\n shls_slice = (shl0, shl1) + (0, mol.nbas)*3\n veff = vxc[ia]\n if abs(hyb) > 1e-10:\n vj1, vk1, vk2 = rhf_hess._get_jk(mol, 'int2e_ip1ip2', 9, 's1',\n ['ji->s1kl', dm0[:,p0:p1], # vj1\n 'li->s1kj', dm0[:,p0:p1], # vk1\n 'lj->s1ki', dm0 ], # vk2\n shls_slice=shls_slice)\n veff += (vj1 * 2 - hyb * .5 * vk1).reshape(3,3,nao,nao)\n veff[:,:,:,p0:p1] -= (hyb * .5 * vk2).reshape(3,3,nao,p1-p0)\n if abs(omega) > 1e-10:\n with mol.with_range_coulomb(omega):\n vk1, vk2 = rhf_hess._get_jk(mol, 'int2e_ip1ip2', 9, 's1',\n ['li->s1kj', 
dm0[:,p0:p1], # vk1\n 'lj->s1ki', dm0 ], # vk2\n shls_slice=shls_slice)\n veff -= (alpha-hyb)*.5 * vk1.reshape(3,3,nao,nao)\n veff[:,:,:,p0:p1] -= (alpha-hyb)*.5 * vk2.reshape(3,3,nao,p1-p0)\n t1 = log.timer_debug1('contracting int2e_ip1ip2 for atom %d'%ia, *t1)\n\n vj1, vk1 = rhf_hess._get_jk(mol, 'int2e_ipvip1', 9, 's2kl',\n ['lk->s1ij', dm0, # vj1\n 'li->s1kj', dm0[:,p0:p1]], # vk1\n shls_slice=shls_slice)\n veff[:,:,:,p0:p1] += vj1.transpose(0,2,1).reshape(3,3,nao,p1-p0)\n veff -= hyb * .5 * vk1.transpose(0,2,1).reshape(3,3,nao,nao)\n if abs(omega) > 1e-10:\n with mol.with_range_coulomb(omega):\n vk1 = rhf_hess._get_jk(mol, 'int2e_ipvip1', 9, 's2kl',\n ['li->s1kj', dm0[:,p0:p1]], # vk1\n shls_slice=shls_slice)[0]\n veff -= (alpha-hyb)*.5 * vk1.transpose(0,2,1).reshape(3,3,nao,nao)\n t1 = log.timer_debug1('contracting int2e_ipvip1 for atom %d'%ia, *t1)\n else:\n vj1 = rhf_hess._get_jk(mol, 'int2e_ip1ip2', 9, 's1',\n ['ji->s1kl', dm0[:,p0:p1]],\n shls_slice=shls_slice)[0]\n veff += vj1.reshape(3,3,nao,nao) * 2\n t1 = log.timer_debug1('contracting int2e_ip1ip2 for atom %d'%ia, *t1)\n\n vj1 = rhf_hess._get_jk(mol, 'int2e_ipvip1', 9, 's2kl',\n ['lk->s1ij', dm0], shls_slice=shls_slice)[0]\n veff[:,:,:,p0:p1] += vj1.transpose(0,2,1).reshape(3,3,nao,p1-p0)\n t1 = log.timer_debug1('contracting int2e_ipvip1 for atom %d'%ia, *t1)\n vj1 = vk1 = vk2 = None\n\n s1ao = numpy.zeros((3,nao,nao))\n s1ao[:,p0:p1] += s1a[:,p0:p1]\n s1ao[:,:,p0:p1] += s1a[:,p0:p1].transpose(0,2,1)\n s1oo = numpy.einsum('xpq,pi,qj->xij', s1ao, mocc, mocc)\n\n de2[i0,i0] += numpy.einsum('xypq,pq->xy', veff_diag[:,:,p0:p1], dm0[p0:p1])*2\n de2[i0,i0] -= numpy.einsum('xypq,pq->xy', s1aa[:,:,p0:p1], dme0[p0:p1])*2\n\n for j0, ja in enumerate(atmlst[:i0+1]):\n q0, q1 = aoslices[ja][2:]\n de2[i0,j0] += numpy.einsum('xypq,pq->xy', veff[:,:,q0:q1], dm0[q0:q1])*2\n de2[i0,j0] -= numpy.einsum('xypq,pq->xy', s1ab[:,:,p0:p1,q0:q1], dme0[p0:p1,q0:q1])*2\n\n h1ao = hcore_deriv(ia, ja)\n de2[i0,j0] += numpy.einsum('xypq,pq->xy', h1ao, dm0)\n\n for j0 in range(i0):\n de2[j0,i0] = de2[i0,j0].T\n\n log.timer('RKS partial hessian', *time0)\n return de2\n\ndef make_h1(hessobj, mo_coeff, mo_occ, chkfile=None, atmlst=None, verbose=None):\n mol = hessobj.mol\n if atmlst is None:\n atmlst = range(mol.natm)\n\n nao, nmo = mo_coeff.shape\n mocc = mo_coeff[:,mo_occ>0]\n dm0 = numpy.dot(mocc, mocc.T) * 2\n hcore_deriv = hessobj.base.nuc_grad_method().hcore_generator(mol)\n\n mf = hessobj.base\n ni = mf._numint\n ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)\n omega, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)\n\n mem_now = lib.current_memory()[0]\n max_memory = max(2000, mf.max_memory*.9-mem_now)\n h1ao = _get_vxc_deriv1(hessobj, mo_coeff, mo_occ, max_memory)\n aoslices = mol.aoslice_by_atom()\n for i0, ia in enumerate(atmlst):\n shl0, shl1, p0, p1 = aoslices[ia]\n shls_slice = (shl0, shl1) + (0, mol.nbas)*3\n if abs(hyb) > 1e-10:\n vj1, vj2, vk1, vk2 = \\\n rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',\n ['ji->s2kl', -dm0[:,p0:p1], # vj1\n 'lk->s1ij', -dm0 , # vj2\n 'li->s1kj', -dm0[:,p0:p1], # vk1\n 'jk->s1il', -dm0 ], # vk2\n shls_slice=shls_slice)\n veff = vj1 - hyb * .5 * vk1\n veff[:,p0:p1] += vj2 - hyb * .5 * vk2\n if abs(omega) > 1e-10:\n with mol.with_range_coulomb(omega):\n vk1, vk2 = \\\n rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',\n ['li->s1kj', -dm0[:,p0:p1], # vk1\n 'jk->s1il', -dm0 ], # vk2\n shls_slice=shls_slice)\n veff -= (alpha-hyb) * .5 * vk1\n veff[:,p0:p1] -= (alpha-hyb) * .5 * vk2\n else:\n vj1, vj2 = 
rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',\n ['ji->s2kl', -dm0[:,p0:p1], # vj1\n 'lk->s1ij', -dm0 ], # vj2\n shls_slice=shls_slice)\n veff = vj1\n veff[:,p0:p1] += vj2\n\n h1ao[ia] += veff + veff.transpose(0,2,1)\n h1ao[ia] += hcore_deriv(ia)\n\n if chkfile is None:\n return h1ao\n else:\n for ia in atmlst:\n lib.chkfile.save(chkfile, 'scf_f1ao/%d'%ia, h1ao[ia])\n return chkfile\n\nXX, XY, XZ = 4, 5, 6\nYX, YY, YZ = 5, 7, 8\nZX, ZY, ZZ = 6, 8, 9\nXXX, XXY, XXZ, XYY, XYZ, XZZ = 10, 11, 12, 13, 14, 15\nYYY, YYZ, YZZ, ZZZ = 16, 17, 18, 19\n\ndef _get_vxc_diag(hessobj, mo_coeff, mo_occ, max_memory):\n mol = hessobj.mol\n mf = hessobj.base\n if hessobj.grids is not None:\n grids = hessobj.grids\n else:\n grids = mf.grids\n if grids.coords is None:\n grids.build(with_non0tab=True)\n\n nao, nmo = mo_coeff.shape\n ni = mf._numint\n xctype = ni._xc_type(mf.xc)\n shls_slice = (0, mol.nbas)\n ao_loc = mol.ao_loc_nr()\n\n vmat = numpy.zeros((6,nao,nao))\n if xctype == 'LDA':\n ao_deriv = 2\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):\n rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')\n vxc = ni.eval_xc(mf.xc, rho, 0, deriv=1)[1]\n vrho = vxc[0]\n aow = numpy.einsum('pi,p->pi', ao[0], weight*vrho)\n for i in range(6):\n vmat[i] += numint._dot_ao_ao(mol, ao[i+4], aow, mask, shls_slice, ao_loc)\n aow = None\n\n elif xctype == 'GGA':\n def contract_(mat, ao, aoidx, wv, mask):\n aow = numpy.einsum('pi,p->pi', ao[aoidx[0]], wv[1])\n aow+= numpy.einsum('pi,p->pi', ao[aoidx[1]], wv[2])\n aow+= numpy.einsum('pi,p->pi', ao[aoidx[2]], wv[3])\n mat += numint._dot_ao_ao(mol, aow, ao[0], mask, shls_slice, ao_loc)\n\n ao_deriv = 3\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):\n rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')\n vxc = ni.eval_xc(mf.xc, rho, 0, deriv=1)[1]\n\n wv = numint._rks_gga_wv0(rho, vxc, weight)\n # *2 because v.T is not applied. 
Only v is computed in the next _dot_ao_ao \n wv[0] *= 2\n aow = numpy.einsum('npi,np->pi', ao[:4], wv)\n for i in range(6):\n vmat[i] += numint._dot_ao_ao(mol, ao[i+4], aow, mask, shls_slice, ao_loc)\n\n contract_(vmat[0], ao, [XXX,XXY,XXZ], wv, mask)\n contract_(vmat[1], ao, [XXY,XYY,XYZ], wv, mask)\n contract_(vmat[2], ao, [XXZ,XYZ,XZZ], wv, mask)\n contract_(vmat[3], ao, [XYY,YYY,YYZ], wv, mask)\n contract_(vmat[4], ao, [XYZ,YYZ,YZZ], wv, mask)\n contract_(vmat[5], ao, [XZZ,YZZ,ZZZ], wv, mask)\n rho = vxc = wv = aow = None\n\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n vmat = vmat[[0,1,2,\n 1,3,4,\n 2,4,5]]\n return vmat.reshape(3,3,nao,nao)\n\ndef _make_dR_rho1(ao, ao_dm0, atm_id, aoslices):\n p0, p1 = aoslices[atm_id][2:]\n ngrids = ao[0].shape[0]\n rho1 = numpy.zeros((3,4,ngrids))\n ao_dm0_0 = ao_dm0[0][:,p0:p1]\n # (d_X \\nabla_x mu) nu DM_{mu,nu}\n rho1[:,0] = numpy.einsum('xpi,pi->xp', ao[1:4,:,p0:p1], ao_dm0_0)\n rho1[0,1]+= numpy.einsum('pi,pi->p', ao[XX,:,p0:p1], ao_dm0_0)\n rho1[0,2]+= numpy.einsum('pi,pi->p', ao[XY,:,p0:p1], ao_dm0_0)\n rho1[0,3]+= numpy.einsum('pi,pi->p', ao[XZ,:,p0:p1], ao_dm0_0)\n rho1[1,1]+= numpy.einsum('pi,pi->p', ao[YX,:,p0:p1], ao_dm0_0)\n rho1[1,2]+= numpy.einsum('pi,pi->p', ao[YY,:,p0:p1], ao_dm0_0)\n rho1[1,3]+= numpy.einsum('pi,pi->p', ao[YZ,:,p0:p1], ao_dm0_0)\n rho1[2,1]+= numpy.einsum('pi,pi->p', ao[ZX,:,p0:p1], ao_dm0_0)\n rho1[2,2]+= numpy.einsum('pi,pi->p', ao[ZY,:,p0:p1], ao_dm0_0)\n rho1[2,3]+= numpy.einsum('pi,pi->p', ao[ZZ,:,p0:p1], ao_dm0_0)\n # (d_X mu) (\\nabla_x nu) DM_{mu,nu}\n rho1[:,1] += numpy.einsum('xpi,pi->xp', ao[1:4,:,p0:p1], ao_dm0[1][:,p0:p1])\n rho1[:,2] += numpy.einsum('xpi,pi->xp', ao[1:4,:,p0:p1], ao_dm0[2][:,p0:p1])\n rho1[:,3] += numpy.einsum('xpi,pi->xp', ao[1:4,:,p0:p1], ao_dm0[3][:,p0:p1])\n # *2 for |mu> DM <d_X nu|\n return rho1 * 2\n\ndef _d1d2_dot_(vmat, mol, ao1, ao2, mask, ao_loc, dR1_on_bra=True):\n shls_slice = (0, mol.nbas)\n if dR1_on_bra: # (d/dR1 bra) * (d/dR2 ket)\n for d1 in range(3):\n for d2 in range(3):\n vmat[d1,d2] += numint._dot_ao_ao(mol, ao1[d1], ao2[d2], mask,\n shls_slice, ao_loc)\n else: # (d/dR2 bra) * (d/dR1 ket)\n for d1 in range(3):\n for d2 in range(3):\n vmat[d1,d2] += numint._dot_ao_ao(mol, ao1[d2], ao2[d1], mask,\n shls_slice, ao_loc)\n\ndef _get_vxc_deriv2(hessobj, mo_coeff, mo_occ, max_memory):\n mol = hessobj.mol\n mf = hessobj.base\n if hessobj.grids is not None:\n grids = hessobj.grids\n else:\n grids = mf.grids\n if grids.coords is None:\n grids.build(with_non0tab=True)\n\n nao, nmo = mo_coeff.shape\n ni = mf._numint\n xctype = ni._xc_type(mf.xc)\n aoslices = mol.aoslice_by_atom()\n shls_slice = (0, mol.nbas)\n ao_loc = mol.ao_loc_nr()\n dm0 = mf.make_rdm1(mo_coeff, mo_occ)\n\n vmat = numpy.zeros((mol.natm,3,3,nao,nao))\n ipip = numpy.zeros((3,3,nao,nao))\n if xctype == 'LDA':\n ao_deriv = 1\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):\n rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')\n vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]\n vrho = vxc[0]\n frr = fxc[0]\n aow = numpy.einsum('xpi,p->xpi', ao[1:4], weight*vrho)\n _d1d2_dot_(ipip, mol, aow, ao[1:4], mask, ao_loc, False)\n\n ao_dm0 = numint._dot_ao_dm(mol, ao[0], dm0, mask, shls_slice, ao_loc)\n for ia in range(mol.natm):\n p0, p1 = aoslices[ia][2:]\n # *2 for \\nabla|ket> in rho1\n rho1 = numpy.einsum('xpi,pi->xp', ao[1:,:,p0:p1], ao_dm0[:,p0:p1]) * 2\n # aow ~ rho1 ~ d/dR1\n aow = numpy.einsum('pi,xp->xpi', ao[0], 
weight*frr*rho1)\n _d1d2_dot_(vmat[ia], mol, ao[1:4], aow, mask, ao_loc, False)\n ao_dm0 = aow = None\n\n for ia in range(mol.natm):\n p0, p1 = aoslices[ia][2:]\n vmat[ia,:,:,:,p0:p1] += ipip[:,:,:,p0:p1]\n\n elif xctype == 'GGA':\n ao_deriv = 2\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):\n rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')\n vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]\n\n wv = numint._rks_gga_wv0(rho, vxc, weight)\n aow = rks_grad._make_dR_dao_w(ao, wv)\n _d1d2_dot_(ipip, mol, aow, ao[1:4], mask, ao_loc, False)\n\n ao_dm0 = [numint._dot_ao_dm(mol, ao[i], dm0, mask, shls_slice, ao_loc)\n for i in range(4)]\n for ia in range(mol.natm):\n wv = dR_rho1 = _make_dR_rho1(ao, ao_dm0, ia, aoslices)\n wv[0] = numint._rks_gga_wv1(rho, dR_rho1[0], vxc, fxc, weight)\n wv[1] = numint._rks_gga_wv1(rho, dR_rho1[1], vxc, fxc, weight)\n wv[2] = numint._rks_gga_wv1(rho, dR_rho1[2], vxc, fxc, weight)\n\n aow = rks_grad._make_dR_dao_w(ao, wv[0])\n rks_grad._d1_dot_(vmat[ia,0], mol, aow, ao[0], mask, ao_loc, True)\n aow = rks_grad._make_dR_dao_w(ao, wv[1])\n rks_grad._d1_dot_(vmat[ia,1], mol, aow, ao[0], mask, ao_loc, True)\n aow = rks_grad._make_dR_dao_w(ao, wv[2])\n rks_grad._d1_dot_(vmat[ia,2], mol, aow, ao[0], mask, ao_loc, True)\n\n aow = numpy.einsum('npi,Xnp->Xpi', ao[:4], wv)\n _d1d2_dot_(vmat[ia], mol, ao[1:4], aow, mask, ao_loc, False)\n ao_dm0 = aow = None\n\n for ia in range(mol.natm):\n p0, p1 = aoslices[ia][2:]\n vmat[ia,:,:,:,p0:p1] += ipip[:,:,:,p0:p1]\n vmat[ia,:,:,:,p0:p1] += ipip[:,:,p0:p1].transpose(1,0,3,2)\n\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n return vmat\n\ndef _get_vxc_deriv1(hessobj, mo_coeff, mo_occ, max_memory):\n mol = hessobj.mol\n mf = hessobj.base\n if hessobj.grids is not None:\n grids = hessobj.grids\n else:\n grids = mf.grids\n if grids.coords is None:\n grids.build(with_non0tab=True)\n\n nao, nmo = mo_coeff.shape\n ni = mf._numint\n xctype = ni._xc_type(mf.xc)\n aoslices = mol.aoslice_by_atom()\n shls_slice = (0, mol.nbas)\n ao_loc = mol.ao_loc_nr()\n dm0 = mf.make_rdm1(mo_coeff, mo_occ)\n\n vmat = numpy.zeros((mol.natm,3,nao,nao))\n max_memory = max(2000, max_memory-vmat.size*8/1e6)\n if xctype == 'LDA':\n ao_deriv = 1\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):\n rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')\n vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]\n vrho = vxc[0]\n frr = fxc[0]\n ao_dm0 = numint._dot_ao_dm(mol, ao[0], dm0, mask, shls_slice, ao_loc)\n aow1 = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho)\n for ia in range(mol.natm):\n p0, p1 = aoslices[ia][2:]\n# First order density = rho1 * 2. *2 is not applied because + c.c. 
in the end\n rho1 = numpy.einsum('xpi,pi->xp', ao[1:,:,p0:p1], ao_dm0[:,p0:p1])\n aow = numpy.einsum('pi,xp->xpi', ao[0], weight*frr*rho1)\n aow[:,:,p0:p1] += aow1[:,:,p0:p1]\n rks_grad._d1_dot_(vmat[ia], mol, aow, ao[0], mask, ao_loc, True)\n ao_dm0 = aow = aow1 = None\n\n for ia in range(mol.natm):\n vmat[ia] = -vmat[ia] - vmat[ia].transpose(0,2,1)\n\n elif xctype == 'GGA':\n ao_deriv = 2\n v_ip = numpy.zeros((3,nao,nao))\n for ao, mask, weight, coords \\\n in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):\n rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')\n vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]\n\n wv = numint._rks_gga_wv0(rho, vxc, weight)\n rks_grad._gga_grad_sum_(v_ip, mol, ao, wv, mask, ao_loc)\n\n ao_dm0 = [numint._dot_ao_dm(mol, ao[i], dm0, mask, shls_slice, ao_loc)\n for i in range(4)]\n for ia in range(mol.natm):\n wv = dR_rho1 = _make_dR_rho1(ao, ao_dm0, ia, aoslices)\n wv[0] = numint._rks_gga_wv1(rho, dR_rho1[0], vxc, fxc, weight)\n wv[1] = numint._rks_gga_wv1(rho, dR_rho1[1], vxc, fxc, weight)\n wv[2] = numint._rks_gga_wv1(rho, dR_rho1[2], vxc, fxc, weight)\n aow = numpy.einsum('npi,Xnp->Xpi', ao[:4], wv)\n rks_grad._d1_dot_(vmat[ia], mol, aow, ao[0], mask, ao_loc, True)\n ao_dm0 = aow = None\n\n for ia in range(mol.natm):\n p0, p1 = aoslices[ia][2:]\n vmat[ia,:,p0:p1] += v_ip[:,p0:p1]\n vmat[ia] = -vmat[ia] - vmat[ia].transpose(0,2,1)\n\n elif xctype == 'MGGA':\n raise NotImplementedError('meta-GGA')\n\n return vmat\n\n\nclass Hessian(rhf_hess.Hessian):\n '''Non-relativistic RKS hessian'''\n def __init__(self, mf):\n rhf_hess.Hessian.__init__(self, mf)\n self.grids = None\n self._keys = self._keys.union(['grids'])\n\n partial_hess_elec = partial_hess_elec\n make_h1 = make_h1\n\nfrom pyscf import dft\ndft.rks.RKS.Hessian = dft.rks_symm.RKS.Hessian = lib.class_as_method(Hessian)\n\nif __name__ == '__main__':\n from pyscf import gto\n from pyscf import dft\n #dft.numint.NumInt.libxc = dft.xcfun\n #xc_code = 'lda,vwn'\n xc_code = 'wb97x'\n #xc_code = 'b3lyp'\n\n mol = gto.Mole()\n mol.verbose = 0\n mol.output = None\n mol.atom = [\n [1 , (1. , 0. , 0.000)],\n [1 , (0. , 1. , 0.000)],\n [1 , (0. , -1.517 , 1.177)],\n [1 , (0. 
, 1.517 , 1.177)],\n ]\n mol.basis = '631g'\n mol.unit = 'B'\n mol.build()\n mf = dft.RKS(mol)\n mf.grids.level = 4\n mf.grids.prune = False\n mf.xc = xc_code\n mf.conv_tol = 1e-14\n mf.kernel()\n n3 = mol.natm * 3\n hobj = mf.Hessian()\n e2 = hobj.kernel().transpose(0,2,1,3).reshape(n3,n3)\n print(lib.finger(e2) - -0.42286447944621297)\n print(lib.finger(e2) - -0.45453541215680582)\n print(lib.finger(e2) - -0.41385249055285972)\n\n def grad_full(ia, inc):\n coord = mol.atom_coord(ia).copy()\n ptr = mol._atm[ia,gto.PTR_COORD]\n de = []\n for i in range(3):\n mol._env[ptr+i] = coord[i] + inc\n mf = dft.RKS(mol).set(conv_tol=1e-14, xc=xc_code).run()\n e1a = mf.nuc_grad_method().set(grid_response=True).kernel()\n mol._env[ptr+i] = coord[i] - inc\n mf = dft.RKS(mol).set(conv_tol=1e-14, xc=xc_code).run()\n e1b = mf.nuc_grad_method().set(grid_response=True).kernel()\n mol._env[ptr+i] = coord[i]\n de.append((e1a-e1b)/(2*inc))\n return de\n e2ref = [grad_full(ia, .5e-4) for ia in range(mol.natm)]\n e2ref = numpy.asarray(e2ref).reshape(n3,n3)\n print(numpy.linalg.norm(e2-e2ref))\n print(abs(e2-e2ref).max())\n print(numpy.allclose(e2,e2ref,atol=1e-4))\n\n# \\partial^2 E / \\partial R \\partial R'\n e2 = hobj.partial_hess_elec(mf.mo_energy, mf.mo_coeff, mf.mo_occ)\n e2 += hobj.hess_nuc(mol)\n e2 = e2.transpose(0,2,1,3).reshape(n3,n3)\n def grad_partial_R(ia, inc):\n coord = mol.atom_coord(ia).copy()\n ptr = mol._atm[ia,gto.PTR_COORD]\n de = []\n for i in range(3):\n mol._env[ptr+i] = coord[i] + inc\n e1a = mf.nuc_grad_method().kernel()\n mol._env[ptr+i] = coord[i] - inc\n e1b = mf.nuc_grad_method().kernel()\n mol._env[ptr+i] = coord[i]\n de.append((e1a-e1b)/(2*inc))\n return de\n e2ref = [grad_partial_R(ia, .5e-4) for ia in range(mol.natm)]\n e2ref = numpy.asarray(e2ref).reshape(n3,n3)\n print(numpy.linalg.norm(e2-e2ref))\n print(abs(e2-e2ref).max())\n print(numpy.allclose(e2,e2ref,atol=1e-8))\n"
] | [
[
"numpy.dot",
"numpy.allclose",
"numpy.einsum",
"numpy.asarray",
"numpy.linalg.norm",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
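The row above implements the non-relativistic RKS analytical Hessian and, in its __main__ block, validates it against central finite differences of analytical gradients. Here is a compact sketch of that same check on a small molecule, assuming PySCF is installed; the molecule, basis, functional, and step size are illustrative choices rather than values taken from the row, and the transpose/reshape ordering follows the embedded file.

import numpy
from pyscf import gto, dft
from pyscf import hessian  # noqa: F401 -- registers mf.Hessian()

mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
mf = dft.RKS(mol).set(xc='lda,vwn', conv_tol=1e-12).run()

n3 = mol.natm * 3
e2 = mf.Hessian().kernel().transpose(0, 2, 1, 3).reshape(n3, n3)

# Central finite differences of the analytical nuclear gradient.
inc = 1e-4
e2_fd = numpy.zeros((n3, n3))
coords = mol.atom_coords()  # Bohr
for k in range(n3):
    ia, x = divmod(k, 3)
    for sign in (1, -1):
        c = coords.copy()
        c[ia, x] += sign * inc
        pmol = mol.set_geom_(c, unit='Bohr', inplace=False)
        pmf = dft.RKS(pmol).set(xc='lda,vwn', conv_tol=1e-12).run()
        g = pmf.nuc_grad_method().set(grid_response=True).kernel()
        e2_fd[k] += sign * g.reshape(n3) / (2 * inc)

print(abs(e2 - e2_fd).max())  # should be ~1e-4 or smaller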
ShanghuoLi/Adam-Ginsburg-pyspeckit | [
"841e8f1d742f2ee6ff0fac5dc097e598ba62d74a"
] | [
"pyspeckit/spectrum/plotters.py"
] | [
"\"\"\"\n=======\nPlotter\n=======\n\n.. moduleauthor:: Adam Ginsburg <[email protected]>\n\"\"\"\nfrom __future__ import print_function\nimport matplotlib\nimport matplotlib.figure\nimport numpy as np\nimport astropy.units as u\nimport copy\nimport inspect\nfrom astropy import log\n\ntry:\n from matplotlib.cbook import BoundMethodProxy\nexcept ImportError:\n from matplotlib.cbook import _BoundMethodProxy as BoundMethodProxy\n\nfrom . import widgets\nfrom ..specwarnings import warn\n\ninteractive_help_message = \"\"\"\nInteractive key commands for plotter. An additional help message may appear if\nyou have initiated the fitter.\n'?' - bring up this message\n'f' - initiate the /f/itter\n'b' - initiate the /b/aseliner\n'B' - initiate the /b/aseliner (reset the selection too)\n'r' - re-attach matplotlib keys\n'R' - redraw the plot cleanly\n'i' : individual components / show each fitted component\n\"\"\"\n\nxlabel_table = {'speed': 'Velocity'}\n\nclass Plotter(object):\n \"\"\"\n Class to plot a spectrum\n \"\"\"\n\n\n def __init__(self, Spectrum, autorefresh=True, title=\"\", xlabel=None,\n silent=True, plotscale=1.0, **kwargs):\n\n import matplotlib.pyplot\n self._pyplot = matplotlib.pyplot\n\n self.figure = None\n self.axis = None\n self.Spectrum = Spectrum\n # plot parameters\n self.offset = 0.0 # vertical offset\n self.autorefresh = autorefresh\n self.xlabel = xlabel\n self.title = title\n self.errorplot = None\n self.plotkwargs = kwargs\n self._xlim = [None,None]\n self._ylim = [None,None]\n self.debug = False\n\n self.keyclick = None\n self.silent = silent\n self.plotscale = plotscale\n\n self._xclick1 = None\n self._xclick2 = None\n\n self.automake_fitter_tool = False\n\n self._active_gui = None\n\n @property\n def _xunit(self):\n return self.Spectrum.xarr.unit\n\n def _get_prop(xy, minmax):\n def getprop(self):\n if xy == 'x':\n if minmax == 'min':\n if self._xlim[0] is not None and self._xunit:\n try:\n self._xlim[0]._unit = self._xunit\n except AttributeError:\n self._xlim[0] = u.Quantity(self._xlim[0], self._xunit)\n return self._xlim[0]\n elif minmax == 'max':\n if self._xlim[1] is not None and self._xunit:\n try:\n self._xlim[1]._unit = self._xunit\n except AttributeError:\n self._xlim[1] = u.Quantity(self._xlim[1], self._xunit)\n return self._xlim[1]\n elif xy == 'y':\n if minmax == 'min':\n return self._ylim[0]\n elif minmax == 'max':\n return self._ylim[1]\n return getprop\n\n def _set_prop(xy, minmax):\n def setprop(self, value):\n if self.debug:\n frm = inspect.stack()\n print(frm[1],\"Setting %s%s to %s\" % (xy,minmax,value))\n if xy == 'x':\n if minmax == 'min':\n self._xlim[0] = value\n elif minmax == 'max':\n self._xlim[1] = value\n elif xy == 'y':\n if minmax == 'min':\n self._ylim[0] = value\n elif minmax == 'max':\n self._ylim[1] = value\n return setprop\n\n xmin = property(fget=_get_prop('x','min'),fset=_set_prop('x','min'))\n xmax = property(fget=_get_prop('x','max'),fset=_set_prop('x','max'))\n ymin = property(fget=_get_prop('y','min'),fset=_set_prop('y','min'))\n ymax = property(fget=_get_prop('y','max'),fset=_set_prop('y','max'))\n\n\n def _disconnect_matplotlib_keys(self):\n \"\"\"\n Disconnected the matplotlib key-press callbacks\n \"\"\"\n if self.figure is not None:\n cbs = self.figure.canvas.callbacks.callbacks\n # this may cause problems since the dict of key press events is a\n # dict, i.e. 
not ordered, and we want to pop the first one...\n mpl_keypress_handler = self.figure.canvas.manager.key_press_handler_id\n try:\n self._mpl_key_callbacks = {mpl_keypress_handler:\n cbs['key_press_event'].pop(mpl_keypress_handler)}\n except KeyError:\n bmp = BoundMethodProxy(self.figure.canvas.manager.key_press)\n self._mpl_key_callbacks = {mpl_keypress_handler:\n bmp}\n\n def _reconnect_matplotlib_keys(self):\n \"\"\"\n Reconnect the previously disconnected matplotlib keys\n \"\"\"\n if self.figure is not None and hasattr(self,'_mpl_key_callbacks'):\n self.figure.canvas.callbacks.callbacks['key_press_event'].update(self._mpl_key_callbacks)\n elif self.figure is not None:\n mpl_keypress_handler = self.figure.canvas.manager.key_press_handler_id\n bmp = BoundMethodProxy(self.figure.canvas.manager.key_press)\n self.figure.canvas.callbacks.callbacks['key_press_event'].update({mpl_keypress_handler:\n bmp})\n\n def __call__(self, figure=None, axis=None, clear=True, autorefresh=None,\n plotscale=1.0, override_plotkwargs=False, **kwargs):\n \"\"\"\n Plot a spectrum\n\n Keywords:\n figure - either a matplotlib figure instance or a figure number\n to pass into pyplot.figure.\n axis - Alternative to figure, can pass an axis instance and use\n it as the plotting canvas\n clear - Clear the axis before plotting?\n \"\"\"\n\n # figure out where to put the plot\n if isinstance(figure,matplotlib.figure.Figure):\n self.figure = figure\n self.axis = self.figure.gca()\n elif type(figure) is int:\n self.figure = self._pyplot.figure(figure)\n self.axis = self.figure.gca()\n elif self.figure is None:\n if isinstance(axis,matplotlib.axes.Axes):\n self.axis = axis\n self.figure = axis.figure\n else:\n self.figure = self._pyplot.figure()\n\n if hasattr(self.figure, 'number') and not self._pyplot.fignum_exists(self.figure.number):\n self.figure = self._pyplot.figure(self.figure.number)\n\n # always re-connect the interactive keys to avoid frustration...\n self._mpl_reconnect()\n\n if axis is not None:\n #self._mpl_disconnect()\n self.axis = axis\n self.figure = axis.figure\n #self._mpl_connect()\n elif len(self.figure.axes) > 0 and self.axis is None:\n self.axis = self.figure.axes[0] # default to first axis\n elif self.axis is None:\n self.axis = self.figure.gca()\n\n # A check to deal with issue #117: if you close the figure, the axis\n # still exists, but it cannot be reattached to a figure\n if (hasattr(self.axis.get_figure(), 'number') and\n not (self.axis.get_figure() is self._pyplot.figure(self.axis.get_figure().number))):\n self.axis = self.figure.gca()\n\n if self.axis is not None and self.axis not in self.figure.axes:\n # if you've cleared the axis, but the figure is still open, you\n # need a new axis\n self.figure.add_axes(self.axis)\n\n\n if clear and self.axis is not None:\n self.axis.clear()\n # Need to empty the stored model plots\n if hasattr(self.Spectrum, 'fitter'):\n self.Spectrum.fitter.clear()\n\n if autorefresh is not None:\n self.autorefresh = autorefresh\n\n self.plotscale = plotscale\n\n if self.plotkwargs and not override_plotkwargs:\n self.plotkwargs.update(kwargs)\n else:\n self.plotkwargs = kwargs\n\n self.plot(**kwargs)\n\n def _mpl_connect(self):\n if self.keyclick is None:\n self.keyclick = self.figure.canvas.mpl_connect('key_press_event',self.parse_keys)\n\n def _mpl_disconnect(self):\n self.figure.canvas.mpl_disconnect(self.keyclick)\n self.keyclick = None\n\n def disconnect(self):\n \"\"\"\n Disconnect the matplotlib interactivity of this pyspeckit plotter.\n \"\"\"\n 
self._mpl_disconnect()\n\n def connect(self):\n \"\"\"\n Connect to the matplotlib key-parsing interactivity\n \"\"\"\n self._mpl_connect()\n\n def _mpl_reconnect(self):\n self._mpl_disconnect()\n self._mpl_connect()\n # disable fullscreen & grid\n self._pyplot.rcParams['keymap.fullscreen'] = 'ctrl+f'\n self._pyplot.rcParams['keymap.grid'] = 'ctrl+g'\n\n def plot(self, offset=0.0, xoffset=0.0, color='k', linestyle='steps-mid',\n linewidth=0.5, errstyle=None, erralpha=0.2, errcolor=None,\n silent=None, reset=True, refresh=True, use_window_limits=None,\n useOffset=False, **kwargs):\n \"\"\"\n Plot the spectrum!\n\n Tries to automatically find a reasonable plotting range if one is not\n set.\n\n Parameters\n ----------\n offset : float\n vertical offset to add to the spectrum before plotting. Useful if\n you want to overlay multiple spectra on a single plot\n xoffset: float\n An x-axis shift. I don't know why you'd want this...\n color : str\n default to plotting spectrum in black\n linestyle : 'steps-mid' or str\n 'steps-mid' for histogram-style plotting. See matplotlib's plot\n for more information\n linewidth : float\n Line width in pixels. Narrow lines are helpful when histo-plotting\n errstyle : 'fill', 'bars', or None\n can be \"fill\", which draws partially transparent boxes around the\n data to show the error region, or \"bars\" which draws standard\n errorbars. ``None`` will display no errorbars\n useOffset : bool\n Use offset-style X/Y coordinates (e.g., 1 + 1.483e10)? Defaults to\n False because these are usually quite annoying.\n xmin/xmax/ymin/ymax : float\n override defaults for plot range. Once set, these parameters are\n sticky (i.e., replotting will use the same ranges). Passed to\n `reset_limits`\n reset_[xy]limits : bool\n Reset the limits to \"sensible defaults\". Passed to `reset_limits`\n ypeakscale : float\n Scale up the Y maximum value. Useful to keep the annotations away\n from the data. Passed to `reset_limits`\n reset : bool\n Reset the x/y axis limits? 
If set, `reset_limits` will be called.\n \"\"\"\n\n if self.axis is None:\n raise Exception(\"You must call the Plotter class to initiate the canvas before plotting.\")\n\n self.offset = offset\n\n # there is a bug where this only seems to update the second time it is called\n self.label(**kwargs)\n self.label(**kwargs)\n for arg in ['title','xlabel','ylabel']:\n if arg in kwargs:\n kwargs.pop(arg)\n\n reset_kwargs = {}\n for arg in ['xmin', 'xmax', 'ymin', 'ymax', 'reset_xlimits',\n 'reset_ylimits', 'ypeakscale']:\n if arg in kwargs:\n reset_kwargs[arg] = kwargs.pop(arg)\n\n if (use_window_limits is None and any(k in reset_kwargs for k in\n ('xmin','xmax','reset_xlimits'))):\n use_window_limits = False\n\n if use_window_limits:\n self._stash_window_limits()\n\n # for filled errorbars, order matters.\n inds = np.argsort(self.Spectrum.xarr)\n\n if errstyle is not None:\n if errcolor is None:\n errcolor = color\n if errstyle == 'fill':\n self.errorplot = [self.axis.fill_between(steppify(self.Spectrum.xarr.value[inds]+xoffset, isX=True),\n steppify((self.Spectrum.data*self.plotscale+self.offset-self.Spectrum.error*self.plotscale)[inds]),\n steppify((self.Spectrum.data*self.plotscale+self.offset+self.Spectrum.error*self.plotscale)[inds]),\n facecolor=errcolor, edgecolor=errcolor, alpha=erralpha, **kwargs)]\n elif errstyle == 'bars':\n self.errorplot = self.axis.errorbar(self.Spectrum.xarr[inds].value+xoffset,\n self.Spectrum.data[inds]*self.plotscale+self.offset,\n yerr=self.Spectrum.error[inds]*self.plotscale,\n ecolor=errcolor, fmt='none',\n **kwargs)\n\n self._spectrumplot = self.axis.plot(self.Spectrum.xarr.value[inds]+xoffset,\n self.Spectrum.data[inds]*self.plotscale+self.offset,\n color=color,\n linestyle=linestyle,\n linewidth=linewidth, **kwargs)\n\n self.axis.ticklabel_format(useOffset=useOffset)\n\n if use_window_limits:\n self._reset_to_stashed_limits()\n\n if silent is not None:\n self.silent = silent\n\n if reset:\n self.reset_limits(use_window_limits=use_window_limits, **reset_kwargs)\n\n if self.autorefresh and refresh:\n self.refresh()\n\n # Maybe it's OK to call 'plot' when there is an active gui tool\n # (e.g., baseline or specfit)?\n #if self._active_gui:\n # self._active_gui = None\n # warn(\"An active GUI was found while initializing the \"\n # \"plot. 
This is somewhat dangerous and may result \"\n # \"in broken interactivity.\")\n\n\n def _stash_window_limits(self):\n self._window_limits = self.axis.get_xlim(),self.axis.get_ylim()\n if self.debug:\n print(\"Stashed window limits: \",self._window_limits)\n\n def _reset_to_stashed_limits(self):\n self.axis.set_xlim(*self._window_limits[0])\n self.axis.set_ylim(*self._window_limits[1])\n self.xmin,self.xmax = self._window_limits[0]\n self.ymin,self.ymax = self._window_limits[1]\n if self.debug:\n print(\"Recovered window limits: \",self._window_limits)\n\n def reset_limits(self, xmin=None, xmax=None, ymin=None, ymax=None,\n reset_xlimits=True, reset_ylimits=True, ypeakscale=1.2,\n silent=None, use_window_limits=False, **kwargs):\n \"\"\"\n Automatically or manually reset the plot limits\n \"\"\"\n # if not use_window_limits: use_window_limits = False\n if self.debug:\n frame = inspect.currentframe()\n args, _, _, values = inspect.getargvalues(frame)\n print(zip(args,values))\n\n if use_window_limits:\n # this means DO NOT reset!\n # it simply sets self.[xy][min/max] = current value\n self.set_limits_from_visible_window()\n else:\n if silent is not None:\n self.silent = silent\n\n # if self.xmin and self.xmax:\n if (reset_xlimits or self.Spectrum.xarr.min().value < self.xmin or self.Spectrum.xarr.max().value > self.xmax):\n if not self.silent:\n warn(\"Resetting X-axis min/max because the plot is out of bounds.\")\n self.xmin = None\n self.xmax = None\n if xmin is not None:\n self.xmin = u.Quantity(xmin, self._xunit)\n elif self.xmin is None:\n self.xmin = u.Quantity(self.Spectrum.xarr.min().value, self._xunit)\n if xmax is not None:\n self.xmax = u.Quantity(xmax, self._xunit)\n elif self.xmax is None:\n self.xmax = u.Quantity(self.Spectrum.xarr.max().value, self._xunit)\n\n xpixmin = np.argmin(np.abs(self.Spectrum.xarr.value-self.xmin.value))\n xpixmax = np.argmin(np.abs(self.Spectrum.xarr.value-self.xmax.value))\n if xpixmin>xpixmax:\n xpixmin,xpixmax = xpixmax,xpixmin\n elif xpixmin == xpixmax:\n if reset_xlimits:\n raise Exception(\"Infinite recursion error. Maybe there are no valid data?\")\n if not self.silent:\n warn(\"ERROR: the X axis limits specified were invalid. Resetting.\")\n self.reset_limits(reset_xlimits=True, ymin=ymin, ymax=ymax,\n reset_ylimits=reset_ylimits,\n ypeakscale=ypeakscale, **kwargs)\n return\n\n if self.ymin is not None and self.ymax is not None:\n # this is utter nonsense....\n if (np.nanmax(self.Spectrum.data) < self.ymin or np.nanmin(self.Spectrum.data) > self.ymax\n or reset_ylimits):\n if not self.silent and not reset_ylimits:\n warn(\"Resetting Y-axis min/max because the plot is out of bounds.\")\n self.ymin = None\n self.ymax = None\n\n if ymin is not None:\n self.ymin = ymin\n elif self.ymin is None:\n yminval = np.nanmin(self.Spectrum.data[xpixmin:xpixmax])\n # Increase the range fractionally. 
This means dividing a positive #, multiplying a negative #\n                if yminval < 0:\n                    self.ymin = float(yminval)*float(ypeakscale)\n                else:\n                    self.ymin = float(yminval)/float(ypeakscale)\n\n            if ymax is not None:\n                self.ymax = ymax\n            elif self.ymax is None:\n                ymaxval = (np.nanmax(self.Spectrum.data[xpixmin:xpixmax])-self.ymin)\n                if ymaxval > 0:\n                    self.ymax = float(ymaxval) * float(ypeakscale) + self.ymin\n                else:\n                    self.ymax = float(ymaxval) / float(ypeakscale) + self.ymin\n\n            self.ymin += self.offset\n            self.ymax += self.offset\n\n        self.axis.set_xlim(self.xmin.value if hasattr(self.xmin, 'value') else self.xmin,\n                           self.xmax.value if hasattr(self.xmax, 'value') else self.xmax)\n        self.axis.set_ylim(self.ymin, self.ymax)\n\n\n    def label(self, title=None, xlabel=None, ylabel=None, verbose_label=False,\n              **kwargs):\n        \"\"\"\n        Label the plot, with an attempt to parse standard units into nice latex labels\n\n        Parameters\n        ----------\n        title : str\n        xlabel : str\n        ylabel : str\n        verbose_label: bool\n        \"\"\"\n\n        if title is not None:\n            self.title = title\n        elif hasattr(self.Spectrum,'specname'):\n            self.title = self.Spectrum.specname\n        if self.title != \"\":\n            self.axis.set_title(self.title)\n\n        if xlabel is not None:\n            log.debug(\"setting xlabel={0}\".format(xlabel))\n            self.xlabel = xlabel\n        elif self._xunit:\n            try:\n                self.xlabel = xlabel_table[self._xunit.physical_type.lower()]\n            except KeyError:\n                self.xlabel = self._xunit.physical_type.title()\n            # WAS: self.xlabel += \" (\"+u.Unit(self._xunit).to_string()+\")\"\n            self.xlabel += \" ({0})\".format(self._xunit.to_string())\n            log.debug(\"xunit is {1}. set xlabel={0}\".format(self.xlabel,\n                                                            self._xunit))\n\n            if verbose_label:\n                self.xlabel = \"%s %s\" % (self.Spectrum.xarr.velocity_convention.title(),\n                                         self.xlabel)\n        else:\n            log.warning(\"Plotter: xlabel was not set\")\n\n        if self.xlabel is not None:\n            self.axis.set_xlabel(self.xlabel)\n\n        if ylabel is not None:\n            self.axis.set_ylabel(ylabel)\n        elif self.Spectrum.unit in ['Ta*','Tastar']:\n            self.axis.set_ylabel(\"$T_A^*$ (K)\")\n        elif self.Spectrum.unit in ['K']:\n            self.axis.set_ylabel(\"Brightness Temperature $T$ (K)\")\n        elif self.Spectrum.unit == 'mJy':\n            self.axis.set_ylabel(\"$S_\\\\nu$ (mJy)\")\n        elif self.Spectrum.unit == 'Jy':\n            self.axis.set_ylabel(\"$S_\\\\nu$ (Jy)\")\n        else:\n            if isinstance(self.Spectrum.unit, str) and \"$\" in self.Spectrum.unit:\n                # assume LaTeX already\n                self.axis.set_ylabel(self.Spectrum.unit)\n            elif isinstance(self.Spectrum.unit, str):\n                self.axis.set_ylabel(self.Spectrum.unit)\n            else:\n                label_units = self.Spectrum.unit.to_string(format='latex')\n                if 'mathring{A}' in label_units:\n                    label_units = label_units.replace('\\mathring{A}', 'A')\n                if '\\overset' in label_units:\n                    label_units = label_units.replace('\\overset', '^')\n                self.axis.set_ylabel(label_units)\n\n    @property\n    def ylabel(self):\n        return self.axis.get_ylabel()\n\n    def refresh(self):\n        if self.axis is not None:\n            self.axis.figure.canvas.draw()\n\n\n    def savefig(self,fname,bbox_inches='tight',**kwargs):\n        \"\"\"\n        simple wrapper of matplotlib's savefig.\n        \"\"\"\n        self.axis.figure.savefig(fname,bbox_inches=bbox_inches,**kwargs)\n\n    def parse_keys(self,event):\n        \"\"\"\n        Parse key commands entered from the keyboard\n        \"\"\"\n        if hasattr(event,'key'):\n            if event.key == '?':\n                print(interactive_help_message)\n            elif event.key == 'f':\n                print(\"\\n\\nFitter initiated from the interactive plotter.\")\n                # extra optional text:\n                # Matplotlib shortcut keys ('g','l','p',etc.) are disabled. 
Re-enable with 'r'\"\n if self._active_gui == self.Spectrum.specfit and self._active_gui._check_connections(verbose=False):\n print(\"Fitter is already active. Use 'q' to quit the fitter.\")\n elif self._active_gui == self.Spectrum.specfit and not self._active_gui._check_connections(verbose=False):\n # forcibly clear connections\n self._active_gui.clear_all_connections()\n # the 'clear_all_connections' code *explicitly* makes the\n # following line correct, except in the case that there is\n # no canvas...\n assert self._active_gui is None\n self.activate_interactive_fitter()\n else:\n self.activate_interactive_fitter()\n\n assert self._active_gui == self.Spectrum.specfit\n assert self._active_gui._check_connections(verbose=False)\n\n if not hasattr(self,'FitterTool') and self.automake_fitter_tool:\n self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure)\n elif hasattr(self,'FitterTool') and self.FitterTool.toolfig.number not in self._pyplot.get_fignums():\n self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure)\n elif event.key is not None and event.key.lower() == 'b':\n if event.key == 'b':\n print(\"\\n\\nBaseline initiated from the interactive plotter\")\n elif event.key == 'B':\n print(\"\\n\\nBaseline initiated from the interactive plotter (with reset)\")\n print(\"Matplotlib shortcut keys ('g','l','p',etc.) are disabled. Re-enable with 'r'\")\n self.activate_interactive_baseline_fitter(reset_selection=(event.key=='B'))\n\n if not hasattr(self,'FitterTool') and self.automake_fitter_tool:\n self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure)\n elif hasattr(self,'FitterTool') and self.FitterTool.toolfig.number not in self._pyplot.get_fignums():\n self.FitterTool = widgets.FitterTools(self.Spectrum.specfit, self.figure)\n elif event.key == 'r':\n # print(\"\\n\\nReconnected matplotlib shortcut keys.\")\n self._reconnect_matplotlib_keys()\n elif event.key == 'R':\n self()\n elif event.key == 'i':\n self.Spectrum.specfit.plot_fit(show_components=True)\n\n def get_two_clicks(self,event):\n\n if self._xclick1 is None:\n self._xclick1 = event.xdata\n elif self._xclick2 is None:\n self._xclick2 = event.xdata\n\n def set_limits_from_visible_window(self, debug=False):\n \"\"\" Hopefully self-descriptive: set the x and y limits from the\n currently visible window (use this if you use the pan/zoom tools or\n manually change the limits) \"\"\"\n if debug:\n print(\"Changing x limits from {},{} to {},{}\".format(self.xmin,self.xmax,self.axis.get_xlim()[0],self.axis.get_xlim()[1]))\n print(\"Changing y limits from {},{} to {},{}\".format(self.ymin,self.ymax,self.axis.get_ylim()[0],self.axis.get_ylim()[1]))\n self.xmin, self.xmax = self.axis.get_xlim()\n self.ymin, self.ymax = self.axis.get_ylim()\n if debug:\n print(\"New x limits {},{} == {},{}\".format(self.xmin,self.xmax,self.axis.get_xlim()[0],self.axis.get_xlim()[1]))\n print(\"New y limits {},{} == {},{}\".format(self.ymin,self.ymax,self.axis.get_ylim()[0],self.axis.get_ylim()[1]))\n\n def copy(self, parent=None):\n \"\"\"\n Create a copy of the plotter with blank (uninitialized) axis & figure\n\n [ parent ]\n A spectroscopic axis instance that is the parent of the specfit\n instance. 
This needs to be specified at some point, but defaults\n to None to prevent overwriting a previous plot.\n \"\"\"\n\n newplotter = copy.copy(self)\n newplotter.Spectrum = parent\n newplotter.axis = None\n newplotter.figure = None\n\n return newplotter\n\n def line_ids(self, line_names, line_xvals, xval_units=None, auto_yloc=True,\n velocity_offset=None, velocity_convention='radio',\n auto_yloc_fraction=0.9, **kwargs):\n \"\"\"\n Add line ID labels to a plot using lineid_plot\n http://oneau.wordpress.com/2011/10/01/line-id-plot/\n https://github.com/phn/lineid_plot\n http://packages.python.org/lineid_plot/\n\n Parameters\n ----------\n line_names : list\n A list of strings to label the specified x-axis values\n line_xvals : list\n List of x-axis values (e.g., wavelengths) at which to label the lines.\n Can be a list of quantities.\n xval_units : string\n The unit of the line_xvals if they are not given as quantities\n velocity_offset : quantity\n A velocity offset to apply to the inputs if they are in frequency\n or wavelength units\n velocity_convention : 'radio' or 'optical' or 'doppler'\n Used if the velocity offset is given\n auto_yloc : bool\n If set, overrides box_loc and arrow_tip (the vertical position of\n the lineid labels) in kwargs to be `auto_yloc_fraction` of the plot\n range\n auto_yloc_fraction: float in range [0,1]\n The fraction of the plot (vertically) at which to place labels\n\n Examples\n --------\n >>> import numpy as np\n >>> import pyspeckit\n >>> sp = pyspeckit.Spectrum(\n xarr=pyspeckit.units.SpectroscopicAxis(np.linspace(-50,50,101),\n unit='km/s', refX=6562.8, refX_unit='angstrom'),\n data=np.random.randn(101), error=np.ones(101))\n >>> sp.plotter()\n >>> sp.plotter.line_ids(['H$\\\\alpha$'],[6562.8],xval_units='angstrom')\n \"\"\"\n import lineid_plot\n\n if velocity_offset is not None:\n assert velocity_offset.unit.is_equivalent(u.km/u.s)\n\n doppler = getattr(u, 'doppler_{0}'.format(velocity_convention))\n if self.Spectrum.xarr.refX is not None:\n equivalency = doppler(self.Spectrum.xarr.refX)\n else:\n equivalency = doppler(self.Spectrum.xarr.as_unit(u.GHz)[0])\n\n xvals = []\n linenames_toplot = []\n for xv,ln in zip(line_xvals, line_names):\n if hasattr(xv, 'unit'):\n pass\n else:\n xv = u.Quantity(xv, xval_units)\n\n xv = xv.to(u.km/u.s,\n equivalencies=equivalency)\n if velocity_offset is not None:\n xv = xv + velocity_offset\n xv = xv.to(self.Spectrum.xarr.unit, equivalencies=equivalency)\n\n if self.Spectrum.xarr.in_range(xv):\n xvals.append(xv.value)\n linenames_toplot.append(ln)\n\n if len(xvals) != len(line_xvals):\n log.warn(\"Skipped {0} out-of-bounds lines when plotting line IDs.\"\n .format(len(line_xvals)-len(xvals)))\n\n if auto_yloc:\n yr = self.axis.get_ylim()\n kwargs['box_loc'] = (yr[1]-yr[0])*auto_yloc_fraction + yr[0]\n kwargs['arrow_tip'] = (yr[1]-yr[0])*(auto_yloc_fraction*0.9) + yr[0]\n\n lineid_plot.plot_line_ids(self.Spectrum.xarr,\n self.Spectrum.data,\n xvals,\n linenames_toplot,\n ax=self.axis,\n **kwargs)\n\n def line_ids_from_measurements(self, auto_yloc=True,\n auto_yloc_fraction=0.9, **kwargs):\n \"\"\"\n Add line ID labels to a plot using lineid_plot\n http://oneau.wordpress.com/2011/10/01/line-id-plot/\n https://github.com/phn/lineid_plot\n http://packages.python.org/lineid_plot/\n\n Parameters\n ----------\n auto_yloc : bool\n If set, overrides box_loc and arrow_tip (the vertical position of\n the lineid labels) in kwargs to be `auto_yloc_fraction` of the plot\n range\n auto_yloc_fraction: float in range [0,1]\n The 
fraction of the plot (vertically) at which to place labels\n\n Examples\n --------\n >>> import numpy as np\n >>> import pyspeckit\n >>> sp = pyspeckit.Spectrum(\n xarr=pyspeckit.units.SpectroscopicAxis(np.linspace(-50,50,101),\n units='km/s', refX=6562.8, refX_unit='angstroms'),\n data=np.random.randn(101), error=np.ones(101))\n >>> sp.plotter()\n >>> sp.specfit(multifit=None, fittype='gaussian', guesses=[1,0,1]) # fitting noise....\n >>> sp.measure()\n >>> sp.plotter.line_ids_from_measurements()\n \"\"\"\n import lineid_plot\n\n if hasattr(self.Spectrum,'measurements'):\n measurements = self.Spectrum.measurements\n\n if auto_yloc:\n yr = self.axis.get_ylim()\n kwargs['box_loc'] = (yr[1]-yr[0])*auto_yloc_fraction + yr[0]\n kwargs['arrow_tip'] = (yr[1]-yr[0])*(auto_yloc_fraction*0.9) + yr[0]\n\n lineid_plot.plot_line_ids(self.Spectrum.xarr, self.Spectrum.data,\n [v['pos'] for v in\n measurements.lines.values()],\n measurements.lines.keys(), ax=self.axis,\n **kwargs)\n else:\n warn(\"Cannot add line IDs from measurements unless measurements have been made!\")\n\n def activate_interactive_fitter(self):\n \"\"\"\n Attempt to activate the interactive fitter\n \"\"\"\n if self._active_gui is not None:\n # This should not be reachable. Clearing connections is the\n # \"right\" behavior if this becomes reachable, but I'd rather raise\n # an exception because I don't want to get here ever\n self._active_gui.clear_all_connections()\n raise ValueError(\"GUI was active when 'f' key pressed\")\n\n self._activate_interactive(self.Spectrum.specfit, interactive=True)\n\n def activate_interactive_baseline_fitter(self, **kwargs):\n \"\"\"\n Attempt to activate the interactive baseline fitter\n \"\"\"\n if self._active_gui is not None:\n # This should not be reachable. 
Clearing connections is the\n # \"right\" behavior if this becomes reachable, but I'd rather raise\n # an exception because I don't want to get here ever\n gui_was = self._active_gui\n self._active_gui.clear_all_connections()\n raise ValueError(\"GUI {0} was active when 'b' key pressed\"\n .format(gui_was))\n\n self._activate_interactive(self.Spectrum.baseline, interactive=True,\n **kwargs)\n\n def _activate_interactive(self, object_to_activate, **kwargs):\n self._disconnect_matplotlib_keys()\n\n self._active_gui = object_to_activate\n\n # activating the gui calls clear_all_connections, which disconnects the\n # gui\n try:\n self._active_gui(**kwargs)\n self._active_gui = object_to_activate\n assert self._active_gui is not None\n except Exception as ex:\n self._active_gui = None\n raise ex\n\ndef parse_units(labelstring):\n import re\n labelstring = re.sub(\"um\",\"$\\mu$m\",labelstring)\n labelstring = re.sub(\"-1\",\"$^{-1}$\",labelstring)\n labelstring = re.sub(\"-2\",\"$^{-2}$\",labelstring)\n labelstring = re.sub(\"-3\",\"$^{-3}$\",labelstring)\n labelstring = re.sub(\"ergss\",\"ergs s\",labelstring)\n return labelstring\n\ndef parse_norm(norm):\n \"\"\"\n Expected format: norm = 10E15\n \"\"\"\n\n try:\n base, exp = norm.split('E')\n except ValueError:\n base, exp = norm.split('e')\n\n if float(base) == 1.0:\n norm = '10'\n else:\n norm = base\n\n norm += '^{%s}' % exp\n\n return norm\n\ndef steppify(arr,isX=False):\n \"\"\"\n *support function*\n Converts an array to double-length for step plotting\n \"\"\"\n if isX:\n interval = abs(arr[1:]-arr[:-1]) / 2.0\n newarr = np.array(list(zip(arr[:-1]-interval,arr[:-1]+interval))).ravel()\n newarr = np.concatenate([newarr,2*[newarr[-1]+interval[-1]]])\n else:\n newarr = np.array(list(zip(arr,arr))).ravel()\n return newarr\n"
] | [
[
"numpy.nanmax",
"matplotlib.cbook._BoundMethodProxy",
"numpy.abs",
"numpy.nanmin",
"numpy.concatenate",
"numpy.argsort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
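The pyspeckit plotter above pairs its `steppify` helper with `fill_between` to draw histogram-style error bands (the `errstyle='fill'` branch of `plot`). Below is a minimal self-contained sketch of that technique; the spectral axis, data, error values, and styling are synthetic, invented purely for illustration.

import numpy as np
import matplotlib.pyplot as plt

def steppify(arr, isX=False):
    # same helper as in the plotter above: double the array length
    # so every sample becomes a flat step
    if isX:
        interval = abs(arr[1:] - arr[:-1]) / 2.0
        newarr = np.array(list(zip(arr[:-1] - interval, arr[:-1] + interval))).ravel()
        newarr = np.concatenate([newarr, 2 * [newarr[-1] + interval[-1]]])
    else:
        newarr = np.array(list(zip(arr, arr))).ravel()
    return newarr

xarr = np.linspace(0, 10, 64)        # hypothetical spectral axis
data = np.sin(xarr)                  # hypothetical spectrum
error = np.full_like(data, 0.15)     # hypothetical 1-sigma errors

fig, ax = plt.subplots()
ax.plot(xarr, data, color='k', drawstyle='steps-mid', linewidth=0.5)
ax.fill_between(steppify(xarr, isX=True),
                steppify(data - error),
                steppify(data + error),
                facecolor='k', edgecolor='k', alpha=0.2)
plt.show()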
abdelq/pybaselines | [
"043aa7875efe1ca01c3e8e9ae7c57a67274aff06"
] | [
"tests/test_optimizers.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"Tests for pybaselines.optimizers.\n\n@author: Donald Erb\nCreated on March 20, 2021\n\n\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal\nimport pytest\n\nfrom pybaselines import optimizers\n\nfrom .conftest import get_data, AlgorithmTester\n\n\[email protected]('method', ('collab_pls', 'COLLAB_pls'))\ndef test_get_function(method):\n optimizers._get_function(method, [optimizers])\n\n\ndef test_get_function_fails_wrong_method():\n with pytest.raises(AttributeError):\n optimizers._get_function('unknown function', [optimizers])\n\n\ndef test_get_function_fails_no_module():\n with pytest.raises(AttributeError):\n optimizers._get_function('collab_pls', [])\n\n\nclass TestCollabPLS(AlgorithmTester):\n \"\"\"Class for testing collab_pls baseline.\"\"\"\n\n func = optimizers.collab_pls\n\n @staticmethod\n def _stack(data):\n return np.vstack((data, data))\n\n def test_unchanged_data(self, data_fixture):\n x, y = get_data()\n\n data_x, data_y = data_fixture\n stacked_data = (self._stack(data_x), self._stack(data_y))\n stacked_y = self._stack(y)\n super()._test_unchanged_data(stacked_data, stacked_y, None, stacked_y)\n\n def test_output(self):\n stacked_y = self._stack(self.y)\n super()._test_output(stacked_y, stacked_y)\n\n def test_list_input(self):\n y_list = self.y.tolist()\n stacked_y = self._stack(self.y)\n super()._test_algorithm_list(array_args=(stacked_y,), list_args=([y_list, y_list],))\n\n @pytest.mark.parametrize(\n 'method',\n ('asls', 'iasls', 'airpls', 'mpls', 'arpls', 'drpls', 'iarpls', 'aspls', 'psalsa')\n )\n def test_all_methods(self, method):\n super()._call_func(self._stack(self.y), method=method)\n\n def test_unknown_method_fails(self):\n with pytest.raises(AttributeError):\n super()._call_func(self._stack(self.y), method='unknown function')\n\n\nclass TestOptimizeExtendedRange(AlgorithmTester):\n \"\"\"Class for testing optimize_extended_range baseline.\"\"\"\n\n func = optimizers.optimize_extended_range\n\n @pytest.mark.parametrize('side', ('left', 'right', 'both'))\n def test_unchanged_data(self, data_fixture, side):\n x, y = get_data()\n super()._test_unchanged_data(data_fixture, y, x, y, x, side=side)\n\n def test_no_x(self):\n super()._test_algorithm_no_x(\n with_args=(self.y, self.x), without_args=(self.y, None)\n )\n\n def test_x_ordering(self):\n \"\"\"Ensures arrays are correctly sorted within the function.\"\"\"\n reverse_x = self.x[::-1]\n reverse_y = self.y[::-1]\n regular_inputs_result = self._call_func(self.y, self.x)[0]\n reverse_inputs_result = self._call_func(reverse_y, reverse_x)[0]\n\n assert_array_almost_equal(regular_inputs_result, reverse_inputs_result[::-1])\n\n def test_output(self):\n super()._test_output(self.y, self.y, None)\n\n def test_list_input(self):\n y_list = self.y.tolist()\n super()._test_algorithm_list(\n array_args=(self.y, None), list_args=(y_list, None)\n )\n\n @pytest.mark.parametrize(\n 'method',\n ('asls', 'iasls', 'airpls', 'mpls', 'arpls', 'drpls', 'iarpls', 'aspls', 'psalsa',\n 'poly', 'modpoly', 'imodpoly', 'penalized_poly')\n )\n def test_all_methods(self, method):\n super()._call_func(self.y, self.x, method=method)\n\n def test_unknown_method_fails(self):\n with pytest.raises(AttributeError):\n super()._call_func(self.y, self.x, method='unknown function')\n\n\nclass TestAdaptiveMinMax(AlgorithmTester):\n \"\"\"Class for testing adaptive minmax baseline.\"\"\"\n\n func = optimizers.adaptive_minmax\n\n def test_unchanged_data(self, data_fixture):\n x, y = 
get_data()\n super()._test_unchanged_data(data_fixture, y, x, y, x)\n\n def test_no_x(self):\n super()._test_algorithm_no_x(with_args=(self.y, self.x), without_args=(self.y,))\n\n def test_output(self):\n super()._test_output(self.y, self.y)\n\n def test_list_output(self):\n y_list = self.y.tolist()\n super()._test_algorithm_list(array_args=(self.y,), list_args=(y_list,))\n\n @pytest.mark.parametrize('method', ('modpoly', 'imodpoly'))\n def test_methods(self, method):\n super()._test_output(self.y, self.y, self.x, method=method)\n\n def test_unknown_method_fails(self):\n with pytest.raises(KeyError):\n super()._test_output(self.y, self.y, method='unknown')\n\n @pytest.mark.parametrize('poly_order', (None, 0, [0], (0, 1)))\n def test_polyorder_inputs(self, poly_order):\n super()._test_output(self.y, self.y, self.x, poly_order)\n"
] | [
[
"numpy.vstack",
"numpy.testing.assert_array_almost_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
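The pybaselines test module above leans on `pytest.mark.parametrize` to sweep method names and on `pytest.raises` to pin down failure modes. The stand-alone illustration below uses the same pattern; `get_function` and `registry` are hypothetical stand-ins, not the pybaselines implementation.

import pytest

def get_function(name, registry):
    # hypothetical stand-in mimicking the AttributeError behaviour
    # asserted in the tests above
    try:
        return registry[name.lower()]
    except KeyError as exc:
        raise AttributeError(name) from exc

@pytest.mark.parametrize('method', ('collab_pls', 'COLLAB_pls'))
def test_lookup_is_case_insensitive(method):
    registry = {'collab_pls': lambda data: data}
    assert get_function(method, registry) is registry['collab_pls']

def test_unknown_method_fails():
    with pytest.raises(AttributeError):
        get_function('unknown function', {})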
kipolovnikov/cooltools | [
"986fe95f96978f669204226d99e3c0a6fd5be208"
] | [
"cooltools/lib/common.py"
] | [
"import numpy as np\nimport pandas as pd\n\n\ndef assign_supports(features, supports, labels=False, suffix=\"\"):\n \"\"\"\n Assign support regions to a table of genomic intervals.\n\n Parameters\n ----------\n features : DataFrame\n Dataframe with columns `chrom`, `start`, `end`\n or `chrom1`, `start1`, `end1`, `chrom2`, `start2`, `end2`\n supports : array-like\n Support areas\n\n \"\"\"\n features = features.copy()\n supp_col = pd.Series(index=features.index, data=np.nan)\n\n c = \"chrom\" + suffix\n s = \"start\" + suffix\n e = \"end\" + suffix\n for col in (c, s, e):\n if col not in features.columns:\n raise ValueError(\n 'Column \"{}\" not found in features data frame.'.format(col)\n )\n\n for i, region in enumerate(supports):\n # single-region support\n if len(region) in [3, 4]:\n sel = (features[c] == region[0]) & (features[e] > region[1])\n if region[2] is not None:\n sel &= features[s] < region[2]\n # paired-region support\n elif len(region) == 2:\n region1, region2 = region\n sel1 = (features[c] == region1[0]) & (features[e] > region1[1])\n if region1[2] is not None:\n sel1 &= features[s] < region1[2]\n sel2 = (features[c] == region2[0]) & (features[e] > region2[1])\n if region2[2] is not None:\n sel2 &= features[s] < region2[2]\n sel = sel1 | sel2\n supp_col.loc[sel] = i\n\n if labels:\n supp_col = supp_col.map(lambda i: supports[int(i)], na_action=\"ignore\")\n\n return supp_col\n\n\ndef assign_regions_to_bins(bin_ids, regions_span):\n\n regions_binsorted = (\n regions_span[(regions_span[\"bin_start\"] >= 0) & (regions_span[\"bin_end\"] >= 0)]\n .sort_values([\"bin_start\", \"bin_end\"])\n .reset_index()\n )\n\n bin_reg_idx_lo = regions_span[\"bin_start\"].searchsorted(bin_ids, \"right\") - 1\n bin_reg_idx_hi = regions_span[\"bin_end\"].searchsorted(bin_ids, \"right\")\n mask_assigned = (bin_reg_idx_lo == bin_reg_idx_hi) & (bin_reg_idx_lo >= 0)\n\n region_ids = pd.array([pd.NA] * len(bin_ids))\n region_ids[mask_assigned] = regions_span[\"name\"][bin_reg_idx_lo[mask_assigned]]\n\n return region_ids\n"
] | [
[
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
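`assign_regions_to_bins` above assigns each bin to a region with two `searchsorted` passes: a bin lies inside a region only when the interval index found from the start positions matches the one found from the end positions. A toy run of that logic, with region names and bin ids invented for the example:

import numpy as np
import pandas as pd

regions_span = pd.DataFrame({
    'name': ['regA', 'regB'],
    'bin_start': [0, 5],   # half-open bin ranges [bin_start, bin_end)
    'bin_end': [5, 9],
})

bin_ids = np.array([0, 3, 5, 8, 9])
lo = regions_span['bin_start'].searchsorted(bin_ids, 'right') - 1
hi = regions_span['bin_end'].searchsorted(bin_ids, 'right')
assigned = (lo == hi) & (lo >= 0)

# bins 0 and 3 land in regA, bins 5 and 8 in regB; bin 9 is outside every region
for b, ok, i in zip(bin_ids, assigned, lo):
    print(b, regions_span['name'].iloc[i] if ok else None)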
msakai/chainer-compiler | [
"77190561408911b33904a20c47f734f38790cfdf"
] | [
"scripts/gen_extra_test.py"
] | [
"\"\"\"Yet another ONNX test generator for custom ops and new ops.\"\"\"\n\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\nimport numpy as np\nimport onnx\n\nimport onnx_script\nimport test_case\n\nimport gen_chainercv_op_tests\nimport sentiment\n\n\n_extract_value_info = onnx_script._extract_value_info\nmake_constant_node = onnx_script.make_constant_node\ngen_test = onnx_script.gen_test\nSeq = onnx_script.Seq\n\n\ndef V(a):\n return chainer.variable.Variable(np.array(a))\n\n\ndef aranges(*shape):\n r = np.prod(shape)\n return np.arange(r).reshape(shape).astype(np.float32)\n\n\ndef expect(node, inputs, outputs, name):\n present_inputs = [x for x in node.input if (x != '')]\n present_outputs = [x for x in node.output if (x != '')]\n inputs = [v.array if hasattr(v, 'array') else v for v in inputs]\n outputs = [v.array if hasattr(v, 'array') else v for v in outputs]\n inputs = list(map(np.array, inputs))\n outputs = list(map(np.array, outputs))\n\n assert len(present_inputs) == len(inputs)\n assert len(present_outputs) == len(outputs)\n inputs = list(zip(present_inputs, inputs))\n outputs = list(zip(present_outputs, outputs))\n inputs_vi = [_extract_value_info(a, n) for n, a in inputs]\n outputs_vi = [_extract_value_info(a, n) for n, a in outputs]\n\n graph = onnx.helper.make_graph(\n nodes=[node],\n name=name,\n inputs=inputs_vi,\n outputs=outputs_vi)\n gen_test(graph, inputs, outputs, name)\n\n\ndef gen_negative_reshape_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n v = np.array([2, 3, 4])\n v_v = gb.const(v)\n shape_v = gb.const([-1, 3])\n reshaped_v = gb.Reshape([v_v, shape_v])\n gb.output(reshaped_v, v.reshape((-1, 3)))\n gb.gen_test()\n\n\ndef gen_inf_nan_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n one_v = gb.const(1.0)\n none_v = gb.const(-1.0)\n zero_v = gb.const(0.0)\n inf_v = gb.Div([one_v, zero_v])\n ninf_v = gb.Div([none_v, zero_v])\n nan_v = gb.Log([none_v])\n gb.output(inf_v, np.inf)\n gb.output(ninf_v, -np.inf)\n gb.output(nan_v, -np.nan)\n gb.gen_test()\n\n\ndef gen_select_item_test(test_name):\n input = V(aranges(4, 3))\n indices = V([1, 2, 0, 1])\n output = F.select_item(input, indices)\n\n node = onnx.helper.make_node(\n 'ChainerSelectItem',\n inputs=['input', 'indices'],\n outputs=['output'])\n expect(node, inputs=[input, indices], outputs=[output], name=test_name)\n\n\ndef gen_scan_sum_test(test_name):\n # TODO(hamaji): Rewrite with onnx_script.\n inputs1 = np.array([[4, 5, 6], [-4, -6, -5]])\n inputs2 = np.array([[1, 2, 3], [-3, -2, -1]])\n state = np.array(0)\n out_state = []\n outputs = []\n out_all_states = []\n for bi1, bi2 in zip(inputs1, inputs2):\n st = state\n outs = []\n all_states = []\n for a, b in zip(bi1, bi2):\n ab = a - b\n r = ab + st\n outs.append(ab)\n all_states.append(st)\n st = r\n outputs.append(outs)\n out_state.append(st)\n out_all_states.append(all_states)\n outputs = np.array(outputs)\n\n inputs_vi = [_extract_value_info(inputs1[0][0], n)\n for n in ['s', 'a', 'b']]\n outputs_vi = [_extract_value_info(outputs[0][0], n)\n for n in ['r', 'ab', 'so']]\n\n sub = onnx.helper.make_node('Sub', inputs=['a', 'b'], outputs=['ab'])\n add = onnx.helper.make_node('Add', inputs=['ab', 's'], outputs=['r'])\n ident = onnx.helper.make_node('Identity', inputs=['s'], outputs=['so'])\n body = onnx.helper.make_graph(\n nodes=[sub, add, ident],\n name='body',\n inputs=inputs_vi,\n outputs=outputs_vi)\n\n node = onnx.helper.make_node(\n 'Scan',\n body=body,\n num_scan_inputs=2,\n inputs=['state', 'inputs1', 
'inputs2'],\n outputs=['out_state', 'outputs', 'out_all_states'])\n expect(node,\n inputs=[state, inputs1, inputs2],\n outputs=[out_state, outputs, out_all_states],\n name=test_name)\n\n\ndef gen_if_test(cond):\n def fn(test_name):\n tb = onnx_script.GraphBuilder(test_name + '_true')\n for i in [42, 99]:\n true_value_v = tb.const(i)\n tb.output(true_value_v, i)\n\n fb = onnx_script.GraphBuilder(test_name + '_false')\n for i in [-42, -99]:\n false_value_v = fb.const(i)\n fb.output(false_value_v, i)\n\n gb = onnx_script.GraphBuilder(test_name)\n cond_v = gb.input('cond', cond)\n out1_v, out2_v = gb.If([cond_v],\n then_branch=tb.make_graph(),\n else_branch=fb.make_graph(),\n outputs=['42', '99'])\n gb.output(out1_v, 42 if cond else -42)\n gb.output(out2_v, 99 if cond else -99)\n gb.gen_test()\n\n return fn\n\n\ndef gen_if_with_input_test(cond):\n def fn(test_name):\n tb = onnx_script.GraphBuilder(test_name + '_true')\n input_v = tb.input('input', 42)\n tb.output(tb.Identity([input_v]), 42)\n\n fb = onnx_script.GraphBuilder(test_name + '_false')\n input_v = fb.input('input', 42)\n fb.output(fb.Neg([input_v]), 42)\n\n gb = onnx_script.GraphBuilder(test_name)\n cond_v = gb.input('cond', cond)\n in_v = gb.input('in', 42)\n out_v = gb.If([cond_v, in_v],\n then_branch=tb.make_graph(),\n else_branch=fb.make_graph())\n gb.output(out_v, 42 if cond else -42)\n gb.gen_test()\n\n return fn\n\n\ndef gen_if_with_external_test(cond):\n def fn(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n in0_v = gb.input('in0', 42)\n in1_v = gb.input('in1', 99)\n in2_v = gb.input('in2', 100)\n\n tb = onnx_script.GraphBuilder(test_name + '_true')\n tb.output(tb.Add([in0_v, in1_v]), 42)\n\n fb = onnx_script.GraphBuilder(test_name + '_false')\n fb.output(fb.Sub([in1_v, in2_v]), 42)\n\n cond_v = gb.input('cond', cond)\n out_v = gb.If([cond_v],\n then_branch=tb.make_graph(),\n else_branch=fb.make_graph())\n gb.output(out_v, 42 + 99 if cond else 99 - 100)\n gb.gen_test()\n\n return fn\n\n\ndef gen_loop_test(max_trip_count=7,\n cond_trip_count=6,\n terminal_condition=True,\n has_scan_outputs=False):\n # TODO(hamaji): Rewrite with onnx_script.\n def fn(test_name):\n input_state = np.array(42)\n state = input_state\n\n trip_counts = []\n if max_trip_count is not None:\n trip_counts.append(max_trip_count)\n if terminal_condition is False:\n trip_counts.append(0)\n elif terminal_condition is not None:\n # `cond_trip_count` is not checked until the first\n # iteration finishes.\n trip_counts.append(max(cond_trip_count, 1))\n trip_count = min(trip_counts)\n\n output = np.array(sum(range(trip_count)) + 11 * trip_count + 42)\n scan_outputs = []\n if has_scan_outputs:\n scan_outputs = [\n np.array([sum(range(i + 1)) + 11 * (i + 1) + 42\n for i in range(trip_count)]),\n np.array([i * i for i in range(trip_count)]),\n np.array(list(range(trip_count)))]\n\n iter_vi = _extract_value_info(np.array(0), 'iter')\n cond_in_vi = _extract_value_info(np.array(True), 'cond_in')\n cond_vi = _extract_value_info(np.array(True), 'cond')\n inputs_vi = [_extract_value_info(state, 'in')]\n outputs_vi = [_extract_value_info(output, 'out')]\n nodes = []\n if has_scan_outputs:\n outputs_vi.append(_extract_value_info(output, 'out2'))\n outputs_vi.append(_extract_value_info(output, 'square'))\n outputs_vi.append(_extract_value_info(output, 'iter2'))\n nodes.append(onnx.helper.make_node('Identity', inputs=['out'],\n outputs=['out2']))\n nodes.append(onnx.helper.make_node('Identity', inputs=['iter'],\n outputs=['iter2']))\n 
nodes.append(onnx.helper.make_node('Mul', inputs=['iter', 'iter'],\n outputs=['square']))\n\n nodes.append(make_constant_node(\n 'const_11', onnx.TensorProto.INT64, 11))\n nodes.append(onnx.helper.make_node('Sum',\n inputs=['in', 'iter', 'const_11'],\n outputs=['out']))\n nodes.append(make_constant_node(\n 'loop_cnt', onnx.TensorProto.INT64, cond_trip_count - 1))\n nodes.append(onnx.helper.make_node('Less', inputs=['iter', 'loop_cnt'],\n outputs=['cond']))\n body = onnx.helper.make_graph(\n nodes=nodes,\n name='body',\n inputs=[iter_vi] + [cond_in_vi] + inputs_vi,\n outputs=[cond_vi] + outputs_vi)\n\n max_trip_cnt_sym = ''\n max_trip_cnt_value = []\n if max_trip_count is not None:\n max_trip_cnt_sym = 'max_trip_cnt'\n max_trip_cnt_value = [np.array(max_trip_count)]\n\n terminal_cond_sym = ''\n terminal_cond_value = []\n if terminal_condition is not None:\n terminal_cond_sym = 'terminal_cond'\n terminal_cond_value = [np.array(terminal_condition)]\n\n output_syms = ['output']\n output_values = [output]\n if has_scan_outputs:\n output_syms += ['history', 'square', 'range']\n output_values += scan_outputs\n\n node = onnx.helper.make_node(\n 'Loop',\n body=body,\n inputs=[max_trip_cnt_sym, terminal_cond_sym, 'state'],\n outputs=output_syms)\n expect(node,\n inputs=max_trip_cnt_value + terminal_cond_value + [state],\n outputs=output_values,\n name=test_name)\n\n return fn\n\n\ndef gen_loop_use_enclosing_test():\n def fn(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n init = np.array(10, np.float32)\n init_v = gb.param('init', init)\n external = np.array(42, np.float32)\n external_v = gb.param('external', external)\n\n bb = onnx_script.GraphBuilder(test_name + '_body')\n iter_v = bb.input('iter', np.array(0))\n cond_v = bb.input('cond', np.array(True))\n\n state_v = bb.input('state', init)\n result_v = bb.Add([state_v, external_v])\n cond_v = bb.const(True)\n bb.output(cond_v, np.array(True))\n bb.output(result_v, init)\n\n num_iter_v = gb.const(5)\n true_v = gb.const(True)\n out_v = gb.Loop([num_iter_v, true_v, init_v],\n body=bb.make_graph())\n\n expected = float(5 * 42 + 10)\n gb.output(out_v, expected)\n\n gb.gen_test()\n\n return fn\n\n\ndef gen_backprop_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n i = np.array(42, np.float32)\n j = np.array(99, np.float32)\n\n i_v = gb.param('i', i)\n j_v = gb.param('j', j)\n\n r_v = gb.Mul([i_v, j_v])\n\n gb.output(r_v, i * j)\n gb.gradient(i_v, j)\n gb.gradient(j_v, i)\n gb.gen_test()\n\n\ndef gen_concat_backprop_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n i = np.array([42], np.float32)\n j = np.array([99], np.float32)\n\n i_v = gb.param('i', i)\n j_v = gb.param('j', j)\n\n concat_v = gb.Concat([i_v, j_v], axis=0)\n m = np.array([2, 3], np.float32)\n r_v = gb.Mul([concat_v, gb.const(m)])\n r = np.concatenate([i, j]) * m\n\n gb.output(r_v, r)\n gb.gradient(i_v, np.array([2], np.float32))\n gb.gradient(j_v, np.array([3], np.float32))\n gb.gen_test()\n\n\n# Borrowed from: https://github.com/tensorflow/tensorflow/blob/master/tensorflow/cc/framework/while_gradients_test.cc\ndef gen_loop_backprop_test(ii, ji, ki, gi, gj, gk):\n i, j, k = ii, ji, ki\n while i < 10:\n i += j\n j += 1\n expected = np.array(i + j + k, np.float32)\n\n def fn(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n i = np.array(ii, np.float32)\n j = np.array(ji, np.float32)\n k = np.array(ki, np.float32)\n i_v = gb.param('i', i)\n j_v = gb.param('j', j)\n k_v = gb.param('k', k)\n\n bb = onnx_script.GraphBuilder(test_name + '_body')\n iter_v 
= bb.input('iter', np.array(0))\n cond_v = bb.input('cond', np.array(True))\n bi_v = bb.input('bi', i)\n bj_v = bb.input('bj', j)\n bk_v = bb.input('bk', k)\n one_v = bb.const(1.0)\n ni_v = bb.Add([bi_v, bj_v])\n nj_v = bb.Add([bj_v, one_v])\n nk_v = bb.Identity([bk_v])\n ten_v = bb.const(10.0)\n cond_v = bb.Less([ni_v, ten_v])\n bb.output(cond_v, np.array(True))\n bb.output(ni_v, i)\n bb.output(nj_v, j)\n bb.output(nk_v, k)\n\n true_v = gb.const(True)\n oi_v, oj_v, ok_v = gb.Loop(['', true_v, i_v, j_v, k_v],\n body=bb.make_graph(),\n outputs=['oi', 'oj', 'ok'])\n sum_v = gb.Sum([oi_v, oj_v, ok_v])\n\n gb.output(sum_v, expected)\n gb.gradient(i_v, np.array(gi, np.float32))\n gb.gradient(j_v, np.array(gj, np.float32))\n gb.gradient(k_v, np.array(gk, np.float32))\n\n gb.gen_test()\n\n return fn\n\n\n# This case needs stacks for retained inputs/outputs in the loop.\ndef gen_loop_backprop_need_stack_test():\n ii = 1.0\n ji = 1.0\n ki = 1.0\n i = np.array(ii, np.float32)\n j = np.array(ji, np.float32)\n k = np.array(ki, np.float32)\n while i < 100:\n i *= j\n j += 1\n k = np.sqrt(k) * j\n expected = i + j + k\n\n def fn(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n i = np.array(ii, np.float32)\n j = np.array(ji, np.float32)\n k = np.array(ki, np.float32)\n i_v = gb.param('i', i)\n j_v = gb.param('j', j)\n k_v = gb.param('k', k)\n\n bb = onnx_script.GraphBuilder(test_name + '_body')\n iter_v = bb.input('iter', np.array(0))\n cond_v = bb.input('cond', np.array(True))\n bi_v = bb.input('bi', i)\n bj_v = bb.input('bj', j)\n bk_v = bb.input('bk', k)\n one_v = bb.const(1.0)\n ni_v = bb.Mul([bi_v, bj_v])\n nj_v = bb.Add([bj_v, one_v])\n nk_v = bb.Mul([bb.Sqrt([bk_v]), nj_v])\n hundred_v = bb.const(100.0)\n cond_v = bb.Less([ni_v, hundred_v])\n bb.output(cond_v, np.array(True))\n bb.output(ni_v, i)\n bb.output(nj_v, j)\n bb.output(nk_v, k)\n\n true_v = gb.const(True)\n oi_v, oj_v, ok_v = gb.Loop(['', true_v, i_v, j_v, k_v],\n body=bb.make_graph(),\n outputs=['oi', 'oj', 'ok'])\n sum_v = gb.Sum([oi_v, oj_v, ok_v])\n\n gb.output(sum_v, expected)\n gb.gradient(i_v, np.array(120.0, np.float32))\n gb.gradient(j_v, np.array(284.1395, np.float32))\n gb.gradient(k_v, np.array(0.7103229, np.float32))\n\n gb.gen_test()\n\n return fn\n\n\ndef gen_sequence_test(test_name):\n # TODO(hamaji): Rewrite with onnx_script.\n inputs = [np.array(a) for a in [[1, 2], [3, 4], [5, 6]]]\n nodes = []\n nodes.append(onnx.helper.make_node(\n 'ChainerSequenceCreate',\n inputs=[],\n outputs=['seq0']))\n\n for i, input in enumerate(inputs):\n nodes.append(onnx.helper.make_node(\n 'ChainerSequenceAppend',\n inputs=['seq%d' % i, 'in%d' % i],\n outputs=['seq%d' % (i + 1)]))\n\n index_value = 1\n nodes.append(make_constant_node(\n 'index', onnx.TensorProto.INT64, [index_value]))\n nodes.append(onnx.helper.make_node(\n 'ChainerSequenceLookup',\n inputs=['seq3', 'index'],\n outputs=['lookup_result']))\n nodes.append(onnx.helper.make_node(\n 'ChainerSequenceStack',\n inputs=['seq3'],\n outputs=['stack3_result']))\n nodes.append(onnx.helper.make_node(\n 'ChainerSequenceStack',\n inputs=['seq2'],\n outputs=['stack2_result']))\n nodes.append(onnx.helper.make_node(\n 'ChainerSequenceConcat',\n inputs=['seq3'],\n outputs=['concat3_result']))\n nodes.append(onnx.helper.make_node(\n 'ChainerSequenceConcat',\n inputs=['seq2'],\n outputs=['concat2_result']))\n nodes.append(onnx.helper.make_node(\n 'ChainerSequenceSize',\n inputs=['seq3'],\n outputs=['stack3_size']))\n\n outputs = [\n ('lookup_result', np.array([3, 4])),\n 
('stack3_result', np.stack(inputs)),\n ('stack2_result', np.stack(inputs[0:2])),\n ('concat3_result', np.concatenate(inputs)),\n ('concat2_result', np.concatenate(inputs[0:2])),\n ('stack3_size', np.array(3)),\n ]\n inputs = [('in%d' % i, input) for i, input in enumerate(inputs)]\n inputs_vi = [_extract_value_info(a, n) for n, a in inputs]\n outputs_vi = [_extract_value_info(a, n) for n, a in outputs]\n graph = onnx.helper.make_graph(\n nodes=nodes,\n name=test_name,\n inputs=inputs_vi,\n outputs=outputs_vi)\n gen_test(graph, inputs, outputs, name=test_name)\n\n\ndef gen_sequence_pad_test(test_name):\n # TODO(hamaji): Rewrite with GraphBuilder's input/output.\n gb = onnx_script.GraphBuilder(test_name)\n inputs = [np.array(a) for a in [[1, 2, 3], [4], [5, 6]]]\n gb.ChainerSequenceCreate(inputs=[], outputs=['seq0'])\n\n for i, input in enumerate(inputs):\n gb.ChainerSequenceAppend(inputs=['seq%d' % i, 'in%d' % i],\n outputs=['seq%d' % (i + 1)])\n\n index_value = 1\n index_v = gb.const([index_value])\n gb.ChainerSequenceLookup(\n inputs=['seq3', index_v],\n outputs=['lookup_result'])\n gb.ChainerSequencePad(\n value=-42.0,\n length=4,\n inputs=['seq3'],\n outputs=['pad3_result'])\n gb.ChainerSequencePad(\n value=-42.0,\n inputs=['seq2'],\n outputs=['pad2_result'])\n gb.ChainerSequenceLengths(\n inputs=['seq3'],\n outputs=['seq3_lengths_seq'])\n gb.ChainerSequenceStack(\n inputs=['seq3_lengths_seq'],\n outputs=['seq3_lengths'])\n\n padded = np.array([[1, 2, 3, -42], [4, -42, -42, -42], [5, 6, -42, -42]])\n outputs = [\n ('lookup_result', np.array([4])),\n ('pad3_result', padded),\n ('pad2_result', padded[0:2, 0:3]),\n ('seq3_lengths', np.array([3, 1, 2])),\n ]\n inputs = [('in%d' % i, input) for i, input in enumerate(inputs)]\n inputs_vi = [_extract_value_info(a, n) for n, a in inputs]\n outputs_vi = [_extract_value_info(a, n) for n, a in outputs]\n graph = onnx.helper.make_graph(\n nodes=gb.nodes,\n name=test_name,\n inputs=inputs_vi,\n outputs=outputs_vi)\n gen_test(graph, inputs, outputs, name=test_name)\n\n\ndef gen_sequence_split_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n inputs = np.array([[1, 2, 3, -42], [4, -42, -42, -42], [5, 6, -42, -42]])\n lengths = np.array([3, 1, 2])\n\n inputs_v = gb.input('input', inputs)\n lengths_v = gb.input('lengths', lengths)\n\n seq_v = gb.ChainerSequenceSeparate(inputs=[inputs_v], outputs=['seq'])\n lengths_seq_v = gb.ChainerSequenceSeparate(inputs=[lengths_v],\n outputs=['lengths_seq'])\n unpadded_v = gb.ChainerSequenceUnpad(inputs=[inputs_v, lengths_seq_v],\n outputs=['unpadded'])\n seq_a1_v = gb.ChainerSequenceSeparate(inputs=[inputs_v],\n outputs=['seq_a1'],\n axis=1)\n\n for i in range(4):\n index_v = gb.const([i], name='index_%d' % i)\n if i < 3:\n gb.output(gb.ChainerSequenceLookup(\n inputs=[seq_v, index_v],\n outputs=['split_result_%d' % i]), inputs[i])\n gb.output(gb.ChainerSequenceLookup(\n inputs=[unpadded_v, index_v],\n outputs=['unpad_result_%d' % i]), inputs[i][:lengths[i]])\n gb.output(gb.ChainerSequenceLookup(\n inputs=[seq_a1_v, index_v],\n outputs=['split_a1_result_%d' % i]), inputs[:, i])\n\n gb.gen_test()\n\n\ndef gen_sequence_io_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n input = aranges(3, 2, 4)\n input_seq = [[1, 2, 3, -42], [4, -42, -42, -42], [5, 6, -42, -42]]\n\n input_v = gb.input('input', input)\n input_seq_v = gb.input('input_seq', Seq(input_seq))\n\n split_v = gb.ChainerSequenceSeparate([input_v])\n stack_v = gb.ChainerSequenceStack([input_seq_v])\n\n gb.output(gb.Identity([input_v]), 
input)\n gb.output(gb.Identity([input_seq_v]), Seq(input_seq))\n gb.output(split_v, Seq(list(map(np.squeeze, np.split(input, len(input))))))\n gb.output(stack_v, np.stack(input_seq))\n\n gb.gen_test()\n\n\ndef gen_sequence_range_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n num_inputs = 0\n for args in [(4,), (-4,), (3, 8), (5, 2),\n (1, 16, 3), (1, 17, 3), (5, -2, -1), (9, 15, -1)]:\n input_vs = []\n for arg in args:\n input_vs.append(gb.input('input_%d' % num_inputs, arg))\n num_inputs += 1\n output_v = gb.ChainerSequenceRange(input_vs)\n len_v = gb.ChainerSequenceSize([output_v])\n expected = list(range(*args))\n gb.output(len_v, len(expected))\n if expected:\n gb.output(output_v, Seq(expected))\n gb.gen_test()\n\n\ndef gen_sequence_pop_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n inputs = np.array([10, 3, 4, 7, 2, 5])\n\n inputs_v = gb.input('input', inputs)\n\n seq_v = gb.ChainerSequenceSeparate(inputs=[inputs_v])\n pop_count = 3\n for i in range(pop_count):\n seq_v, pop_v = gb.ChainerSequencePop(\n inputs=[seq_v],\n outputs=['seq_%d' % i, 'pop_%d' % i]\n )\n gb.output(pop_v, inputs[-1-i])\n\n # This `seq_v` is used twice, so not-optimized pass will be tested.\n len1_v = gb.ChainerSequenceSize(inputs=[seq_v])\n seq_v, _ = gb.ChainerSequencePop(\n inputs=[seq_v],\n outputs=['seq_final', 'pop_final'],\n )\n len2_v = gb.ChainerSequenceSize(inputs=[seq_v])\n gb.output(gb.Add(inputs=[len1_v, len2_v]),\n (len(inputs) - pop_count) * 2 - 1)\n\n gb.gen_test()\n\n\ndef gen_sequence_constants_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n inputs = [4, 2, 3]\n seq_v = gb.const_seq(inputs)\n gb.output(seq_v, Seq(inputs))\n gb.gen_test()\n\n\ndef gen_sequence_create_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n inputs = [4, 2, 3]\n inputs_v = [gb.input('input_%d' % i, input)\n for i, input in enumerate(inputs)]\n seq_v = gb.ChainerSequenceCreate(inputs_v)\n stack_v = gb.ChainerSequenceStack([seq_v])\n gb.output(seq_v, Seq(inputs))\n gb.output(stack_v, np.array(inputs))\n gb.gen_test()\n\n\ndef gen_sequence_extend_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n input1 = aranges(3, 4)\n input2 = aranges(3, 1) * 2\n seq1 = [np.squeeze(i, 0) for i in np.split(input1, 3)]\n seq2 = [np.squeeze(i, 0) for i in np.split(input2, 3)]\n\n input1_v = gb.input('input1', input1)\n input2_v = gb.input('input2', input2)\n seq1_v = gb.ChainerSequenceSeparate([input1_v])\n seq2_v = gb.ChainerSequenceSeparate([input2_v])\n\n gb.output(gb.ChainerSequenceExtend([seq1_v, seq2_v]), Seq(seq1 + seq2))\n\n gb.gen_test()\n\n\ndef gen_sequence_update_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n inputs = [4, 2, 3]\n seq_v = gb.const_seq(inputs)\n seq_v = gb.ChainerSequenceUpdate([seq_v, gb.const(2), gb.const(42)])\n seq_v = gb.ChainerSequenceUpdate([seq_v, gb.const(-3), gb.const(-49)])\n gb.output(seq_v, Seq([-49, 2, 42]))\n gb.gen_test()\n\n\ndef gen_generic_len_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n input = aranges(4, 2, 3)\n\n input_v = gb.input('input', input)\n len0_v = gb.ChainerGenericLen([input_v])\n reduced_v = gb.ReduceSum([input_v], axes=[0], keepdims=False)\n len1_v = gb.ChainerGenericLen([reduced_v])\n seq_v = gb.ChainerSequenceSeparate(inputs=[input_v])\n len_seq_v = gb.ChainerGenericLen([seq_v])\n\n gb.output(len0_v, input.shape[0])\n gb.output(len1_v, input.shape[1])\n gb.output(len_seq_v, input.shape[0])\n\n gb.gen_test()\n\n\ndef gen_generic_getitem_test(test_name):\n gb = 
onnx_script.GraphBuilder(test_name)\n input = aranges(4, 5, 3)\n reduced = np.sum(input, 0)\n\n input_v = gb.input('input', input)\n reduced_v = gb.ReduceSum([input_v], axes=[0], keepdims=False)\n seq_v = gb.ChainerSequenceSeparate(inputs=[input_v])\n\n for i in range(-2, 4):\n index_v = gb.const([i])\n gb.output(gb.ChainerGenericGetItem([input_v, index_v]), input[i])\n gb.output(gb.ChainerGenericGetItem([reduced_v, index_v]), reduced[i])\n gb.output(gb.ChainerGenericGetItem([seq_v, index_v]), input[i])\n\n gb.gen_test()\n\n\ndef gen_generic_getslice_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n input = aranges(4, 5, 3)\n reduced = np.sum(input, 0)\n\n input_v = gb.input('input', input)\n reduced_v = gb.ReduceSum([input_v], axes=[0], keepdims=False)\n seq_v = gb.ChainerSequenceSeparate(inputs=[input_v])\n\n def get_slice(input_v, s):\n ins = [input_v]\n if s.start is not None:\n v = gb.const([s.start])\n ins.append(v)\n if s.stop is not None:\n v = gb.const([s.stop])\n ins.append(v)\n if s.step is not None:\n v = gb.const([s.step])\n ins.append(v)\n return gb.ChainerGenericGetSlice(ins)\n\n def add_test(s):\n expected = input[s]\n gb.output(get_slice(input_v, s), expected)\n gb.output(get_slice(reduced_v, s), reduced[s])\n actual_v = get_slice(seq_v, s)\n if len(expected):\n gb.output(gb.ChainerSequenceStack([actual_v]), expected)\n else:\n gb.output(gb.ChainerSequenceSize([actual_v]), 0)\n\n add_test(slice(None))\n for i in range(4):\n add_test(slice(i, None))\n\n for s, e in [(1, 2), (-2, 3), (0, -2), (999, 9999)]:\n add_test(slice(s, e))\n\n for s, e, t in [(1, 4, 2), (0, 100, -1), (0, 100, -2)]:\n add_test(slice(s, e, t))\n\n gb.gen_test()\n\n\n# TODO(hamaji): Add more tests for both GetItem/SetItem.\ndef gen_setitem_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n input = np.array([1, 2, 3])\n\n input_v = gb.input('input', input)\n output_v = gb.ChainerSetItem([input_v, gb.const(1), gb.const(42)],\n slice_specs=[1])\n\n gb.output(output_v, np.array([1, 42, 3]))\n gb.gen_test()\n\n\ndef gen_generic_add_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n input1 = aranges(3, 4)\n input2 = aranges(3, 1) * 2\n seq1 = [np.squeeze(i, 0) for i in np.split(input1, 3)]\n seq2 = [np.squeeze(i, 0) for i in np.split(input2, 3)]\n\n input1_v = gb.input('input1', input1)\n input2_v = gb.input('input2', input2)\n seq1_v = gb.ChainerSequenceSeparate([input1_v])\n seq2_v = gb.ChainerSequenceSeparate([input2_v])\n\n gb.output(gb.ChainerGenericAdd([input1_v, input2_v]), input1 + input2)\n gb.output(gb.ChainerGenericAdd([seq1_v, seq2_v]), Seq(seq1 + seq2))\n gb.output(gb.ChainerGenericAdd([input1_v, seq2_v]), input1 + input2)\n gb.output(gb.ChainerGenericAdd([seq1_v, input2_v]), input1 + input2)\n\n gb.gen_test()\n\n\n# TODO(hamaji): Test actual output.\ndef gen_print_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n in1_v = gb.const(21)\n in2_v = gb.const(2)\n result_v = gb.Mul([in1_v, in2_v])\n gb.ChainerPrint([result_v], outputs=[])\n gb.output(gb.Identity([result_v]), 42)\n gb.gen_test()\n\n\ndef gen_hello_world_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n hello = 'Hello, world!\\n'\n out_v = gb.ChainerSequenceCreate([])\n for ch in hello:\n ch_v = gb.const(ord(ch), dtype=np.uint8)\n out_v = gb.ChainerSequenceAppend([out_v, ch_v])\n gb.output(out_v, Seq(list(np.array(ord(ch), np.uint8) for ch in hello)))\n gb.gen_test()\n\n\ndef gen_type_coersion_test(test_name):\n # Probably, ONNX expects no type coersion happens and this test is\n # 
not valid ONNX, but we relax the restriction.\n gb = onnx_script.GraphBuilder(test_name)\n iv = 42\n fv = 2.3\n int_v = gb.const(iv)\n float_v = gb.const(fv)\n\n gb.output(gb.Add([int_v, float_v]), iv + fv)\n gb.output(gb.Add([float_v, int_v]), fv + iv)\n gb.output(gb.Sub([int_v, float_v]), iv - fv)\n gb.output(gb.Sub([float_v, int_v]), fv - iv)\n gb.output(gb.Mul([int_v, float_v]), iv * fv)\n gb.output(gb.Mul([float_v, int_v]), fv * iv)\n gb.output(gb.Div([int_v, float_v]), iv / fv)\n gb.output(gb.Div([float_v, int_v]), fv / iv)\n\n gb.gen_test()\n\n\ndef gen_incomplete_transpose_test(test_name):\n # ONNX does not allow transposition with incomplete permutations,\n # but this is necessary to realize things like np.swapaxes.\n gb = onnx_script.GraphBuilder(test_name)\n\n input = aranges(3, 2, 4, 5, 6)\n input_v = gb.input('input', input)\n gb.output(gb.Transpose([input_v], perm=[0, 2, 1]),\n np.transpose(input, axes=[0, 2, 1, 3, 4]))\n\n gb.gen_test()\n\n\ndef gen_maxpool_cover_all_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n\n input = np.random.random((1, 3, 7, 7))\n input_v = gb.input('input', input)\n\n # Forget shape.\n squeezed_v = gb.Squeeze([input_v])\n dynamic_v = gb.Unsqueeze([squeezed_v], axes=[0])\n\n gb.output(gb.MaxPool([input_v], kernel_shape=[3, 3], strides=[2, 2],\n outputs=['not_cover_all']),\n F.max_pooling_2d(input, ksize=3, stride=2, cover_all=False))\n gb.output(gb.MaxPool([input_v], kernel_shape=[3, 3], strides=[2, 2],\n ceil_mode=1,\n outputs=['cover_all']),\n F.max_pooling_2d(input, ksize=3, stride=2, cover_all=True))\n gb.output(gb.MaxPool([dynamic_v], kernel_shape=[3, 3], strides=[2, 2],\n outputs=['not_cover_all_dynamic']),\n F.max_pooling_2d(input, ksize=3, stride=2, cover_all=False))\n gb.output(gb.MaxPool([dynamic_v], kernel_shape=[3, 3], strides=[2, 2],\n ceil_mode=1,\n outputs=['cover_all_dynamic']),\n F.max_pooling_2d(input, ksize=3, stride=2, cover_all=True))\n\n gb.gen_test()\n\n\ndef gen_batchnorm_training_test(save_mean_var=False):\n def fn(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n\n batch_size = 2\n chan = 3\n wh = 5\n epsilon = 1e-4\n momentum = 0.95\n chainer.config.train = True\n\n input = np.random.random((batch_size, chan, wh, wh)).astype(np.float32)\n bn = L.BatchNormalization(chan, decay=momentum, eps=epsilon)\n # Initialize.\n bn(np.random.random((batch_size, chan, wh, wh)).astype(np.float32))\n\n scale = bn.gamma.array.copy()\n bias = bn.beta.array.copy()\n running_mean = bn.avg_mean.copy()\n running_var = bn.avg_var.copy()\n\n output = bn(input)\n\n input_v = gb.input('input', input)\n scale_v = gb.input('scale', scale)\n bias_v = gb.input('bias', bias)\n mean_in_v = gb.input('mean_in', running_mean)\n var_in_v = gb.input('var_in', running_var)\n\n output_names = ['output', 'mean_out', 'var_out']\n if save_mean_var:\n output_names.extend(['saved_mean', 'saved_var'])\n\n output_v, mean_out_v, var_out_v, *saved = gb.BatchNormalization(\n inputs=[input_v, scale_v, bias_v, mean_in_v, var_in_v],\n epsilon=epsilon, momentum=momentum,\n outputs=output_names\n )\n\n gb.output(output_v, output)\n gb.output(mean_out_v, bn.avg_mean)\n gb.output(var_out_v, bn.avg_var)\n if saved:\n mean_out_v, var_out_v = saved\n mean_out = input.mean(axis=(0, 2, 3))\n var_out = input.var(axis=(0, 2, 3))\n np.testing.assert_allclose(output.creator.mean, mean_out)\n gb.output(mean_out_v, mean_out)\n gb.output(var_out_v, var_out)\n\n gb.gen_test()\n\n return fn\n\n\ndef gen_spacetodepth_test(test_name):\n gb = 
onnx_script.GraphBuilder(test_name)\n small_data = np.array([0.0, 0.1, 0.2, 0.3,\n 1.0, 1.1, 1.2, 1.3,\n 2.0, 2.1, 2.2, 2.3,\n 3.0, 3.1, 3.2, 3.3]).reshape(1, 2, 2, 4)\n input_small = gb.input('input_small', small_data)\n output_small = np.array([0.0, 0.2, 2.0, 2.2,\n 0.1, 0.3, 2.1, 2.3,\n 1.0, 1.2, 3.0, 3.2,\n 1.1, 1.3, 3.1, 3.3]).reshape(1, 8, 1, 2)\n gb.output(gb.SpaceToDepth(\n inputs=['input_small'], blocksize=2, outputs=['output_small']),\n output_small)\n\n middle_data = np.arange(108, dtype=np.float32).reshape(2, 3, 3, 6)\n input_middle = gb.input('input_middle', middle_data)\n output_middle = np.array([\n 0, 3, 18, 21, 36, 39, 1, 4, 19, 22, 37,\n 40, 2, 5, 20, 23, 38, 41, 6, 9, 24, 27,\n 42, 45, 7, 10, 25, 28, 43, 46, 8, 11, 26,\n 29, 44, 47, 12, 15, 30, 33, 48, 51, 13, 16,\n 31, 34, 49, 52, 14, 17, 32, 35, 50, 53, 54,\n 57, 72, 75, 90, 93, 55, 58, 73, 76, 91, 94,\n 56, 59, 74, 77, 92, 95, 60, 63, 78, 81, 96,\n 99, 61, 64, 79, 82, 97, 100, 62, 65, 80, 83,\n 98, 101, 66, 69, 84, 87, 102, 105, 67, 70, 85,\n 88, 103, 106, 68, 71, 86, 89, 104, 107],\n dtype=np.float32).reshape(2, 27, 1, 2)\n gb.output(gb.SpaceToDepth(\n inputs=['input_middle'], blocksize=3, outputs=['output_middle']),\n output_middle)\n\n gb.gen_test()\n\n\ndef gen_imagescaler_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n test_data = np.ones([2, 3, 4, 5], dtype=np.float32)\n gb.input('input', test_data)\n scale = 2.0\n bias = [1., 2., 3.]\n expected = test_data * scale + np.array(\n bias, dtype=np.float32)[None, :, None, None]\n\n gb.output(gb.ImageScaler(\n inputs=['input'], scale=scale, bias=bias, outputs=['output']),\n expected)\n\n gb.gen_test()\n\n\ndef gen_pad_negative_width_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n v = aranges(2, 5, 6, 7)\n i_v = gb.input('input', v)\n gb.output(gb.Pad([i_v], pads=[0, -2, -1, -2, 0, -2, -2, -1]),\n v[:, 2:-2, 1:-2, 2:-1])\n gb.gen_test()\n\n\ndef gen_pad_batch_size_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n v = aranges(2, 5, 6, 7)\n i_v = gb.input('input', v)\n o = np.pad(v, ((0, 6), (0, 0), (0, 0), (0, 0)), 'constant')\n gb.output(gb.ChainerPadBatchSize([i_v], size=8), o)\n gb.gen_test()\n\n\ndef gen_const_int_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n c = np.array(list(range(20)))\n c_v = gb.const(c)\n gb.output(gb.Identity([c_v]), c)\n gb.gen_test()\n\n\ndef gen_const_str_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n i = 42\n i_v = gb.input('input', i)\n c = np.array(\"hoge\", dtype=np.object)\n c_v = gb.const(c)\n gb.ChainerPrint([c_v, i_v], [])\n gb.output(gb.Identity([i_v]), i)\n gb.gen_test()\n\n\ndef gen_const_prop_use_twice_test(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n c = np.array(list(range(20)))\n c_v = gb.const(c)\n gb.output(gb.Add([c_v, c_v]), c * 2)\n gb.gen_test()\n\n\ndef gen_abs_test(dtype):\n def fn(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n i = np.array([42, -24], dtype=dtype)\n i_v = gb.input('input', i)\n gb.output(gb.Abs([i_v]), np.abs(i))\n gb.gen_test()\n return fn\n\n\ndef gen_convtranspose_bn(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n bsize = 2\n ichan = 3\n ochan = 4\n ksize = 3\n isize = 7\n\n x = aranges(bsize, ochan, isize, isize)\n w = aranges(ochan, ichan, ksize, ksize) * 0.01\n scale = aranges(ichan) * 0.1 + 1\n bias = aranges(ichan) * 0.1 + 2\n mean = aranges(ichan) * 0.1 + 3\n var = aranges(ichan) * 0.1 + 4\n\n conv = F.deconvolution_2d(x, w, pad=1, outsize=(isize, isize))\n y = 
F.fixed_batch_normalization(conv, scale, bias, mean, var)\n\n x_v = gb.input('x', x)\n w_v = gb.param('w', w)\n scale_v = gb.param('scale', scale)\n bias_v = gb.param('bias', bias)\n mean_v = gb.param('mean', mean)\n var_v = gb.param('var', var)\n\n conv_v = gb.ConvTranspose([x_v, w_v],\n kernel_shape=[ksize, ksize],\n pads=[1, 1, 1, 1],\n output_shape=[isize, isize])\n y_v = gb.BatchNormalization([conv_v, scale_v, bias_v, mean_v, var_v])\n\n gb.output(y_v, y)\n gb.gen_test()\n\n\ndef gen_unsqueeze_negative_axis(test_name):\n gb = onnx_script.GraphBuilder(test_name)\n\n x = aranges(1, 2, 3, 5)\n y = np.expand_dims(x, axis=-2)\n\n x_v = gb.input('x', x)\n y_v = gb.Unsqueeze([x_v], axes=[-2])\n\n gb.output(y_v, y)\n gb.gen_test()\n\n\nclass TestCase(test_case.TestCase):\n def __init__(self, name, func, **kwargs):\n super(TestCase, self).__init__('out', name, **kwargs)\n self.func = func\n\n\ndef get_tests():\n tests = []\n def test(name, func, diversed=False, **kwargs):\n tests.append(TestCase(name, func, **kwargs))\n if diversed:\n tests.append(TestCase(name + '_diversed', func,\n backend='chxvm_test', **kwargs))\n\n test('extra_test_negative_reshape', gen_negative_reshape_test)\n\n test('extra_test_inf_nan', gen_inf_nan_test, equal_nan=True)\n\n test('extra_test_select_item', gen_select_item_test, diversed=True)\n\n test('extra_test_if_true', gen_if_test(True))\n test('extra_test_if_false', gen_if_test(False))\n test('extra_test_if_with_input_true',\n gen_if_with_input_test(True))\n test('extra_test_if_with_input_false',\n gen_if_with_input_test(False))\n test('extra_test_if_with_external_true',\n gen_if_with_external_test(True))\n test('extra_test_if_with_external_false',\n gen_if_with_external_test(False))\n\n test('extra_test_loop_basic', gen_loop_test())\n test('extra_test_loop_max_trip_count',\n gen_loop_test(max_trip_count=4))\n test('extra_test_loop_no_max_trip_count',\n gen_loop_test(max_trip_count=None))\n test('extra_test_loop_false_cond',\n gen_loop_test(terminal_condition=False))\n test('extra_test_loop_no_cond',\n gen_loop_test(terminal_condition=None))\n test('extra_test_loop_scan_out',\n gen_loop_test(has_scan_outputs=True))\n test('extra_test_loop_zero_max_trip_count',\n gen_loop_test(max_trip_count=0))\n test('extra_test_loop_zero_trip_count',\n gen_loop_test(cond_trip_count=0))\n # TODO(hamaji): Probably, we do not care loops with zero\n # iterations and scan outputs.\n #\n # TestCase('extra_test_loop_zero_max_trip_count_scan',\n # gen_loop_test(max_trip_count=0,\n # has_scan_outputs=True)),\n # TestCase('extra_test_loop_zero_trip_count_scan',\n # gen_loop_test(cond_trip_count=0,\n # has_scan_outputs=True)),\n\n test('extra_test_loop_use_enclosing',\n gen_loop_use_enclosing_test())\n\n test('extra_backprop_test', gen_backprop_test)\n\n test('extra_backprop_test_concat', gen_concat_backprop_test)\n\n test('extra_backprop_test_loop_012',\n gen_loop_backprop_test(0, 1, 2, 1, 5, 1))\n test('extra_backprop_test_loop_000',\n gen_loop_backprop_test(0, 0, 0, 1, 6, 1))\n test('extra_backprop_test_need_stack_loop',\n gen_loop_backprop_need_stack_test())\n\n test('extra_test_scan_sum', gen_scan_sum_test, fail=True)\n\n test('extra_test_sequence', gen_sequence_test)\n test('extra_test_sequence_pad', gen_sequence_pad_test)\n test('extra_test_sequence_split', gen_sequence_split_test)\n test('extra_test_sequence_io', gen_sequence_io_test)\n test('extra_test_sequence_range', gen_sequence_range_test)\n test('extra_test_sequence_pop', gen_sequence_pop_test)\n 
test('extra_test_sequence_constants', gen_sequence_constants_test)\n test('extra_test_sequence_create', gen_sequence_create_test)\n test('extra_test_sequence_extend', gen_sequence_extend_test)\n test('extra_test_sequence_update', gen_sequence_update_test)\n\n test('extra_test_sentiment_lstm',\n sentiment.gen_rnn_sentiment_test('LSTM'), rtol=0.2)\n test('extra_test_sentiment_bilstm',\n sentiment.gen_rnn_sentiment_test('BiLSTM'),\n rtol=0.5)\n test('extra_test_sentiment_gru',\n sentiment.gen_rnn_sentiment_test('GRU'), rtol=0.4)\n # TODO(hamaji): Investigate why there is a huge error.\n test('extra_test_sentiment_bigru',\n sentiment.gen_rnn_sentiment_test('BiGRU'), rtol=2.5)\n\n test('extra_test_generic_len', gen_generic_len_test)\n test('extra_test_generic_getitem', gen_generic_getitem_test)\n test('extra_test_generic_getslice', gen_generic_getslice_test)\n test('extra_test_generic_add', gen_generic_add_test)\n\n test('extra_test_setitem', gen_setitem_test)\n\n test('extra_test_print', gen_print_test)\n test('extra_test_hello_world', gen_hello_world_test)\n\n test('extra_test_type_coersion', gen_type_coersion_test,\n skip_shape_inference=True)\n test('extra_test_incomplete_transpose',\n gen_incomplete_transpose_test,\n skip_shape_inference=True)\n test('extra_test_maxpool_cover_all', gen_maxpool_cover_all_test,\n skip_shape_inference=True)\n\n test('extra_test_batchnorm_training', gen_batchnorm_training_test(False))\n test('extra_test_batchnorm_training_saved',\n gen_batchnorm_training_test(True))\n\n test('extra_test_spacetodepth', gen_spacetodepth_test)\n\n test('extra_test_imagescaler', gen_imagescaler_test)\n\n test('extra_test_pad_negative_width', gen_pad_negative_width_test)\n\n test('extra_test_pad_batch_size', gen_pad_batch_size_test)\n\n test('extra_test_const_int', gen_const_int_test)\n\n test('extra_test_const_str', gen_const_str_test)\n\n test('extra_test_const_prop_use_twice', gen_const_prop_use_twice_test)\n\n test('extra_test_abs_int8', gen_abs_test(np.int8))\n test('extra_test_abs_int64', gen_abs_test(np.int64))\n test('extra_test_abs_float16', gen_abs_test(np.float16))\n\n test('extra_test_convtranspose_bn', gen_convtranspose_bn)\n\n # TODO(hamaji): ONNX's shape inference for Unsqueeze is probably broken.\n test('extra_test_unsqueeze_negative_axis', gen_unsqueeze_negative_axis,\n skip_shape_inference=True)\n\n tests += gen_chainercv_op_tests.get_tests()\n\n return tests\n\n\ndef main():\n for test in get_tests():\n test.func(test.name)\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.split",
"numpy.expand_dims",
"numpy.random.random",
"numpy.pad",
"numpy.sqrt",
"numpy.abs",
"numpy.arange",
"numpy.squeeze",
"numpy.stack",
"numpy.ones",
"numpy.concatenate",
"numpy.prod",
"numpy.transpose",
"numpy.array",
"numpy.sum",
"numpy.testing.assert_allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Tyuzu/opencv_snippets | [
"f6690baa72a3119b9545f5c031c523dccddf1281"
] | [
"corners.py"
] | [
"import numpy as np\nimport cv2\n\nimg = cv2.imread('k.jpg')\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ngray = np.float32(gray)\n\ncorners = cv2.goodFeaturesToTrack(gray, 100, 0.01, 10)\ncorners = np.int0(corners)\n\nfor corner in corners:\n x,y = corner.ravel()\n cv2.circle(img,(x,y),3,255,-1)\n \ncv2.imshow('Corner',img)\n"
] | [
[
"numpy.int0",
"numpy.float32"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adamnovak/biopython | [
"92772dd6add33e0b87ab593841f924f0f6f16090"
] | [
"Bio/Affy/CelFile.py"
] | [
"# Copyright 2004 by Harry Zuzan. All rights reserved.\n# This code is part of the Biopython distribution and governed by its\n# license. Please see the LICENSE file that should have been included\n# as part of this package.\n\n\"\"\"\nClasses for accessing the information in Affymetrix cel files.\n\nFunctions:\nread Read a cel file and store its contents in a Record\n\nClasses:\nRecord Contains the information from a cel file\n\"\"\"\n\n#We use print in the doctests\nfrom __future__ import print_function\n\ntry:\n import numpy\nexcept ImportError:\n from Bio import MissingPythonDependencyError\n raise MissingPythonDependencyError(\n \"Install NumPy if you want to use Bio.Affy.CelFile\")\n\n\nclass Record(object):\n \"\"\"Stores the information in a cel file\n\n Example usage:\n\n >>> from Bio.Affy import CelFile\n >>> with open('Affy/affy_v3_example.CEL') as handle:\n ... c = CelFile.read(handle)\n ...\n >>> print(c.ncols, c.nrows)\n 5 5\n >>> print(c.intensities)\n [[ 234. 170. 22177. 164. 22104.]\n [ 188. 188. 21871. 168. 21883.]\n [ 188. 193. 21455. 198. 21300.]\n [ 188. 182. 21438. 188. 20945.]\n [ 193. 20370. 174. 20605. 168.]]\n >>> print(c.stdevs)\n [[ 24. 34.5 2669. 19.7 3661.2]\n [ 29.8 29.8 2795.9 67.9 2792.4]\n [ 29.8 88.7 2976.5 62. 2914.5]\n [ 29.8 76.2 2759.5 49.2 2762. ]\n [ 38.8 2611.8 26.6 2810.7 24.1]]\n >>> print(c.npix)\n [[25 25 25 25 25]\n [25 25 25 25 25]\n [25 25 25 25 25]\n [25 25 25 25 25]\n [25 25 25 25 25]]\n\n \"\"\"\n def __init__(self):\n self.version = None\n self.GridCornerUL = None\n self.GridCornerUR = None\n self.GridCornerLR = None\n self.GridCornerLL = None\n self.DatHeader = None\n self.Algorithm = None\n self.AlgorithmParameters = None\n self.NumberCells = None\n self.intensities = None\n self.stdevs = None\n self.npix = None\n self.nrows = None\n self.ncols = None\n self.nmask = None\n self.mask = None\n self.noutliers = None\n self.outliers = None\n self.modified = None\n\ndef read(handle):\n \"\"\"\n Read the information in a cel file, and store it in a Record.\n \"\"\"\n # Needs error handling.\n # Needs to know the chip design.\n record = Record()\n section = \"\"\n for line in handle:\n if not line.strip():\n continue\n # Set current section\n if line[:5] == \"[CEL]\":\n section = \"CEL\"\n elif line[:8] == \"[HEADER]\":\n section = \"HEADER\"\n elif line[:11] == \"[INTENSITY]\":\n section = \"INTENSITY\"\n record.intensities = numpy.zeros((record.nrows, record.ncols))\n record.stdevs = numpy.zeros((record.nrows, record.ncols))\n record.npix = numpy.zeros((record.nrows, record.ncols), int)\n elif line[:7] == \"[MASKS]\":\n section = \"MASKS\"\n record.mask = numpy.zeros((record.nrows, record.ncols))\n elif line[:10] == \"[OUTLIERS]\":\n section = \"OUTLIERS\"\n record.outliers = numpy.zeros((record.nrows, record.ncols))\n elif line[:10] == \"[MODIFIED]\":\n section = \"MODIFIED\"\n record.modified = numpy.zeros((record.nrows, record.ncols))\n elif line[0] == \"[\":\n # This would be an unknown section\n section = \"\"\n elif section == \"CEL\":\n keyword, value = line.split(\"=\", 1)\n if keyword == 'Version':\n record.version = int(value)\n elif section == \"HEADER\":\n # Set record.ncols and record.nrows, remaining data goes into\n # record.header dict\n keyword, value = line.split(\"=\", 1)\n if keyword == \"Cols\":\n record.ncols = int(value)\n elif keyword == \"Rows\":\n record.nrows = int(value)\n elif keyword == 'GridCornerUL':\n x, y = value.split()\n record.GridCornerUL = (int(x), int(y))\n elif keyword == 'GridCornerUR':\n x, y = 
value.split()\n record.GridCornerUR = (int(x), int(y))\n elif keyword == 'GridCornerLR':\n x, y = value.split()\n record.GridCornerLR = (int(x), int(y))\n elif keyword == 'GridCornerLL':\n x, y = value.split()\n record.GridCornerLL = (int(x), int(y))\n elif keyword == 'DatHeader':\n record.DatHeader = value.strip('\\n\\r')\n elif keyword == 'Algorithm':\n record.Algorithm = value.strip('\\n\\r')\n elif keyword == 'AlgorithmParameters':\n record.AlgorithmParameters = value.strip('\\n\\r')\n elif section == \"INTENSITY\":\n if \"NumberCells\" in line:\n record.NumberCells = int(line.split(\"=\", 1)[1])\n elif \"CellHeader\" in line:\n pass\n else:\n words = line.split()\n y = int(words[0])\n x = int(words[1])\n record.intensities[x, y] = float(words[2])\n record.stdevs[x, y] = float(words[3])\n record.npix[x, y] = int(words[4])\n elif section == \"MASKS\":\n if \"NumberCells\" in line:\n record.nmask = int(line.split(\"=\", 1)[1])\n elif \"CellHeader\" in line:\n pass\n else:\n words = line.split()\n y = int(words[0])\n x = int(words[1])\n record.mask[x, y] = int(1)\n elif section == \"OUTLIERS\":\n if \"NumberCells\" in line:\n record.noutliers = int(line.split(\"=\", 1)[1])\n elif \"CellHeader\" in line:\n pass\n else:\n words = line.split()\n y = int(words[0])\n x = int(words[1])\n record.outliers[x, y] = int(1)\n elif section == \"MODIFIED\":\n if \"NumberCells\" in line:\n record.nmodified= int(line.split(\"=\", 1)[1])\n elif \"CellHeader\" in line:\n pass\n else:\n words = line.split()\n y = int(words[0])\n x = int(words[1])\n record.modified[x, y] = float(words[2])\n else:\n continue\n return record\n\nif __name__ == \"__main__\":\n from Bio._utils import run_doctest\n run_doctest()\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hamdi-haddad/pyEIT | [
"30c47839c537dbdfb65f2b70daa68f4cc8e13d9a"
] | [
"examples/eit_dynamic_greit.py"
] | [
"# coding: utf-8\n\"\"\" demo using GREIT \"\"\"\n# Copyright (c) Benyuan Liu. All Rights Reserved.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport pyeit.mesh as mesh\nfrom pyeit.eit.fem import Forward\nfrom pyeit.eit.utils import eit_scan_lines\nimport pyeit.eit.greit as greit\n\n\"\"\" 0. construct mesh \"\"\"\nmesh_obj, el_pos = mesh.create(16, h0=0.1)\n\n# extract node, element, alpha\npts = mesh_obj[\"node\"]\ntri = mesh_obj[\"element\"]\n\n\"\"\" 1. problem setup \"\"\"\n# this step is not needed, actually\n# mesh_0 = mesh.set_perm(mesh_obj, background=1.0)\n\n# test function for altering the 'permittivity' in mesh\nanomaly = [\n {\"x\": 0.4, \"y\": 0, \"d\": 0.1, \"perm\": 10},\n {\"x\": -0.4, \"y\": 0, \"d\": 0.1, \"perm\": 10},\n {\"x\": 0, \"y\": 0.5, \"d\": 0.1, \"perm\": 0.1},\n {\"x\": 0, \"y\": -0.5, \"d\": 0.1, \"perm\": 0.1},\n]\nmesh_new = mesh.set_perm(mesh_obj, anomaly=anomaly, background=1.0)\ndelta_perm = np.real(mesh_new[\"perm\"] - mesh_obj[\"perm\"])\n\n# show alpha\nfig, axes = plt.subplots(2, 1, constrained_layout=True)\nfig.set_size_inches(6, 4)\n\nax = axes[0]\nim = ax.tripcolor(\n pts[:, 0], pts[:, 1], tri, delta_perm, shading=\"flat\", cmap=plt.cm.viridis\n)\nax.axis(\"equal\")\nax.set_xlim([-1.2, 1.2])\nax.set_ylim([-1.2, 1.2])\nax.set_title(r\"$\\Delta$ Conductivity\")\n# fig.set_size_inches(6, 4)\n\n\"\"\" 2. FEM forward simulations \"\"\"\n# setup EIT scan conditions\nel_dist, step = 1, 1\nex_mat = eit_scan_lines(16, el_dist)\n\n# calculate simulated data\nfwd = Forward(mesh_obj, el_pos)\nf0 = fwd.solve_eit(ex_mat, step=step, perm=mesh_obj[\"perm\"])\nf1 = fwd.solve_eit(ex_mat, step=step, perm=mesh_new[\"perm\"])\n\n\"\"\" 3. Construct using GREIT \"\"\"\neit = greit.GREIT(mesh_obj, el_pos, ex_mat=ex_mat, step=step, parser=\"std\")\neit.setup(p=0.50, lamb=0.001)\nds = eit.solve(f1.v, f0.v)\nx, y, ds = eit.mask_value(ds, mask_value=np.NAN)\n\n# plot\n\"\"\"\nimshow will automatically set NaN (bad values) to 'w',\nif you want to manually do so\n\nimport matplotlib.cm as cm\ncmap = cm.gray\ncmap.set_bad('w', 1.)\nplt.imshow(np.real(ds), interpolation='nearest', cmap=cmap)\n\"\"\"\nax = axes[1]\nim = ax.imshow(np.real(ds), interpolation=\"none\", cmap=plt.cm.viridis)\nax.axis(\"equal\")\n\nfig.colorbar(im, ax=axes.ravel().tolist())\n# fig.savefig('../doc/images/demo_greit.png', dpi=96)\nplt.show()\n"
] | [
[
"numpy.real",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
krrish94/learn_tensorflow | [
"b5725bfbd09911e7c7342ab76eea07e294d5573c"
] | [
"lstm_repetition_detection_classifier.py"
] | [
"# Tutorial from: https://jasdeep06.github.io/posts/Understanding-LSTM-in-Tensorflow-MNIST/\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\n\n\n\n# Seed RNG\nrng_seed = 12345\nnp.random.seed(rng_seed)\ntf.set_random_seed(rng_seed)\n\n# Declare constants\n\n# Dataset generation params\nnum_tokens = 10\ndataset_size = 400000\n\n# LSTM is unrolled through 10 time steps\nseq_len = 10\n# Number of hidden LSTM units\nnum_units = 10\n# Size of each input\nn_input = 1\n# Learning rate\nlearning_rate = 0.01\n# Beta (for ADAM)\nbeta = 0.9\n# Momentum\nmomentum = 0.099\n# Number of classes\nn_classes = seq_len + 1\n# Batch size\nbatch_size = 500\n# Train/Test split\ntrain_split = 0.8\n# Number of epochs\nnum_epochs = 1000\n# Flag to check if loss has been stepped down\nstepFlag = False\n# Class weights (for 0->no-repetition vs 1->repetition)\nclass_weights = tf.constant([0.1, 0.9])\n# class_weights = np.ones((batch_size * seq_len,n_classes))\n# class_weights[:,0] = 0.1*class_weights[:,0]\n# class_weights[:,1] = 0.1*class_weights[:,1]\n# class_weights_tensor = tf.constant(class_weights, dtype = tf.float32)\n\n# More variable definitions\nnum_iters = int(np.floor(dataset_size / batch_size))\ntrain_iters = int(train_split * num_iters)\ntest_iters = num_iters - train_iters\nnum_train = int(np.floor(train_split * dataset_size))\nnum_test = dataset_size - num_train\n\n# Verbosity controls\nprint_experiment_summary = True\nif print_experiment_summary:\n\tprint('Total number of samples:', dataset_size)\n\tprint('Train samples:', num_train)\n\tprint('Test samples:', num_test)\n\tprint('Batch size:', batch_size)\n\tprint('Train batches:', train_iters)\n\tprint('Test batches:', test_iters)\n\tprint('Max epochs:', num_epochs)\nprint_train_every = 1000\nprint_test_every = 100\n\n\n# Synthesize data\n\ndata = np.zeros((dataset_size, seq_len))\nlabel = np.zeros((dataset_size, n_classes))\nprint(label.shape)\nfor i in range(dataset_size):\n\t# Generate a random permutation of all tokens. 
\n\t# Throw in a random translation of all tokens.\n\ttmp = np.random.permutation(num_tokens) #+ np.random.randint(50)\n\tcoin_filp = np.random.randint(0,2)\n\tif coin_filp == 0 or coin_filp == 1:\n\t\t# Add a repetition\n\t\t# Index of number to repeat\n\t\ttmpIdx_src = np.random.randint(len(tmp))\n\t\t# Where to repeat that number\n\t\ttmpIdx_dst = np.random.randint(len(tmp))\n\t\twhile tmpIdx_dst == tmpIdx_src:\n\t\t\ttmpIdx_dst = np.random.randint(len(tmp))\n\t\ttmp[tmpIdx_dst] = tmp[tmpIdx_src]\n\t\t# label[i,tmpIdx_src,1] = 1.0\n\t\tif tmpIdx_src > tmpIdx_dst:\n\t\t\ttmpvar = tmpIdx_dst\n\t\t\ttmpIdx_dst = tmpIdx_src\n\t\t\ttmpIdx_src = tmpvar\n\t\tlabel[i,tmpIdx_dst+1] = 1.0\n\telse:\n\t\tlabel[i,0] = 1.0\n\tdata[i,:] = tmp\n\t# print(data[i,:])\n\t# print(label[i,:])\n\n\n# Define placeholders\n\n# Outputs\nout_weights = tf.Variable(tf.random_normal([num_units, n_classes]))\n# out_bias = tf.Variable(tf.random_normal([n_classes]))\nout_bias = tf.Variable(tf.constant(0.0, shape = [n_classes], dtype = tf.float32))\n\n# Inputs\nx = tf.placeholder(\"float\", [None, seq_len, n_input])\ny = tf.placeholder(\"float\", [None, n_classes])\n\n# Reshape x from shape [batch_size, seq_len, n_input] to\n# 'seq_len' number of [batch_size, n_input] tensors\ninput = tf.unstack(x, seq_len, axis = 1)\n\n# Define network\nlstm_layer = rnn.GRUCell(num_units)\noutputs, _ = rnn.static_rnn(lstm_layer, input, dtype = tf.float32)\nprediction = tf.matmul(outputs[-1], out_weights) + out_bias\n\n# Loss function\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = prediction, labels = y))\nalpha = 0.0000001\nregularizer = tf.nn.l2_loss(out_weights)\nloss = tf.reduce_mean(loss + alpha * regularizer)\n# Optimization\n# opt = tf.train.AdamOptimizer(learning_rate = learning_rate, beta1 = beta).minimize(loss)\nopt = tf.train.MomentumOptimizer(learning_rate = learning_rate, momentum = momentum).minimize(loss)\n\n# Accuracy Computation\nmistakes = tf.not_equal(tf.argmax(y, axis = 1), tf.argmax(prediction, axis = 1))\nerror = tf.reduce_mean(tf.cast(mistakes, tf.float32))\n\n# Init variables\ninit = tf.global_variables_initializer()\n\n# Run session\nwith tf.Session() as sess:\n\t\n\tsess.run(init)\n\tepoch = 0\n\t\n\twhile epoch < num_epochs:\n\n\t\t# if epoch == 50 or epoch == 100 or epoch == 150 or epoch == 250 or epoch == 300 or epoch == 500:\n\t\t# \tlearning_rate = 0.1 * learning_rate\n\t\t# \tif epoch == 150:\n\t\t# \t\tbeta1 = 0.7\n\t\tif epoch % 50 == 0 and epoch < 500:\n\t\t\tlearning_rate = 0.1 * learning_rate\n\t\t\tif epoch == 150:\n\t\t\t\tbeta1 = 0.7\n\t\t\tif epoch == 450:\n\t\t\t\tbeta1 = 0.6\n\n\t\tshuffledOrder = np.random.permutation(dataset_size)\n\n\t\t# if epoch > 7 and not stepFlag:\n\t\t# \tlearning_rate = learning_rate / 10\n\t\t# \tstepFlag = True\n\n\t\titer = 0\n\t\ttrain_error_this_epoch = 0.0\n\t\ttrain_error_temp = 0.0\n\t\twhile iter < train_iters:\n\n\t\t\tcurIterInds = shuffledOrder[iter*batch_size:(iter+1)*batch_size]\n\t\t\tbatch_x = data[curIterInds,:]\n\t\t\tbatch_x = np.expand_dims(batch_x, -1)\n\t\t\tbatch_y = label[curIterInds,:]\n\n\t\t\tnet_out = sess.run([loss, opt, prediction, error], feed_dict = {x: batch_x, y: batch_y})\n\n\t\t\ttrain_error_this_epoch += net_out[3]\n\t\t\ttrain_error_temp += net_out[3]\n\t\t\tif iter % print_train_every == 0:\n\t\t\t\tprint('Epoch: ', epoch, 'Iter', iter, 'Loss:', net_out[0])\n\t\t\t\ttrain_error_temp = 0.0\n\t\t\t\n\t\t\titer += 1\n\n\t\ttest_error_this_epoch = 0.0\n\t\ttest_error_temp = 0.0\n\t\ttmp_counter = 
0\n\t\twhile iter < train_iters + test_iters:\n\t\t\t# batch_x, batch_y = mnist.train.next_batch(batch_size = batch_size)\n\t\t\t# batch_x = batch_x.reshape((batch_size, seq_len, n_input))\n\t\t\tstartIdx = iter*batch_size\n\t\t\tendIdx = (iter+1)*batch_size\n\t\t\tbatch_x = data[startIdx:endIdx,:]\n\t\t\tbatch_x = np.expand_dims(batch_x, -1)\n\t\t\tbatch_y = label[startIdx:endIdx,:]\n\t\t\tnet_out = sess.run([loss, prediction, error], feed_dict = {x: batch_x, y: batch_y})\n\n\t\t\ttest_error_this_epoch += net_out[2]\n\t\t\ttmp_counter += 1\n\t\t\ttest_error_temp += net_out[2]\n\t\t\tif iter % print_test_every == 0:\n\t\t\t\tprint('Epoch: ', epoch, 'Test Err:', net_out[2])\n\t\t\t\ttest_error_temp = 0.0\n\t\t\t\trandom_disp = np.random.randint(batch_size)\n\t\t\t\tprint(np.squeeze(batch_x[random_disp]))\n\t\t\t\tprint('Pred:', np.argmax(net_out[1][random_disp]), \\\n\t\t\t\t\t'GT:', np.argmax(batch_y[random_disp]))\n\t\t\t\n\t\t\titer += 1\n\t\tprint('#######################')\n\t\tprint('Error:', test_error_this_epoch / float(tmp_counter))\n\t\tprint('#######################')\n\n\t\tepoch += 1\n"
] | [
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"numpy.expand_dims",
"tensorflow.contrib.rnn.GRUCell",
"numpy.squeeze",
"tensorflow.cast",
"tensorflow.nn.l2_loss",
"numpy.random.randint",
"tensorflow.train.MomentumOptimizer",
"numpy.argmax",
"tensorflow.Session",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.contrib.rnn.static_rnn",
"numpy.floor",
"tensorflow.set_random_seed",
"tensorflow.constant",
"numpy.random.seed",
"tensorflow.reduce_mean",
"numpy.random.permutation",
"tensorflow.random_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
chellabeatrixkiddo/keras-frcnn | [
"0b09f279f32143e084e2b884076ee51d9daac55d"
] | [
"test_frcnn.py"
] | [
"from __future__ import division\nimport os\nimport cv2\nimport numpy as np\nimport sys\nimport pickle\nfrom optparse import OptionParser\nimport time\nfrom keras_frcnn import config\nfrom keras import backend as K\nfrom keras.layers import Input\nfrom keras.models import Model\nfrom keras_frcnn import roi_helpers\nimport pandas as pd\n\nsys.setrecursionlimit(40000)\n\nparser = OptionParser()\n\nparser.add_option(\"-p\", \"--path\", dest=\"test_path\", help=\"Path to test data.\")\nparser.add_option(\"-n\", \"--num_rois\", type=\"int\", dest=\"num_rois\",\n\t\t\t\thelp=\"Number of ROIs per iteration. Higher means more memory use.\", default=32)\nparser.add_option(\"--config_filename\", dest=\"config_filename\", help=\n\t\t\t\t\"Location to read the metadata related to the training (generated when training).\",\n\t\t\t\tdefault=\"config.pickle\")\nparser.add_option(\"--network\", dest=\"network\", help=\"Base network to use. Supports vgg or resnet50.\", default='resnet50')\n\n(options, args) = parser.parse_args()\n\nif not options.test_path: # if filename is not given\n\tparser.error('Error: path to test data must be specified. Pass --path to command line')\n\n\nresult_df = pd.DataFrame(columns=['image_name', 'x1', 'y1', 'x2', 'y2'])\n\nconfig_output_filename = options.config_filename\n\nwith open(config_output_filename, 'rb') as f_in:\n\tC = pickle.load(f_in)\n\nif C.network == 'resnet50':\n\timport keras_frcnn.resnet as nn\nelif C.network == 'vgg':\n\timport keras_frcnn.vgg as nn\n\n# turn off any data augmentation at test time\nC.use_horizontal_flips = False\nC.use_vertical_flips = False\nC.rot_90 = False\n\nimg_path = options.test_path\n\ndef format_img_size(img, C):\n\t\"\"\" formats the image size based on config \"\"\"\n\timg_min_side = float(C.im_size)\n\t(height,width,_) = img.shape\n\t\t\n\tif width <= height:\n\t\tratio = img_min_side/width\n\t\tnew_height = int(ratio * height)\n\t\tnew_width = int(img_min_side)\n\telse:\n\t\tratio = img_min_side/height\n\t\tnew_width = int(ratio * width)\n\t\tnew_height = int(img_min_side)\n\timg = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)\n\treturn img, ratio\t\n\ndef format_img_channels(img, C):\n\t\"\"\" formats the image channels based on config \"\"\"\n\timg = img[:, :, (2, 1, 0)]\n\timg = img.astype(np.float32)\n\timg[:, :, 0] -= C.img_channel_mean[0]\n\timg[:, :, 1] -= C.img_channel_mean[1]\n\timg[:, :, 2] -= C.img_channel_mean[2]\n\timg /= C.img_scaling_factor\n\timg = np.transpose(img, (2, 0, 1))\n\timg = np.expand_dims(img, axis=0)\n\treturn img\n\ndef format_img(img, C):\n\t\"\"\" formats an image for model prediction based on config \"\"\"\n\timg, ratio = format_img_size(img, C)\n\timg = format_img_channels(img, C)\n\treturn img, ratio\n\n# Method to transform the coordinates of the bounding box to its original size\ndef get_real_coordinates(ratio, x1, y1, x2, y2):\n\n\treal_x1 = int(round(x1 // ratio))\n\treal_y1 = int(round(y1 // ratio))\n\treal_x2 = int(round(x2 // ratio))\n\treal_y2 = int(round(y2 // ratio))\n\n\treturn (real_x1, real_y1, real_x2 ,real_y2)\n\nclass_mapping = C.class_mapping\n\nif 'bg' not in class_mapping:\n\tclass_mapping['bg'] = len(class_mapping)\n\nclass_mapping = {v: k for k, v in class_mapping.items()}\nprint(class_mapping)\nclass_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}\nC.num_rois = int(options.num_rois)\n\nif C.network == 'resnet50':\n\tnum_features = 1024\nelif C.network == 'vgg':\n\tnum_features = 512\n\nif K.image_data_format() == 
'channels_first':\n\tinput_shape_img = (3, None, None)\n\tinput_shape_features = (num_features, None, None)\nelse:\n\tinput_shape_img = (None, None, 3)\n\tinput_shape_features = (None, None, num_features)\n\n\nimg_input = Input(shape=input_shape_img)\nroi_input = Input(shape=(C.num_rois, 4))\nfeature_map_input = Input(shape=input_shape_features)\n\n# define the base network (resnet here, can be VGG, Inception, etc)\nshared_layers = nn.nn_base(img_input, trainable=True)\n\n# define the RPN, built on the base layers\nnum_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)\nrpn_layers = nn.rpn(shared_layers, num_anchors)\n\nclassifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)\n\nmodel_rpn = Model(img_input, rpn_layers)\nmodel_classifier_only = Model([feature_map_input, roi_input], classifier)\n\nmodel_classifier = Model([feature_map_input, roi_input], classifier)\n\nprint('Loading weights from {}'.format(C.model_path))\nmodel_rpn.load_weights(C.model_path, by_name=True)\nmodel_classifier.load_weights(C.model_path, by_name=True)\n\nmodel_rpn.compile(optimizer='sgd', loss='mse')\nmodel_classifier.compile(optimizer='sgd', loss='mse')\n\nall_imgs = []\n\nclasses = {}\n\nbbox_threshold = 0.8\n\nvisualise = True\nimg_id = 1\nfor idx, img_name in enumerate(sorted(os.listdir(img_path))):\n\tif not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):\n\t\tcontinue\n\tprint(img_name)\n\tst = time.time()\n\tfilepath = os.path.join(img_path,img_name)\n\n\timg = cv2.imread(filepath)\n\n\tX, ratio = format_img(img, C)\n\n\tif K.image_data_format() == 'channels_last':\n\t\tX = np.transpose(X, (0, 2, 3, 1))\n\n\t# get the feature maps and output from the RPN\n\t[Y1, Y2, F] = model_rpn.predict(X)\n\t\n\n\tR = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_data_format(), overlap_thresh=0.7)\n\n\t# convert from (x1,y1,x2,y2) to (x,y,w,h)\n\tR[:, 2] -= R[:, 0]\n\tR[:, 3] -= R[:, 1]\n\n\t# apply the spatial pyramid pooling to the proposed regions\n\tbboxes = {}\n\tprobs = {}\n\n\tfor jk in range(R.shape[0]//C.num_rois + 1):\n\t\tROIs = np.expand_dims(R[C.num_rois*jk:C.num_rois*(jk+1), :], axis=0)\n\t\tif ROIs.shape[1] == 0:\n\t\t\tbreak\n\n\t\tif jk == R.shape[0]//C.num_rois:\n\t\t\t#pad R\n\t\t\tcurr_shape = ROIs.shape\n\t\t\ttarget_shape = (curr_shape[0],C.num_rois,curr_shape[2])\n\t\t\tROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)\n\t\t\tROIs_padded[:, :curr_shape[1], :] = ROIs\n\t\t\tROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]\n\t\t\tROIs = ROIs_padded\n\n\t\t[P_cls, P_regr] = model_classifier_only.predict([F, ROIs])\n\n\t\tfor ii in range(P_cls.shape[1]):\n\n\t\t\tif np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):\n\t\t\t\t#print(\"inside if \")\n\t\t\t\tcontinue\n\n\t\t\tcls_name = class_mapping[np.argmax(P_cls[0, ii, :])]\n\n\t\t\tif cls_name not in bboxes:\n\t\t\t\tbboxes[cls_name] = []\n\t\t\t\tprobs[cls_name] = []\n\n\t\t\t(x, y, w, h) = ROIs[0, ii, :]\n\n\t\t\tcls_num = np.argmax(P_cls[0, ii, :])\n\t\t\ttry:\n\t\t\t\t(tx, ty, tw, th) = P_regr[0, ii, 4*cls_num:4*(cls_num+1)]\n\t\t\t\ttx /= C.classifier_regr_std[0]\n\t\t\t\tty /= C.classifier_regr_std[1]\n\t\t\t\ttw /= C.classifier_regr_std[2]\n\t\t\t\tth /= C.classifier_regr_std[3]\n\t\t\t\tx, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)\n\t\t\t\t#print(\"x, y, w, h:\", x,y,w,h)\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tbboxes[cls_name].append([C.rpn_stride*x, C.rpn_stride*y, 
C.rpn_stride*(x+w), C.rpn_stride*(y+h)])\n\t\t\tprobs[cls_name].append(np.max(P_cls[0, ii, :]))\n\n\tall_dets = []\n\n\tfor key in bboxes:\n\t\tbbox = np.array(bboxes[key])\n\t\t\n\t\tnew_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)\n\t\tfor jk in range(new_boxes.shape[0]):\n\t\t\t(x1, y1, x2, y2) = new_boxes[jk,:]\n\n\t\t\t(real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)\n\t\t\t\n\t\t\td = {\"image_name\":img_name, \"x1\":real_x1, \"y1\":real_y1, \"x2\":real_x2, \"y2\":real_y2}\n\t\t\tresult_df = result_df.append(pd.Series(d), ignore_index=True)\n\t\t\t\n\t\t\t#print(\"image_name:\", img_name, \"x1:\", real_x1, \"y1:\", real_y1, \"x2:\", real_x2, \"y2:\", real_y2)\n\t\t\t\n\t\t\tcv2.rectangle(img,(real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),2)\n\n\t\t\ttextLabel = '{}: {}'.format(key,int(100*new_probs[jk]))\n\t\t\tall_dets.append((key,100*new_probs[jk]))\n\n\t\t\t(retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1)\n\t\t\ttextOrg = (real_x1, real_y1-0)\n\n\t\t\tcv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (0, 0, 0), 2)\n\t\t\tcv2.rectangle(img, (textOrg[0] - 5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (255, 255, 255), -1)\n\t\t\tcv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)\n\n\tprint('Elapsed time = {}'.format(time.time() - st))\n\tprint(all_dets)\n\t#cv2.imshow('img', img)\n\t#cv2.waitKey(0)\n\tcv2.imwrite('./results_imgs/{}.png'.format(idx),img)\n\tif(img_id >= 10):\n\t\tbreak\n\timg_id = img_id + 1\n\nresult_df.to_csv(\"./result_annos.csv\", index=False)\n"
] | [
[
"numpy.expand_dims",
"pandas.Series",
"pandas.DataFrame",
"numpy.max",
"numpy.argmax",
"numpy.transpose",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
SensorUp/LDAR_Sim | [
"e341998257eb3c74497c950935c02bb81c01eb32"
] | [
"LDAR_Sim/src/initialization/campaigns.py"
] | [
"\nfrom math import floor\nfrom numpy import zeros\n# --- initialize Campaigns ---\n\n\ndef _add_method_campaign(campaign_s_t, d_per_campaign, timesteps, n_sites, m_name):\n n_campaigns = floor(timesteps/d_per_campaign)\n return campaign_s_t.update(\n {\n m_name: {\n 'current_campaign': 0,\n 'n_campaigns': n_campaigns,\n 'ts_start': [c*d_per_campaign for c in range(0, n_campaigns)],\n 'n_sites': n_sites,\n 'n_sites_screened': zeros(n_campaigns, dtype=int),\n 'n_sites_surveyed': zeros(n_campaigns, dtype=int),\n 'n_flags': zeros(n_campaigns, dtype=int),\n 'n_tags': zeros(n_campaigns, dtype=int),\n 'n_repairs': zeros(n_campaigns, dtype=int)\n }\n })\n\n\ndef init_campaigns(n_subtype_rs, sites_per_subtype, timesteps):\n \"\"\" Initialize campaigns by going through all site subtypes, and all\n methods, and setting up campaign periods for each. IF subtypes have\n one or more scheduled method with a single rs value, all unscheduled\n methods ie. OGI_FU and natural will have the same schedule.\n\n Args:\n n_subtype_rs (dict): dict of dicts where key is subtype and val is a dict with\n key as method and val of the RS associated with the method/subtype.\n If no single RS value for subtype set to None.\n If method is un-scheduled (OGI-FU or Natural) set to -1.\n sites_per_subtype (dict): dict where key is subtype and val is number of sites in\n in subtype.\n timesteps (integer): [description]\n\n Returns:\n dict: campaign object. Where campaign['subtype_code]['method'] = {\n 'current_campaign': 0,\n 'n_campaigns': n_campaigns,\n 'ts_start': [c*d_per_campaign for c in range(0, n_campaigns)],\n 'n_sites': n_sites,\n 'n_sites_screened': zeros(n_campaigns, dtype=int),\n 'n_sites_surveyed': zeros(n_campaigns, dtype=int),\n 'n_flags': zeros(n_campaigns, dtype=int),\n 'n_tags': zeros(n_campaigns, dtype=int),\n 'n_repairs': zeros(n_campaigns, dtype=int)\n }\n \"\"\"\n campaigns = {}\n for s_t, s_vals in n_subtype_rs.items():\n ref_day_per_campaign = None\n has_one_campaign = True\n campaigns.update({s_t: {}})\n non_sched_meths = []\n for m_idx, m_rs in s_vals.items():\n if m_rs != -1:\n if m_rs is not None and m_rs != 0:\n d_per_campaign = int(floor(365/m_rs))\n if has_one_campaign is True:\n if ref_day_per_campaign is None:\n ref_day_per_campaign = d_per_campaign\n elif ref_day_per_campaign != d_per_campaign:\n has_one_campaign = False\n else:\n # If campaign lengths are not set or vary, set the n of campaigns to annual\n has_one_campaign = False\n d_per_campaign = 365\n _add_method_campaign(campaigns[s_t], d_per_campaign, timesteps,\n sites_per_subtype[s_t], m_idx)\n else:\n non_sched_meths.append(m_idx)\n\n # Add non-scheduled methods\n # if there is a only one RS value for all sites and methods is subtype then\n # set non-scheduled methods to havethe asame survey period\n for m in non_sched_meths:\n if has_one_campaign and ref_day_per_campaign is not None:\n d_per_campaign = ref_day_per_campaign\n else:\n d_per_campaign = 365\n _add_method_campaign(campaigns[s_t], d_per_campaign, timesteps,\n sites_per_subtype[s_t], m)\n\n return campaigns\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TAU-MLwell/Marginal-Contribution-Feature-Importance | [
"956f401d3af0b9da7a607cc30304669f6b723d7c"
] | [
"mci/mci_values.py"
] | [
"import matplotlib.pyplot as plt\nfrom typing import Sequence, Tuple, Optional\nfrom mci.estimators.contribution_tracker import ContributionTracker\n\n\nclass MciValues:\n\n \"\"\"contain MCI values and project relevant plots from them\"\"\"\n\n def __init__(self,\n mci_values: Sequence[float],\n feature_names: Sequence[str],\n contexts: Sequence[Tuple[str, ...]],\n additional_values: Optional[Sequence[Sequence[float]]] = None,\n additional_contexts: Optional[Sequence[Sequence[Tuple[str, ...]]]] = None,\n shapley_values: Optional[Sequence[float]] = None):\n \"\"\"\n :param mci_values: array of MCI values for each feature\n :param feature_names: array of features names (corresponds to the values)\n :param contexts: array of argmax contribution contexts for each feature (corresponds to the values)\n :param additional_values: placeholder for additional MCI values per feature (for non optimal values)\n :param additional_contexts: placeholder for additional MCI contexts per feature (for non optimal values)\n :param shapley_values: shapley values for comparison (optional)\n \"\"\"\n self.mci_values = mci_values\n self.feature_names = feature_names\n self.contexts = contexts\n self.additional_values = additional_values\n self.additional_contexts = additional_contexts\n self.shapley_values = shapley_values\n\n @classmethod\n def create_from_tracker(cls, tracker: ContributionTracker, feature_names: Sequence[str]):\n return cls(mci_values=tracker.max_contributions,\n feature_names=feature_names,\n contexts=tracker.argmax_contexts,\n additional_values=tracker.all_contributions,\n additional_contexts=tracker.all_contexts,\n shapley_values=tracker.avg_contributions)\n\n def plot_values(self, plot_contexts: bool = False, score_name=\"MCI\", file_path: Optional[str] = None):\n \"\"\"Simple bar plot for MCI values per feature name\"\"\"\n score_features = sorted([(score, feature, context) for score, feature, context\n in zip(self.mci_values, self.feature_names, self.contexts)],\n key=lambda x: x[0])\n\n if plot_contexts:\n features = [f\"{f} ({', '.join(context)})\" for score, f, context in score_features]\n else:\n features = [f for score, f, context in score_features]\n plt.barh(y=features, width=[score for score, f, context in score_features])\n plt.title(f\"{score_name} feature importance\")\n plt.xlabel(f\"{score_name} value\")\n plt.ylabel(\"Feature name\")\n\n if file_path:\n plt.savefig(file_path, dpi=300)\n plt.close()\n else:\n plt.show()\n\n def plot_shapley_values(self, file_path: Optional[str] = None):\n score_features = sorted([(score, feature) for score, feature\n in zip(self.shapley_values, self.feature_names)],\n key=lambda x: x[0])\n features = [f for score, f in score_features]\n plt.barh(y=features, width=[score for score, f in score_features])\n plt.title(f\"Shapley feature importance\")\n plt.xlabel(f\"Shapley value\")\n plt.ylabel(\"Feature name\")\n if file_path:\n plt.savefig(file_path, dpi=300)\n plt.close()\n else:\n plt.show()\n\n def results_dict(self) -> dict:\n results = {\n \"feature_names\": self.feature_names,\n \"mci_values\": self.mci_values,\n \"contexts\": self.contexts,\n \"shapley_values\": self.shapley_values\n }\n return results\n"
] | [
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.barh",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sujaynagaraj/probML_project | [
"4b1fe9d18b949b01bc1948599ee2494875c7ab92"
] | [
"plotting_helpers.py"
] | [
"import matplotlib.pyplot as plt\n\ndef to_numpy(x):\n\t\treturn x.detach().cpu().numpy()\n\ndef make_quick_plot(batch_dict, example, extra_str=\"\"):\n\n plot_dict = {x: to_numpy(batch_dict[x]) for x in ('observed_data', 'observed_tp', 'data_to_predict', 'tp_to_predict', 'observed_mask')}\n\n plt.figure()\n plt.scatter(plot_dict[\"observed_tp\"], plot_dict[\"observed_data\"][example, :, 0])\n plt.title(f\"Observed TP vs Observed Data mode: {batch_dict['mode']}\")\n plt.savefig(f\"observed_tp_vs_observed_data_{extra_str}_{batch_dict['mode']}_{example}.png\")\n \n\n plt.figure()\n plt.scatter(plot_dict[\"tp_to_predict\"], plot_dict[\"data_to_predict\"][example, :, 0])\n plt.title(f\"TP to predict vs Data to predict mode: {batch_dict['mode']}\")\n plt.savefig(f\"tp_to_predict_vs_data_to_predict_{extra_str}_{batch_dict['mode']}_{example}.png\")\n\n plt.figure()\n plt.scatter(plot_dict[\"tp_to_predict\"], plot_dict[\"observed_mask\"][example, :])\n plt.title(f\"TP to predict vs Data to predict amsk: {batch_dict['mode']}\")\n plt.savefig(f\"tp_to_predict_vs_data_to_predict__mask_{extra_str}_{batch_dict['mode']}_{example}.png\")\n"
] | [
[
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |