Dataset columns:
repo_name: string (lengths 8 to 130)
hexsha: sequence
file_path: sequence
code: sequence
apis: sequence
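The records that follow are easiest to inspect programmatically. Below is a minimal sketch of iterating over them, assuming the rows are serialized as JSON Lines; the file name "repos_with_apis.jsonl" and the field-access pattern are placeholders inferred from the schema above, not part of the original dump.

```python
import json

# Minimal sketch: iterate over records shaped like the rows below,
# assuming one JSON object per line (JSON Lines).
# The path "repos_with_apis.jsonl" is a placeholder.
with open("repos_with_apis.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # file_path, code, and apis are parallel sequences: file_path[i]
        # names the source file whose contents are code[i] and whose
        # extracted library calls are listed in apis[i].
        for path, code, apis in zip(record["file_path"], record["code"], record["apis"]):
            print(record["repo_name"], path, apis)
```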
zhiqiangdon/autogluon
[ "71ee7ef0f05d8f0aad112d8c1719174aa33194d9" ]
[ "tabular/tests/unittests/data/test_label_cleaner.py" ]
[ "import numpy as np\nimport pandas as pd\nimport pytest\n\nfrom autogluon.core.constants import BINARY, MULTICLASS, REGRESSION, SOFTCLASS\nfrom autogluon.core.data.label_cleaner import LabelCleaner, LabelCleanerBinary, LabelCleanerMulticlass, LabelCleanerMulticlassToBinary, LabelCleanerDummy\n\n\ndef test_label_cleaner_binary():\n # Given\n problem_type = BINARY\n input_labels_numpy = np.array(['l1', 'l2', 'l2', 'l1', 'l1', 'l2'])\n input_labels = pd.Series(input_labels_numpy)\n input_labels_category = input_labels.astype('category')\n input_labels_with_shifted_index = input_labels.copy()\n input_labels_with_shifted_index.index += 5\n input_labels_new = np.array(['new', 'l1', 'l2'])\n expected_output_labels = pd.Series([0, 1, 1, 0, 0, 1])\n expected_output_labels_pos_class_l1 = pd.Series([1, 0, 0, 1, 1, 0])\n expected_output_labels_new = pd.Series([np.nan, 0, 1])\n expected_output_labels_new_pos_class_l1 = pd.Series([np.nan, 1, 0])\n expected_output_labels_new_inverse = pd.Series([np.nan, 'l1', 'l2'])\n\n # When\n label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=input_labels) # positive_class='l2'\n label_cleaner_pos_class_l1 = LabelCleaner.construct(problem_type=problem_type, y=input_labels, positive_class='l1')\n\n # Raise exception\n with pytest.raises(ValueError):\n LabelCleaner.construct(problem_type=problem_type, y=input_labels, positive_class='unknown_class')\n\n # Raise exception\n with pytest.raises(AssertionError):\n LabelCleaner.construct(problem_type=problem_type, y=input_labels_new)\n\n # Then\n assert isinstance(label_cleaner, LabelCleanerBinary)\n assert label_cleaner.problem_type_transform == BINARY\n assert label_cleaner.cat_mappings_dependent_var == {0: 'l1', 1: 'l2'}\n assert label_cleaner_pos_class_l1.cat_mappings_dependent_var == {0: 'l2', 1: 'l1'}\n\n output_labels = label_cleaner.transform(input_labels)\n output_labels_pos_class_l1 = label_cleaner_pos_class_l1.transform(input_labels)\n output_labels_with_numpy = label_cleaner.transform(input_labels_numpy)\n output_labels_category = label_cleaner.transform(input_labels_category)\n output_labels_with_shifted_index = label_cleaner.transform(input_labels_with_shifted_index)\n output_labels_new = label_cleaner.transform(input_labels_new)\n output_labels_new_pos_class_l1 = label_cleaner_pos_class_l1.transform(input_labels_new)\n\n output_labels_inverse = label_cleaner.inverse_transform(output_labels)\n output_labels_inverse_pos_class_l1 = label_cleaner_pos_class_l1.inverse_transform(output_labels_pos_class_l1)\n output_labels_with_shifted_index_inverse = label_cleaner.inverse_transform(output_labels_with_shifted_index)\n output_labels_new_inverse = label_cleaner.inverse_transform(output_labels_new)\n output_labels_new_inverse_pos_class_l1 = label_cleaner_pos_class_l1.inverse_transform(output_labels_new_pos_class_l1)\n\n assert expected_output_labels.equals(output_labels)\n assert expected_output_labels_pos_class_l1.equals(output_labels_pos_class_l1)\n assert expected_output_labels.equals(output_labels_with_numpy)\n assert expected_output_labels.equals(output_labels_category)\n assert not expected_output_labels.equals(output_labels_with_shifted_index)\n output_labels_with_shifted_index.index -= 5\n assert expected_output_labels.equals(output_labels_with_shifted_index)\n assert expected_output_labels_new.equals(output_labels_new)\n assert expected_output_labels_new_pos_class_l1.equals(output_labels_new_pos_class_l1)\n\n assert input_labels.equals(output_labels_inverse)\n assert 
input_labels.equals(output_labels_inverse_pos_class_l1)\n assert input_labels_with_shifted_index.equals(output_labels_with_shifted_index_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse_pos_class_l1)\n\n\ndef test_label_cleaner_multiclass():\n # Given\n problem_type = MULTICLASS\n input_labels_numpy = np.array([2, 4, 2, 2, 4, 1])\n input_labels = pd.Series(input_labels_numpy)\n input_labels_category = input_labels.astype('category')\n input_labels_with_shifted_index = input_labels.copy()\n input_labels_with_shifted_index.index += 5\n input_labels_new = np.array([3, 5, 2])\n expected_output_labels = pd.Series([1, 2, 1, 1, 2, 0])\n expected_output_labels_new = pd.Series([np.nan, np.nan, 1])\n expected_output_labels_new_inverse = pd.Series([np.nan, np.nan, 2])\n\n # When\n label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=input_labels, y_uncleaned=input_labels)\n\n # Then\n assert isinstance(label_cleaner, LabelCleanerMulticlass)\n assert label_cleaner.problem_type_transform == MULTICLASS\n assert label_cleaner.cat_mappings_dependent_var == {0: 1, 1: 2, 2: 4}\n\n output_labels = label_cleaner.transform(input_labels)\n output_labels_with_numpy = label_cleaner.transform(input_labels_numpy)\n output_labels_category = label_cleaner.transform(input_labels_category)\n output_labels_with_shifted_index = label_cleaner.transform(input_labels_with_shifted_index)\n output_labels_new = label_cleaner.transform(input_labels_new)\n\n output_labels_inverse = label_cleaner.inverse_transform(output_labels)\n output_labels_with_shifted_index_inverse = label_cleaner.inverse_transform(output_labels_with_shifted_index)\n output_labels_new_inverse = label_cleaner.inverse_transform(output_labels_new)\n\n assert expected_output_labels.equals(output_labels)\n assert expected_output_labels.equals(output_labels_with_numpy)\n assert expected_output_labels.equals(output_labels_category)\n assert not expected_output_labels.equals(output_labels_with_shifted_index)\n output_labels_with_shifted_index.index -= 5\n assert expected_output_labels.equals(output_labels_with_shifted_index)\n assert expected_output_labels_new.equals(output_labels_new)\n\n assert input_labels.equals(output_labels_inverse)\n assert input_labels_with_shifted_index.equals(output_labels_with_shifted_index_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse)\n\n\ndef test_label_cleaner_multiclass_to_binary():\n # Given\n problem_type = MULTICLASS\n input_labels_numpy = np.array(['l1', 'l2', 'l2', 'l1', 'l1', 'l2'])\n input_labels = pd.Series(input_labels_numpy)\n input_labels_uncleaned = pd.Series(['l0', 'l1', 'l2', 'l2', 'l1', 'l1', 'l2', 'l3', 'l4'])\n input_labels_category = input_labels.astype('category')\n input_labels_with_shifted_index = input_labels.copy()\n input_labels_with_shifted_index.index += 5\n input_labels_new = np.array(['l0', 'l1', 'l2'])\n input_labels_proba_transformed = pd.Series([0.7, 0.2, 0.5], index=[5, 2, 8])\n expected_output_labels = pd.Series([0, 1, 1, 0, 0, 1])\n expected_output_labels_new = pd.Series([np.nan, 0, 1])\n expected_output_labels_new_inverse = pd.Series([np.nan, 'l1', 'l2'])\n expected_output_labels_proba_transformed_inverse = pd.DataFrame(\n data=[\n [0, 0.3, 0.7, 0, 0],\n [0, 0.8, 0.2, 0, 0],\n [0, 0.5, 0.5, 0, 0]\n ], index=[5, 2, 8], columns=['l0', 'l1', 'l2', 'l3', 'l4'], dtype=np.float32\n )\n\n # When\n label_cleaner = 
LabelCleaner.construct(problem_type=problem_type, y=input_labels, y_uncleaned=input_labels_uncleaned)\n\n # Then\n assert isinstance(label_cleaner, LabelCleanerMulticlassToBinary)\n assert label_cleaner.problem_type_transform == BINARY\n assert label_cleaner.cat_mappings_dependent_var == {0: 'l1', 1: 'l2'}\n\n output_labels = label_cleaner.transform(input_labels)\n output_labels_with_numpy = label_cleaner.transform(input_labels_numpy)\n output_labels_category = label_cleaner.transform(input_labels_category)\n output_labels_with_shifted_index = label_cleaner.transform(input_labels_with_shifted_index)\n output_labels_new = label_cleaner.transform(input_labels_new)\n\n output_labels_inverse = label_cleaner.inverse_transform(output_labels)\n output_labels_with_shifted_index_inverse = label_cleaner.inverse_transform(output_labels_with_shifted_index)\n output_labels_new_inverse = label_cleaner.inverse_transform(output_labels_new)\n\n assert expected_output_labels.equals(output_labels)\n assert expected_output_labels.equals(output_labels_with_numpy)\n assert expected_output_labels.equals(output_labels_category)\n assert not expected_output_labels.equals(output_labels_with_shifted_index)\n output_labels_with_shifted_index.index -= 5\n assert expected_output_labels.equals(output_labels_with_shifted_index)\n assert expected_output_labels_new.equals(output_labels_new)\n\n assert input_labels.equals(output_labels_inverse)\n assert input_labels_with_shifted_index.equals(output_labels_with_shifted_index_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse)\n\n output_labels_proba_transformed_inverse = label_cleaner.inverse_transform_proba(input_labels_proba_transformed, as_pandas=True)\n\n pd.testing.assert_frame_equal(expected_output_labels_proba_transformed_inverse, output_labels_proba_transformed_inverse)\n\n\ndef test_label_cleaner_regression():\n # Given\n problem_type = REGRESSION\n input_labels_numpy = np.array([2, 4, 2, 2, 4, 1])\n input_labels = pd.Series(input_labels_numpy)\n input_labels_new = pd.Series([3, 5, 2])\n expected_output_labels = input_labels.copy()\n expected_output_labels_new = input_labels_new.copy()\n expected_output_labels_new_inverse = input_labels_new.copy()\n\n # When\n label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=input_labels, y_uncleaned=None)\n\n # Then\n assert isinstance(label_cleaner, LabelCleanerDummy)\n assert label_cleaner.problem_type_transform == REGRESSION\n\n output_labels = label_cleaner.transform(input_labels)\n output_labels_with_numpy = label_cleaner.transform(input_labels_numpy)\n output_labels_new = label_cleaner.transform(input_labels_new)\n\n output_labels_inverse = label_cleaner.inverse_transform(output_labels)\n output_labels_new_inverse = label_cleaner.inverse_transform(output_labels_new)\n\n assert expected_output_labels.equals(output_labels)\n assert expected_output_labels.equals(output_labels_with_numpy)\n assert expected_output_labels_new.equals(output_labels_new)\n\n assert input_labels.equals(output_labels_inverse)\n assert expected_output_labels_new_inverse.equals(output_labels_new_inverse)\n\n\ndef test_label_softclass():\n # Given\n problem_type = SOFTCLASS\n input_labels = pd.DataFrame([\n [0, 1, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0],\n [0, 0, 0.3, 0.6, 0.1, 0],\n ])\n\n # When\n label_cleaner = LabelCleaner.construct(problem_type=problem_type, y=input_labels, y_uncleaned=None)\n\n # Then\n assert input_labels.equals(label_cleaner.transform(input_labels))\n assert 
input_labels.equals(label_cleaner.inverse_transform(input_labels))\n assert label_cleaner.num_classes == 6\n" ]
[ [ "numpy.array", "pandas.Series", "pandas.DataFrame", "pandas.testing.assert_frame_equal" ] ]
koushalkh/ML-LAB
[ "597389b7c93f964361baad8c8e27b2cf196e967c" ]
[ "10-regression.py" ]
[ "import numpy as np\r\nfrom bokeh.plotting import figure, show, output_notebook\r\nfrom bokeh.layouts import gridplot\r\nfrom bokeh.io import push_notebook\r\n\r\n#output_notebook()\r\nimport numpy as np\r\n\r\ndef local_regression(x0, X, Y, tau):\r\n # add bias term\r\n x0 = np.r_[1, x0] # Add one to avoid the loss in information \r\n X = np.c_[np.ones(len(X)), X] \r\n \r\n # fit model: normal equations with kernel\r\n xw = X.T * radial_kernel(x0, X, tau) # XTranspose * W\r\n \r\n beta = np.linalg.pinv(xw @ X) @ xw @ Y # @ Matrix Multiplication or Dot Product \r\n \r\n # predict value\r\n return x0 @ beta # @ Matrix Multiplication or Dot Product for prediction \r\n\r\ndef radial_kernel(x0, X, tau):\r\n return np.exp(np.sum((X - x0) ** 2, axis=1) / (-2 * tau * tau)) # Weight or Radial Kernal Bias Function\r\n\r\nn = 1000\r\n# generate dataset\r\nX = np.linspace(-3, 3, num=n)\r\nprint(\"The Data Set ( 10 Samples) X :\\n\",X[1:10])\r\nY = np.log(np.abs(X ** 2 - 1) + .5)\r\nprint(\"The Fitting Curve Data Set (10 Samples) Y :\\n\",Y[1:10])\r\n# jitter X\r\nX += np.random.normal(scale=.1, size=n)\r\nprint(\"Normalised (10 Samples) X :\\n\",X[1:10])\r\n\r\ndomain = np.linspace(-3, 3, num=300)\r\nprint(\" Xo Domain Space(10 Samples) :\\n\",domain[1:10])\r\n\r\ndef plot_lwr(tau):\r\n # prediction through regression\r\n prediction = [local_regression(x0, X, Y, tau) for x0 in domain]\r\n plot = figure(plot_width=400, plot_height=400)\r\n plot.title.text='tau=%g' % tau\r\n plot.scatter(X, Y, alpha=.3)\r\n plot.line(domain, prediction, line_width=2, color='red')\r\n return plot\r\n# Plotting the curves with different tau\r\nshow(gridplot([\r\n [plot_lwr(10.), plot_lwr(1.)],\r\n [plot_lwr(0.1), plot_lwr(0.01)]\r\n]))\r\n" ]
[ [ "numpy.sum", "numpy.abs", "numpy.random.normal", "numpy.linalg.pinv", "numpy.linspace" ] ]
miyamotost/ITDD-with-DialogueAct
[ "827b8b27bacb8a48fea479e709c39eaee3610552" ]
[ "onmt/model_builder.py" ]
[ "\"\"\"\nThis file is for models creation, which consults options\nand creates each encoder and decoder accordingly.\n\"\"\"\nimport re\nimport torch\nimport torch.nn as nn\nfrom torch.nn.init import xavier_uniform_\n\nimport onmt.inputters as inputters\nimport onmt.modules\nfrom onmt.encoders.rnn_encoder import RNNEncoder\nfrom onmt.encoders.mtransformer import TransformerEncoder\nfrom onmt.encoders.cnn_encoder import CNNEncoder\nfrom onmt.encoders.mean_encoder import MeanEncoder\nfrom onmt.encoders.audio_encoder import AudioEncoder\nfrom onmt.encoders.image_encoder import ImageEncoder\n\nfrom onmt.decoders.decoder import InputFeedRNNDecoder, StdRNNDecoder\nfrom onmt.decoders.mtransformer import TransformerDecoder\nfrom onmt.decoders.cnn_decoder import CNNDecoder\n\nfrom onmt.modules import Embeddings, CopyGenerator\nfrom onmt.utils.misc import use_gpu\nfrom onmt.utils.logging import logger\n\n\ndef build_embeddings(opt, word_field, feat_fields, for_encoder=True):\n \"\"\"\n Args:\n opt: the option in current environment.\n word_dict(Vocab): words dictionary.\n feature_dicts([Vocab], optional): a list of feature dictionary.\n for_encoder(bool): build Embeddings for encoder or decoder?\n \"\"\"\n emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size\n\n word_padding_idx = word_field.vocab.stoi[word_field.pad_token]\n num_word_embeddings = len(word_field.vocab)\n\n feat_pad_indices = [ff.vocab.stoi[ff.pad_token] for ff in feat_fields]\n num_feat_embeddings = [len(ff.vocab) for ff in feat_fields]\n\n emb = Embeddings(\n word_vec_size=emb_dim,\n position_encoding=opt.position_encoding,\n feat_merge=opt.feat_merge,\n feat_vec_exponent=opt.feat_vec_exponent,\n feat_vec_size=opt.feat_vec_size,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n feat_padding_idx=feat_pad_indices,\n word_vocab_size=num_word_embeddings,\n feat_vocab_sizes=num_feat_embeddings,\n sparse=opt.optim == \"sparseadam\"\n )\n return emb\n\n\ndef build_encoder(opt, embeddings):\n \"\"\"\n Various encoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this encoder.\n \"\"\"\n if opt.encoder_type == \"transformer\":\n encoder = TransformerEncoder(\n opt.model_mode,\n opt.model_mode2,\n opt.model_ffn_mode,\n opt.enc_layers,\n opt.enc_rnn_size,\n opt.heads,\n opt.transformer_ff,\n opt.dropout,\n embeddings\n )\n elif opt.encoder_type == \"cnn\":\n encoder = CNNEncoder(\n opt.enc_layers,\n opt.enc_rnn_size,\n opt.cnn_kernel_width,\n opt.dropout,\n embeddings)\n elif opt.encoder_type == \"mean\":\n encoder = MeanEncoder(opt.enc_layers, embeddings)\n else:\n encoder = RNNEncoder(\n opt.rnn_type,\n opt.brnn,\n opt.enc_layers,\n opt.enc_rnn_size,\n opt.dropout,\n embeddings,\n opt.bridge\n )\n return encoder\n\n\ndef build_decoder(opt, embeddings):\n \"\"\"\n Various decoder dispatcher function.\n Args:\n opt: the option in current environment.\n embeddings (Embeddings): vocab embeddings for this decoder.\n \"\"\"\n if opt.decoder_type == \"transformer\":\n decoder = TransformerDecoder(\n opt.model_mode,\n opt.model_mode2,\n opt.model_ffn_mode,\n opt.dec_layers,\n opt.dec_rnn_size,\n opt.heads,\n opt.transformer_ff,\n opt.global_attention,\n opt.copy_attn,\n opt.self_attn_type,\n opt.dropout,\n embeddings\n )\n elif opt.decoder_type == \"cnn\":\n decoder = CNNDecoder(\n opt.dec_layers,\n opt.dec_rnn_size,\n opt.global_attention,\n opt.copy_attn,\n opt.cnn_kernel_width,\n opt.dropout,\n embeddings\n )\n else:\n dec_class = 
InputFeedRNNDecoder if opt.input_feed else StdRNNDecoder\n decoder = dec_class(\n opt.rnn_type,\n opt.brnn,\n opt.dec_layers,\n opt.dec_rnn_size,\n opt.global_attention,\n opt.global_attention_function,\n opt.coverage_attn,\n opt.context_gate,\n opt.copy_attn,\n opt.dropout,\n embeddings,\n opt.reuse_copy_attn\n )\n return decoder\n\n\ndef load_test_model(opt, dummy_opt, model_path=None):\n if model_path is None:\n model_path = opt.models[0]\n checkpoint = torch.load(model_path,\n map_location=lambda storage, loc: storage)\n fields = inputters.load_fields_from_vocab(\n checkpoint['vocab'], data_type=opt.data_type)\n\n model_opt = checkpoint['opt']\n\n for arg in dummy_opt:\n if arg not in model_opt:\n model_opt.__dict__[arg] = dummy_opt[arg]\n\n #if not hasattr(model_opt, 'model_mode'):\n model_opt.model_mode = opt.model_mode\n model_opt.model_mode2 = opt.model_mode2\n model_opt.model_ffn_mode = opt.model_ffn_mode\n print(\n \"[onmt.model_builder.py] model_opt.model_mode: {}, model_opt.model_mode2: {}, model_opt.model_ffn_mode: {}\"\n .format(model_opt.model_mode, model_opt.model_mode2, model_opt.model_ffn_mode)\n )\n\n model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)\n model.eval()\n model.generator.eval()\n return fields, model, model_opt\n\n\ndef build_base_model(model_opt, fields, gpu, checkpoint=None):\n \"\"\"\n Args:\n model_opt: the option loaded from checkpoint.\n fields: `Field` objects for the model.\n gpu(bool): whether to use gpu.\n checkpoint: the model gnerated by train phase, or a resumed snapshot\n model from a stopped training.\n Returns:\n the NMTModel.\n \"\"\"\n assert model_opt.model_type in [\"text\", \"img\", \"audio\"], \\\n \"Unsupported model type %s\" % model_opt.model_type\n\n # for backward compatibility\n if model_opt.rnn_size != -1:\n model_opt.enc_rnn_size = model_opt.rnn_size\n model_opt.dec_rnn_size = model_opt.rnn_size\n\n # Build encoder.\n if model_opt.model_type == \"text\":\n feat_fields = [fields[k]\n for k in inputters.collect_features(fields, 'src')]\n src_emb = build_embeddings(model_opt, fields[\"src\"], feat_fields)\n #print(\"[build_base_model in onmt.model_builder.py] fields[\\\"src\\\"]: {}\".format(fields[\"src\"]))\n #print(\"[build_base_model in onmt.model_builder.py] feat_fields: {}\".format(feat_fields))\n #print(\"[build_base_model in onmt.model_builder.py] src_emb: {}\".format(src_emb))\n print(\n \"[onmt.model_builder.py] model_opt.model_mode: {}, model_opt.model_mode2: {}, model_opt.model_ffn_mode: {}\"\n .format(model_opt.model_mode, model_opt.model_mode2, model_opt.model_ffn_mode)\n )\n encoder = build_encoder(model_opt, src_emb)\n elif model_opt.model_type == \"img\":\n # why is build_encoder not used here?\n # why is the model_opt.__dict__ check necessary?\n if \"image_channel_size\" not in model_opt.__dict__:\n image_channel_size = 3\n else:\n image_channel_size = model_opt.image_channel_size\n\n encoder = ImageEncoder(\n model_opt.enc_layers,\n model_opt.brnn,\n model_opt.enc_rnn_size,\n model_opt.dropout,\n image_channel_size\n )\n elif model_opt.model_type == \"audio\":\n encoder = AudioEncoder(\n model_opt.rnn_type,\n model_opt.enc_layers,\n model_opt.dec_layers,\n model_opt.brnn,\n model_opt.enc_rnn_size,\n model_opt.dec_rnn_size,\n model_opt.audio_enc_pooling,\n model_opt.dropout,\n model_opt.sample_rate,\n model_opt.window_size\n )\n\n # Build decoder.\n feat_fields = [fields[k]\n for k in inputters.collect_features(fields, 'tgt')]\n tgt_emb = build_embeddings(\n model_opt, fields[\"tgt\"], 
feat_fields, for_encoder=False)\n\n # Share the embedding matrix - preprocess with share_vocab required.\n if model_opt.share_embeddings:\n # src/tgt vocab should be the same if `-share_vocab` is specified.\n assert fields['src'].vocab == fields['tgt'].vocab, \\\n \"preprocess with -share_vocab if you use share_embeddings\"\n\n tgt_emb.word_lut.weight = src_emb.word_lut.weight\n\n decoder = build_decoder(model_opt, tgt_emb)\n decoder2 = build_decoder(model_opt, tgt_emb)\n\n # Build NMTModel(= encoder + decoder).\n device = torch.device(\"cuda\" if gpu else \"cpu\")\n # model = onmt.models.NMTModel(encoder, decoder)\n model = onmt.models.KTransformerModel(encoder, decoder, decoder2)\n\n # Build Generator.\n if not model_opt.copy_attn:\n if model_opt.generator_function == \"sparsemax\":\n gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)\n else:\n gen_func = nn.LogSoftmax(dim=-1)\n generator = nn.Sequential(\n nn.Linear(model_opt.dec_rnn_size, len(fields[\"tgt\"].vocab)),\n gen_func\n )\n if model_opt.share_decoder_embeddings:\n generator[0].weight = decoder.embeddings.word_lut.weight\n else:\n vocab_size = len(fields[\"tgt\"].vocab)\n pad_idx = fields[\"tgt\"].vocab.stoi[fields[\"tgt\"].pad_token]\n generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)\n\n # Load the model states from checkpoint or initialize them.\n if checkpoint is not None:\n # This preserves backward-compat for models using customed layernorm\n def fix_key(s):\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.b_2',\n r'\\1.layer_norm\\2.bias', s)\n s = re.sub(r'(.*)\\.layer_norm((_\\d+)?)\\.a_2',\n r'\\1.layer_norm\\2.weight', s)\n return s\n\n checkpoint['model'] = {fix_key(k): v\n for k, v in checkpoint['model'].items()}\n # end of patch for backward compatibility\n\n model.load_state_dict(checkpoint['model'], strict=False)\n generator.load_state_dict(checkpoint['generator'], strict=False)\n else:\n if model_opt.param_init != 0.0:\n for p in model.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n for p in generator.parameters():\n p.data.uniform_(-model_opt.param_init, model_opt.param_init)\n if model_opt.param_init_glorot:\n for p in model.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n for p in generator.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n if hasattr(model.encoder, 'embeddings'):\n model.encoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)\n if hasattr(model.decoder, 'embeddings'):\n model.decoder.embeddings.load_pretrained_vectors(\n model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)\n\n model.generator = generator\n model.to(device)\n\n return model\n\n\ndef build_model(model_opt, opt, fields, checkpoint):\n logger.info('Building model...')\n model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)\n logger.info(model)\n return model\n" ]
[ [ "torch.nn.LogSoftmax", "torch.nn.init.xavier_uniform_", "torch.device", "torch.load" ] ]
gwallison/openFF-build
[ "3526dd5fdb78576186399f84c0bcb95b7edfd991" ]
[ "builder_tasks/CAS_2_incorporate_reference.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 31 07:32:13 2021\n\n@author: Gary\n\nIn this script, the cas master list is merged with the CAS reference list\ncreated from the SciFinder searches. \n\nThe steps in this process:\n- fetch the reference dataframes for authoritative CAS numbers and deprecated ones.\n- find and mark all tentative_CAS numbers that match authoritative numbers\n- find and mark all that match deprecated numbers\n- find and mark those 'valid-but-empty' cas numbers, mark them.\n\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport sys\n\n \ndef merge_with_ref(df):\n # df is the new casigs with cas_tool fields included\n # fetch the reference dataframes\n ref = pd.read_csv('./sources/CAS_ref_and_names.csv',\n encoding='utf-8',quotechar='$')\n dep = pd.read_csv('./sources/CAS_deprecated.csv',encoding='utf-8',quotechar='$')\n \n # get the matches with reference numbers\n test = pd.merge(df, #[['CASNumber','tent_CAS','valid_after_cleaning']],\n ref[['cas_number']],\n left_on='tent_CAS',right_on='cas_number',how='left',\n indicator=True)\n test['on_ref_list'] = np.where(test['_merge']=='both',\n 'verified;normal','unk') \n test['bgCAS'] = np.where(test['_merge']=='both',\n test.cas_number, # if in both, save the CAS\n '') # otherwise leave it empty\n test = test.drop('_merge',axis=1) # clean up before next merge\n\n # now find the deprecated CAS numbers\n test = pd.merge(test,dep,\n left_on='tent_CAS',right_on='deprecated',how='left',\n indicator=True)\n # A potential error is if we get an authoritative match AND a deprecated\n # match. Scan for that situation, alert the user, and exit\n cond1 = ~test.cas_number.isna()\n cond2 = test['_merge']=='both'\n if (cond1&cond2).sum()>0:\n print('DEPRECATED DETECTED ON AN VERIFIED CAS')\n print(test[cond1&cond2])\n sys.exit(1)\n \n # mark the deprecated and take the valid CAS as bgCAS\n test['on_ref_list'] = np.where(test['_merge']=='both',\n 'verified;from deprecated',test.on_ref_list) \n test['bgCAS'] = np.where(test['_merge']=='both',\n test.cas_replacement,test.bgCAS)\n test = test.drop(['_merge','cas_number'],axis=1) # clean up before next merge\n \n # mark the CAS numbers that are formally valid but without authoritative cas in ref.\n # these may be good targets for later curating\n cond1 = test.valid_after_cleaning\n cond2 = test.on_ref_list=='unk'\n test['bgCAS'] = np.where(cond1&cond2,'valid_but_empty',test.bgCAS)\n test['on_ref_list'] = np.where(cond1&cond2,'valid_but_empty',test.on_ref_list)\n test = test.drop(['deprecated',\n 'cas_replacement','tent_CAS',\n #'ing_name',\n 'valid_after_cleaning'],axis=1) # clean up before next merge\n test['is_new'] = True\n # Now concat with the old data (DONT MERGE - otherwise old gets clobbered!)\n print(f'\\nNumber of new CAS/Ing lines to curate: {len(test)}\\n')\n old = pd.read_csv('./sources/casing_curate_master.csv',quotechar='$',\n encoding='utf-8')\n old = old[['CASNumber','IngredientName','bgCAS','category',\n 'close_syn','comment','first_date','change_date','change_comment']]\n old['is_new'] = False \n out = pd.concat([test,old],sort=True)\n \n return out[['CASNumber','IngredientName','bgCAS','category','is_new',\n 'clean_wo_work','on_ref_list',\n 'close_syn','comment','first_date',\n 'change_date','change_comment']],len(test)\n\n" ]
[ [ "pandas.read_csv", "numpy.where", "pandas.merge", "pandas.concat" ] ]
PKSingh0017/MSCG-Net
[ "b7e79d68f14984fe460eff72bcbb8049e4d2bc9f" ]
[ "lib/utils/funtions.py" ]
[ "import numpy as np\n\nimport torch\nfrom torch import Tensor\n\nfrom PIL import Image, ImageEnhance\n\ndef torch_none(x: Tensor):\n return x\n\n\ndef torch_rot90_(x: Tensor):\n return x.transpose_(2, 3).flip(2)\n\n\ndef torch_rot90(x: Tensor):\n return x.transpose(2, 3).flip(2)\n\n\ndef torch_rot180(x: Tensor):\n return x.flip(2).flip(3)\n\n\ndef torch_rot270(x: Tensor):\n return x.transpose(2, 3).flip(3)\n\n\ndef torch_flipud(x: Tensor):\n return x.flip(2)\n\n\ndef torch_fliplp(x: Tensor):\n return x.flip(3)\n\n\ndef torch_transpose(x: Tensor):\n return x.transpose(2, 3)\n\n\ndef torch_transpose_(x: Tensor):\n return x.transpose_(2, 3)\n\n\ndef torch_transpose2(x: Tensor):\n return x.transpose(3, 2)\n\n\ndef pad_tensor(image_tensor: Tensor, pad_size: int = 32):\n \"\"\"Pads input tensor to make it's height and width dividable by @pad_size\n\n :param image_tensor: Input tensor of shape NCHW\n :param pad_size: Pad size\n :return: Tuple of output tensor and pad params. Second argument can be used to reverse pad operation of model output\n \"\"\"\n rows, cols = image_tensor.size(2), image_tensor.size(3)\n\n if rows > pad_size:\n pad_rows = rows % pad_size\n pad_rows = pad_size - pad_rows if pad_rows > 0 else 0\n else:\n pad_rows = pad_size - rows\n\n if cols > pad_size:\n pad_cols = cols % pad_size\n pad_cols = pad_size - pad_cols if pad_cols > 0 else 0\n else:\n pad_cols = pad_size - cols\n\n if pad_rows == 0 and pad_cols == 0:\n return image_tensor, (0, 0, 0, 0)\n\n pad_top = pad_rows // 2\n pad_btm = pad_rows - pad_top\n\n pad_left = pad_cols // 2\n pad_right = pad_cols - pad_left\n\n pad = [pad_left, pad_right, pad_top, pad_btm]\n image_tensor = torch.nn.functional.pad(image_tensor, pad)\n return image_tensor, pad\n\n\ndef unpad_tensor(image_tensor, pad):\n pad_left, pad_right, pad_top, pad_btm = pad\n rows, cols = image_tensor.size(2), image_tensor.size(3)\n return image_tensor[..., pad_top:rows - pad_btm, pad_left: cols - pad_right]\n\n\ndef image_enhance(img, gama=1.55):\n # image = img\n # if convert:\n image = np.asarray(img*255, np.uint8)\n # --------- down contrast\n image = Image.fromarray(image)\n # image.show()\n contrast = ImageEnhance.Contrast(image)\n image = contrast.enhance(gama)\n # ----------\n # if convert:\n image = np.asarray(image, np.float32) / 255.0\n return image\n\n\n" ]
[ [ "torch.nn.functional.pad", "numpy.asarray" ] ]
2channelkrt/VLAE
[ "5fd34127a0805c14b967cc1742c4ac69bf52bb51" ]
[ "utils.py" ]
[ "import torch\n\n\ndef clip_grad(gradient, clip_value):\n \"\"\" clip between clip_min and clip_max\n \"\"\"\n return torch.clamp(gradient, min=-clip_value, max=clip_value)\n\ndef clip_grad_norm(gradient, clip_value):\n norm = (gradient**2).sum(-1)\n divisor = torch.max(torch.ones_like(norm).cuda(), norm / clip_value)\n return gradient / divisor.unsqueeze(-1)\n" ]
[ [ "torch.ones_like", "torch.clamp" ] ]
hsyis/object-detection-yolo2-tiny
[ "507ac8aa2fc1cd9d2e12db9a720a68dceb3a85f0" ]
[ "proj4/src/__init__.py" ]
[ "import sys\nimport numpy as np\nimport cv2\nimport time\nimport argparse\n\nimport yolov2tiny\n\n\ndef resize_input(im):\n imsz = cv2.resize(im, (416, 416))\n imsz = imsz / 255.0\n imsz = imsz[:, :, ::-1]\n return np.asarray(imsz, dtype=np.float32)\n\n\ndef image_object_detection(in_image, out_image, debug):\n frame = cv2.imread(in_image)\n\n y2t = yolov2tiny.YOLO2_TINY([1, 416, 416, 3], \"./y2t_weights.onnx\", debug)\n\n t_end2end = time.time()\n\n _frame = resize_input(frame)\n _frame = np.expand_dims(_frame, axis=0)\n\n t_inference = time.time()\n tout = y2t.inference(_frame)\n t_inference = time.time() - t_inference\n\n tout = np.squeeze(tout)\n frame = yolov2tiny.postprocessing(\n tout, cv2.resize(frame, (416, 416), interpolation=cv2.INTER_CUBIC)\n )\n t_end2end = time.time() - t_end2end\n\n cv2.imwrite(out_image, frame)\n\n print(\"DNN inference elapsed time: %.3f\" % t_inference)\n print(\"End-to-end elapsed time : %.3f\" % t_end2end)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"IN_IMAGE\", help=\"path to the input jpg\")\n parser.add_argument(\"OUT_IMAGE\", help=\"path to the output jpg\")\n parser.add_argument(\"--debug\", action=\"store_true\", help=\"turn on debug flag\")\n args = parser.parse_args()\n\n image_object_detection(args.IN_IMAGE, args.OUT_IMAGE, args.debug)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.expand_dims", "numpy.asarray", "numpy.squeeze" ] ]
adelevie/cudf
[ "fe5e07ddba42eeb6e1dc7cf94cdeebf52970b187" ]
[ "python/cudf/cudf/core/column/numerical.py" ]
[ "# Copyright (c) 2018-2021, NVIDIA CORPORATION.\nfrom __future__ import annotations\n\nfrom numbers import Number\nfrom typing import Any, Callable, Sequence, Union, cast\n\nimport numpy as np\nimport pandas as pd\nfrom nvtx import annotate\nfrom pandas.api.types import is_integer_dtype\n\nimport cudf\nfrom cudf import _lib as libcudf\nfrom cudf._lib.quantiles import quantile as cpp_quantile\nfrom cudf._typing import BinaryOperand, ColumnLike, Dtype, DtypeObj, ScalarLike\nfrom cudf.core.buffer import Buffer\nfrom cudf.core.column import (\n ColumnBase,\n as_column,\n build_column,\n column,\n string,\n)\nfrom cudf.utils import cudautils, utils\nfrom cudf.utils.dtypes import (\n min_column_type,\n min_signed_type,\n numeric_normalize_types,\n to_cudf_compatible_scalar,\n)\n\n\nclass NumericalColumn(ColumnBase):\n def __init__(\n self,\n data: Buffer,\n dtype: DtypeObj,\n mask: Buffer = None,\n size: int = None,\n offset: int = 0,\n null_count: int = None,\n ):\n \"\"\"\n Parameters\n ----------\n data : Buffer\n dtype : np.dtype\n The dtype associated with the data Buffer\n mask : Buffer, optional\n \"\"\"\n dtype = np.dtype(dtype)\n if data.size % dtype.itemsize:\n raise ValueError(\"Buffer size must be divisible by element size\")\n if size is None:\n size = data.size // dtype.itemsize\n size = size - offset\n\n super().__init__(\n data,\n size=size,\n dtype=dtype,\n mask=mask,\n offset=offset,\n null_count=null_count,\n )\n\n def __contains__(self, item: ScalarLike) -> bool:\n \"\"\"\n Returns True if column contains item, else False.\n \"\"\"\n # Handles improper item types\n # Fails if item is of type None, so the handler.\n try:\n if np.can_cast(item, self.data_array_view.dtype):\n item = self.data_array_view.dtype.type(item)\n else:\n return False\n except (TypeError, ValueError):\n return False\n # TODO: Use `scalar`-based `contains` wrapper\n return libcudf.search.contains(\n self, column.as_column([item], dtype=self.dtype)\n ).any()\n\n def unary_operator(self, unaryop: str) -> ColumnBase:\n return _numeric_column_unaryop(self, op=unaryop)\n\n def binary_operator(\n self, binop: str, rhs: BinaryOperand, reflect: bool = False,\n ) -> ColumnBase:\n int_dtypes = [\n np.dtype(\"int8\"),\n np.dtype(\"int16\"),\n np.dtype(\"int32\"),\n np.dtype(\"int64\"),\n np.dtype(\"uint8\"),\n np.dtype(\"uint16\"),\n np.dtype(\"uint32\"),\n np.dtype(\"uint64\"),\n ]\n if rhs is None:\n out_dtype = self.dtype\n else:\n if not (\n isinstance(rhs, (NumericalColumn, cudf.Scalar,),)\n or np.isscalar(rhs)\n ):\n msg = \"{!r} operator not supported between {} and {}\"\n raise TypeError(msg.format(binop, type(self), type(rhs)))\n out_dtype = np.result_type(self.dtype, rhs.dtype)\n if binop in [\"mod\", \"floordiv\"]:\n tmp = self if reflect else rhs\n if (tmp.dtype in int_dtypes) and (\n (np.isscalar(tmp) and (0 == tmp))\n or ((isinstance(tmp, NumericalColumn)) and (0.0 in tmp))\n ):\n out_dtype = np.dtype(\"float64\")\n return _numeric_column_binop(\n lhs=self, rhs=rhs, op=binop, out_dtype=out_dtype, reflect=reflect\n )\n\n def _apply_scan_op(self, op: str) -> ColumnBase:\n return libcudf.reduce.scan(op, self, True)\n\n def normalize_binop_value(\n self, other: ScalarLike\n ) -> Union[ColumnBase, ScalarLike]:\n if other is None:\n return other\n if isinstance(other, cudf.Scalar):\n if self.dtype == other.dtype:\n return other\n # expensive device-host transfer just to\n # adjust the dtype\n other = other.value\n elif isinstance(other, np.ndarray) and other.ndim == 0:\n other = other.item()\n other_dtype 
= np.min_scalar_type(other)\n if other_dtype.kind in {\"b\", \"i\", \"u\", \"f\"}:\n if isinstance(other, cudf.Scalar):\n return other\n other_dtype = np.promote_types(self.dtype, other_dtype)\n if other_dtype == np.dtype(\"float16\"):\n other_dtype = np.dtype(\"float32\")\n other = other_dtype.type(other)\n if self.dtype.kind == \"b\":\n other_dtype = min_signed_type(other)\n if np.isscalar(other):\n other = np.dtype(other_dtype).type(other)\n return other\n else:\n ary = utils.scalar_broadcast_to(\n other, size=len(self), dtype=other_dtype\n )\n return column.build_column(\n data=Buffer(ary), dtype=ary.dtype, mask=self.mask,\n )\n else:\n raise TypeError(f\"cannot broadcast {type(other)}\")\n\n def int2ip(self) -> \"cudf.core.column.StringColumn\":\n if self.dtype != np.dtype(\"int64\"):\n raise TypeError(\"Only int64 type can be converted to ip\")\n\n return libcudf.string_casting.int2ip(self)\n\n def as_string_column(\n self, dtype: Dtype, format=None\n ) -> \"cudf.core.column.StringColumn\":\n if len(self) > 0:\n return string._numeric_to_str_typecast_functions[\n np.dtype(self.dtype)\n ](self)\n else:\n return cast(\n \"cudf.core.column.StringColumn\", as_column([], dtype=\"object\")\n )\n\n def as_datetime_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.DatetimeColumn\":\n return cast(\n \"cudf.core.column.DatetimeColumn\",\n build_column(\n data=self.astype(\"int64\").base_data,\n dtype=dtype,\n mask=self.base_mask,\n offset=self.offset,\n size=self.size,\n ),\n )\n\n def as_timedelta_column(\n self, dtype: Dtype, **kwargs\n ) -> \"cudf.core.column.TimeDeltaColumn\":\n return cast(\n \"cudf.core.column.TimeDeltaColumn\",\n build_column(\n data=self.astype(\"int64\").base_data,\n dtype=dtype,\n mask=self.base_mask,\n offset=self.offset,\n size=self.size,\n ),\n )\n\n def as_numerical_column(self, dtype: Dtype) -> NumericalColumn:\n dtype = np.dtype(dtype)\n if dtype == self.dtype:\n return self\n return libcudf.unary.cast(self, dtype)\n\n def reduce(self, op: str, skipna: bool = None, **kwargs) -> float:\n min_count = kwargs.pop(\"min_count\", 0)\n preprocessed = self._process_for_reduction(\n skipna=skipna, min_count=min_count\n )\n if isinstance(preprocessed, ColumnBase):\n return libcudf.reduce.reduce(op, preprocessed, **kwargs)\n else:\n return cast(float, preprocessed)\n\n def sum(\n self, skipna: bool = None, dtype: Dtype = None, min_count: int = 0\n ) -> float:\n return self.reduce(\n \"sum\", skipna=skipna, dtype=dtype, min_count=min_count\n )\n\n def product(\n self, skipna: bool = None, dtype: Dtype = None, min_count: int = 0\n ) -> float:\n return self.reduce(\n \"product\", skipna=skipna, dtype=dtype, min_count=min_count\n )\n\n def mean(self, skipna: bool = None, dtype: Dtype = np.float64) -> float:\n return self.reduce(\"mean\", skipna=skipna, dtype=dtype)\n\n def var(\n self, skipna: bool = None, ddof: int = 1, dtype: Dtype = np.float64\n ) -> float:\n return self.reduce(\"var\", skipna=skipna, dtype=dtype, ddof=ddof)\n\n def std(\n self, skipna: bool = None, ddof: int = 1, dtype: Dtype = np.float64\n ) -> float:\n return self.reduce(\"std\", skipna=skipna, dtype=dtype, ddof=ddof)\n\n def sum_of_squares(self, dtype: Dtype = None) -> float:\n return libcudf.reduce.reduce(\"sum_of_squares\", self, dtype=dtype)\n\n def kurtosis(self, skipna: bool = None) -> float:\n skipna = True if skipna is None else skipna\n\n if len(self) == 0 or (not skipna and self.has_nulls):\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n self = 
self.nans_to_nulls().dropna() # type: ignore\n\n if len(self) < 4:\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n n = len(self)\n miu = self.mean()\n m4_numerator = ((self - miu) ** self.normalize_binop_value(4)).sum()\n V = self.var()\n\n if V == 0:\n return 0\n\n term_one_section_one = (n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))\n term_one_section_two = m4_numerator / (V ** 2)\n term_two = ((n - 1) ** 2) / ((n - 2) * (n - 3))\n kurt = term_one_section_one * term_one_section_two - 3 * term_two\n return kurt\n\n def skew(self, skipna: bool = None) -> ScalarLike:\n skipna = True if skipna is None else skipna\n\n if len(self) == 0 or (not skipna and self.has_nulls):\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n self = self.nans_to_nulls().dropna() # type: ignore\n\n if len(self) < 3:\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n n = len(self)\n miu = self.mean()\n m3 = (((self - miu) ** self.normalize_binop_value(3)).sum()) / n\n m2 = self.var(ddof=0)\n\n if m2 == 0:\n return 0\n\n unbiased_coef = ((n * (n - 1)) ** 0.5) / (n - 2)\n skew = unbiased_coef * m3 / (m2 ** (3 / 2))\n return skew\n\n def quantile(\n self, q: Union[float, Sequence[float]], interpolation: str, exact: bool\n ) -> NumericalColumn:\n if isinstance(q, Number) or cudf.utils.dtypes.is_list_like(q):\n np_array_q = np.asarray(q)\n if np.logical_or(np_array_q < 0, np_array_q > 1).any():\n raise ValueError(\n \"percentiles should all be in the interval [0, 1]\"\n )\n # Beyond this point, q either being scalar or list-like\n # will only have values in range [0, 1]\n result = self._numeric_quantile(q, interpolation, exact)\n if isinstance(q, Number):\n return (\n cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n if result[0] is cudf.NA\n else result[0]\n )\n return result\n\n def median(self, skipna: bool = None) -> NumericalColumn:\n skipna = True if skipna is None else skipna\n\n if not skipna and self.has_nulls:\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n # enforce linear in case the default ever changes\n return self.quantile(0.5, interpolation=\"linear\", exact=True)\n\n def _numeric_quantile(\n self, q: Union[float, Sequence[float]], interpolation: str, exact: bool\n ) -> NumericalColumn:\n quant = [float(q)] if not isinstance(q, (Sequence, np.ndarray)) else q\n # get sorted indices and exclude nulls\n sorted_indices = self.as_frame()._get_sorted_inds(True, \"first\")\n sorted_indices = sorted_indices[self.null_count :]\n\n return cpp_quantile(self, quant, interpolation, sorted_indices, exact)\n\n def cov(self, other: ColumnBase) -> float:\n if (\n len(self) == 0\n or len(other) == 0\n or (len(self) == 1 and len(other) == 1)\n ):\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n result = (self - self.mean()) * (other - other.mean())\n cov_sample = result.sum() / (len(self) - 1)\n return cov_sample\n\n def corr(self, other: ColumnBase) -> float:\n if len(self) == 0 or len(other) == 0:\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n\n cov = self.cov(other)\n lhs_std, rhs_std = self.std(), other.std()\n\n if not cov or lhs_std == 0 or rhs_std == 0:\n return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)\n return cov / lhs_std / rhs_std\n\n def round(self, decimals: int = 0) -> NumericalColumn:\n \"\"\"Round the values in the Column to the given number of decimals.\n \"\"\"\n return libcudf.round.round(self, decimal_places=decimals)\n\n def applymap(\n self, udf: Callable[[ScalarLike], ScalarLike], out_dtype: Dtype = None\n ) -> 
ColumnBase:\n \"\"\"Apply an element-wise function to transform the values in the Column.\n\n Parameters\n ----------\n udf : function\n Wrapped by numba jit for call on the GPU as a device function.\n out_dtype : numpy.dtype; optional\n The dtype for use in the output.\n By default, use the same dtype as *self.dtype*.\n\n Returns\n -------\n result : Column\n The mask is preserved.\n \"\"\"\n if out_dtype is None:\n out_dtype = self.dtype\n out = column.column_applymap(udf=udf, column=self, out_dtype=out_dtype)\n return out\n\n def default_na_value(self) -> ScalarLike:\n \"\"\"Returns the default NA value for this column\n \"\"\"\n dkind = self.dtype.kind\n if dkind == \"f\":\n return self.dtype.type(np.nan)\n elif dkind == \"i\":\n return np.iinfo(self.dtype).min\n elif dkind == \"u\":\n return np.iinfo(self.dtype).max\n elif dkind == \"b\":\n return self.dtype.type(False)\n else:\n raise TypeError(f\"numeric column of {self.dtype} has no NaN value\")\n\n def find_and_replace(\n self,\n to_replace: ColumnLike,\n replacement: ColumnLike,\n all_nan: bool = False,\n ) -> NumericalColumn:\n \"\"\"\n Return col with *to_replace* replaced with *value*.\n \"\"\"\n to_replace_col = _normalize_find_and_replace_input(\n self.dtype, to_replace\n )\n if all_nan:\n replacement_col = column.as_column(replacement, dtype=self.dtype)\n else:\n replacement_col = _normalize_find_and_replace_input(\n self.dtype, replacement\n )\n if len(replacement_col) == 1 and len(to_replace_col) > 1:\n replacement_col = column.as_column(\n utils.scalar_broadcast_to(\n replacement[0], (len(to_replace_col),), self.dtype\n )\n )\n replaced = self.copy()\n to_replace_col, replacement_col, replaced = numeric_normalize_types(\n to_replace_col, replacement_col, replaced\n )\n return libcudf.replace.replace(\n replaced, to_replace_col, replacement_col\n )\n\n def fillna(\n self,\n fill_value: Any = None,\n method: str = None,\n dtype: Dtype = None,\n fill_nan: bool = True,\n ) -> NumericalColumn:\n \"\"\"\n Fill null values with *fill_value*\n \"\"\"\n if fill_nan:\n col = self.nans_to_nulls()\n else:\n col = self\n\n if method is not None:\n return super(NumericalColumn, col).fillna(fill_value, method)\n\n if (\n isinstance(fill_value, cudf.Scalar)\n and fill_value.dtype == col.dtype\n ):\n return super(NumericalColumn, col).fillna(fill_value, method)\n\n if np.isscalar(fill_value):\n # castsafely to the same dtype as self\n fill_value_casted = col.dtype.type(fill_value)\n if not np.isnan(fill_value) and (fill_value_casted != fill_value):\n raise TypeError(\n f\"Cannot safely cast non-equivalent \"\n f\"{type(fill_value).__name__} to {col.dtype.name}\"\n )\n fill_value = cudf.Scalar(fill_value_casted)\n else:\n fill_value = column.as_column(fill_value, nan_as_null=False)\n # cast safely to the same dtype as self\n if is_integer_dtype(col.dtype):\n fill_value = _safe_cast_to_int(fill_value, col.dtype)\n else:\n fill_value = fill_value.astype(col.dtype)\n\n return super(NumericalColumn, col).fillna(fill_value, method)\n\n def find_first_value(\n self, value: ScalarLike, closest: bool = False\n ) -> int:\n \"\"\"\n Returns offset of first value that matches. 
For monotonic\n columns, returns the offset of the first larger value\n if closest=True.\n \"\"\"\n value = to_cudf_compatible_scalar(value)\n if not pd.api.types.is_number(value):\n raise ValueError(\"Expected a numeric value\")\n found = 0\n if len(self):\n found = cudautils.find_first(\n self.data_array_view, value, mask=self.mask\n )\n if found == -1 and self.is_monotonic and closest:\n if value < self.min():\n found = 0\n elif value > self.max():\n found = len(self)\n else:\n found = cudautils.find_first(\n self.data_array_view, value, mask=self.mask, compare=\"gt\",\n )\n if found == -1:\n raise ValueError(\"value not found\")\n elif found == -1:\n raise ValueError(\"value not found\")\n return found\n\n def find_last_value(self, value: ScalarLike, closest: bool = False) -> int:\n \"\"\"\n Returns offset of last value that matches. For monotonic\n columns, returns the offset of the last smaller value\n if closest=True.\n \"\"\"\n value = to_cudf_compatible_scalar(value)\n if not pd.api.types.is_number(value):\n raise ValueError(\"Expected a numeric value\")\n found = 0\n if len(self):\n found = cudautils.find_last(\n self.data_array_view, value, mask=self.mask,\n )\n if found == -1 and self.is_monotonic and closest:\n if value < self.min():\n found = -1\n elif value > self.max():\n found = len(self) - 1\n else:\n found = cudautils.find_last(\n self.data_array_view, value, mask=self.mask, compare=\"lt\",\n )\n if found == -1:\n raise ValueError(\"value not found\")\n elif found == -1:\n raise ValueError(\"value not found\")\n return found\n\n def can_cast_safely(self, to_dtype: DtypeObj) -> bool:\n \"\"\"\n Returns true if all the values in self can be\n safely cast to dtype\n \"\"\"\n if self.dtype.kind == to_dtype.kind:\n if self.dtype <= to_dtype:\n return True\n else:\n # Kinds are the same but to_dtype is smaller\n if \"float\" in to_dtype.name:\n info = np.finfo(to_dtype)\n elif \"int\" in to_dtype.name:\n info = np.iinfo(to_dtype)\n lower_, upper_ = info.min, info.max\n\n if self.dtype.kind == \"f\":\n # Exclude 'np.inf', '-np.inf'\n s = cudf.Series(self)\n # TODO: replace np.inf with cudf scalar when\n # https://github.com/rapidsai/cudf/pull/6297 merges\n non_infs = s[\n ((s == np.inf) | (s == -np.inf)).logical_not()\n ]\n col = non_infs._column\n else:\n col = self\n\n min_ = col.min()\n # TODO: depending on implementation of cudf scalar and future\n # refactor of min/max, change the test method\n if np.isnan(min_):\n # Column contains only infs\n return True\n\n max_ = col.max()\n if (min_ >= lower_) and (max_ < upper_):\n return True\n else:\n return False\n\n # want to cast int to uint\n elif self.dtype.kind == \"i\" and to_dtype.kind == \"u\":\n i_max_ = np.iinfo(self.dtype).max\n u_max_ = np.iinfo(to_dtype).max\n\n if self.min() >= 0:\n if i_max_ <= u_max_:\n return True\n if self.max() < u_max_:\n return True\n return False\n\n # want to cast uint to int\n elif self.dtype.kind == \"u\" and to_dtype.kind == \"i\":\n u_max_ = np.iinfo(self.dtype).max\n i_max_ = np.iinfo(to_dtype).max\n\n if u_max_ <= i_max_:\n return True\n if self.max() < i_max_:\n return True\n return False\n\n # want to cast int to float\n elif self.dtype.kind in {\"i\", \"u\"} and to_dtype.kind == \"f\":\n info = np.finfo(to_dtype)\n biggest_exact_int = 2 ** (info.nmant + 1)\n if (self.min() >= -biggest_exact_int) and (\n self.max() <= biggest_exact_int\n ):\n return True\n else:\n\n filled = self.fillna(0)\n if (\n cudf.Series(filled).astype(to_dtype).astype(filled.dtype)\n == cudf.Series(filled)\n 
).all():\n return True\n else:\n return False\n\n # want to cast float to int:\n elif self.dtype.kind == \"f\" and to_dtype.kind in {\"i\", \"u\"}:\n info = np.iinfo(to_dtype)\n min_, max_ = info.min, info.max\n\n # best we can do is hope to catch it here and avoid compare\n if (self.min() >= min_) and (self.max() <= max_):\n filled = self.fillna(0, fill_nan=False)\n if (cudf.Series(filled) % 1 == 0).all():\n return True\n else:\n return False\n else:\n return False\n\n return False\n\n\n@annotate(\"BINARY_OP\", color=\"orange\", domain=\"cudf_python\")\ndef _numeric_column_binop(\n lhs: Union[ColumnBase, ScalarLike],\n rhs: Union[ColumnBase, ScalarLike],\n op: str,\n out_dtype: Dtype,\n reflect: bool = False,\n) -> ColumnBase:\n if reflect:\n lhs, rhs = rhs, lhs\n\n is_op_comparison = op in [\"lt\", \"gt\", \"le\", \"ge\", \"eq\", \"ne\"]\n\n if is_op_comparison:\n out_dtype = \"bool\"\n\n out = libcudf.binaryop.binaryop(lhs, rhs, op, out_dtype)\n\n if is_op_comparison:\n out = out.fillna(op == \"ne\")\n\n return out\n\n\ndef _numeric_column_unaryop(operand: ColumnBase, op: str) -> ColumnBase:\n if callable(op):\n return libcudf.transform.transform(operand, op)\n\n op = libcudf.unary.UnaryOp[op.upper()]\n return libcudf.unary.unary_operation(operand, op)\n\n\ndef _safe_cast_to_int(col: ColumnBase, dtype: DtypeObj) -> ColumnBase:\n \"\"\"\n Cast given NumericalColumn to given integer dtype safely.\n \"\"\"\n assert is_integer_dtype(dtype)\n\n if col.dtype == dtype:\n return col\n\n new_col = col.astype(dtype)\n if (new_col == col).all():\n return new_col\n else:\n raise TypeError(\n f\"Cannot safely cast non-equivalent \"\n f\"{col.dtype.type.__name__} to {np.dtype(dtype).type.__name__}\"\n )\n\n\ndef _normalize_find_and_replace_input(\n input_column_dtype: DtypeObj, col_to_normalize: Union[ColumnBase, list]\n) -> ColumnBase:\n normalized_column = column.as_column(\n col_to_normalize,\n dtype=input_column_dtype if len(col_to_normalize) <= 0 else None,\n )\n col_to_normalize_dtype = normalized_column.dtype\n if isinstance(col_to_normalize, list):\n col_to_normalize_dtype = min_column_type(\n normalized_column, input_column_dtype\n )\n # Scalar case\n if len(col_to_normalize) == 1:\n col_to_normalize_casted = input_column_dtype.type(\n col_to_normalize[0]\n )\n if not np.isnan(col_to_normalize_casted) and (\n col_to_normalize_casted != col_to_normalize[0]\n ):\n raise TypeError(\n f\"Cannot safely cast non-equivalent \"\n f\"{col_to_normalize[0]} \"\n f\"to {input_column_dtype.name}\"\n )\n else:\n col_to_normalize_dtype = input_column_dtype\n elif hasattr(col_to_normalize, \"dtype\"):\n col_to_normalize_dtype = col_to_normalize.dtype\n else:\n raise TypeError(f\"Type {type(col_to_normalize)} not supported\")\n\n if (\n col_to_normalize_dtype.kind == \"f\"\n and input_column_dtype.kind in {\"i\", \"u\"}\n ) or (col_to_normalize_dtype.num > input_column_dtype.num):\n raise TypeError(\n f\"Potentially unsafe cast for non-equivalent \"\n f\"{col_to_normalize_dtype.name} \"\n f\"to {input_column_dtype.name}\"\n )\n return normalized_column.astype(input_column_dtype)\n\n\ndef digitize(\n column: ColumnBase, bins: np.ndarray, right: bool = False\n) -> ColumnBase:\n \"\"\"Return the indices of the bins to which each value in column belongs.\n\n Parameters\n ----------\n column : Column\n Input column.\n bins : Column-like\n 1-D column-like object of bins with same type as `column`, should be\n monotonically increasing.\n right : bool\n Indicates whether interval contains the right or left bin 
edge.\n\n Returns\n -------\n A column containing the indices\n \"\"\"\n if not column.dtype == bins.dtype:\n raise ValueError(\n \"Digitize() expects bins and input column have the same dtype.\"\n )\n\n bin_col = as_column(bins, dtype=bins.dtype)\n if bin_col.nullable:\n raise ValueError(\"`bins` cannot contain null entries.\")\n\n return as_column(\n libcudf.sort.digitize(column.as_frame(), bin_col.as_frame(), right)\n )\n" ]
[ [ "numpy.min_scalar_type", "numpy.promote_types", "numpy.logical_or", "numpy.dtype", "pandas.api.types.is_number", "pandas.api.types.is_integer_dtype", "numpy.asarray", "numpy.can_cast", "numpy.iinfo", "numpy.isnan", "numpy.result_type", "numpy.finfo", "numpy.isscalar" ] ]
huangshunliang/keras_h
[ "87ec630a76e2badf6ddd48f889c80e5e2f571117" ]
[ "tests/test_model_saving.py" ]
[ "import pytest\nimport os\nimport tempfile\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom keras.models import Model, Sequential\nfrom keras.layers import Dense, Dropout, Lambda, RepeatVector, TimeDistributed\nfrom keras.layers import Input\nfrom keras import optimizers\nfrom keras import objectives\nfrom keras import metrics\nfrom keras.utils.test_utils import keras_test\nfrom keras.models import save_model, load_model\n\n\n@keras_test\[email protected](reason=\"Currently optimizer state is not preserved for mxnet backend.\")\ndef test_sequential_model_saving():\n model = Sequential()\n model.add(Dense(2, input_dim=3))\n model.add(RepeatVector(3))\n model.add(TimeDistributed(Dense(3)))\n model.compile(loss=objectives.MSE,\n optimizer=optimizers.RMSprop(lr=0.0001),\n metrics=[metrics.categorical_accuracy],\n sample_weight_mode='temporal')\n x = np.random.random((1, 3))\n y = np.random.random((1, 3, 3))\n model.train_on_batch(x, y)\n\n out = model.predict(x)\n _, fname = tempfile.mkstemp('.h5')\n save_model(model, fname)\n\n new_model = load_model(fname)\n os.remove(fname)\n\n out2 = new_model.predict(x)\n assert_allclose(out, out2, atol=1e-05)\n\n # test that new updates are the same with both models\n x = np.random.random((1, 3))\n y = np.random.random((1, 3, 3))\n model.train_on_batch(x, y)\n new_model.train_on_batch(x, y)\n out = model.predict(x)\n out2 = new_model.predict(x)\n assert_allclose(out, out2, atol=1e-05)\n\n\n@keras_test\ndef test_sequential_model_saving_2():\n # test with custom optimizer, loss\n custom_opt = optimizers.rmsprop\n custom_loss = objectives.mse\n model = Sequential()\n model.add(Dense(2, input_dim=3))\n model.add(Dense(3))\n model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])\n\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n out = model.predict(x)\n _, fname = tempfile.mkstemp('.h5')\n save_model(model, fname)\n\n model = load_model(fname,\n custom_objects={'custom_opt': custom_opt,\n 'custom_loss': custom_loss})\n os.remove(fname)\n\n out2 = model.predict(x)\n assert_allclose(out, out2, atol=1e-05)\n\n\n@keras_test\ndef test_fuctional_model_saving():\n input = Input(shape=(3,))\n x = Dense(2)(input)\n output = Dense(3)(x)\n\n model = Model(input, output)\n model.compile(loss=objectives.MSE,\n optimizer=optimizers.RMSprop(lr=0.0001),\n metrics=[metrics.categorical_accuracy])\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n out = model.predict(x)\n _, fname = tempfile.mkstemp('.h5')\n save_model(model, fname)\n\n model = load_model(fname)\n os.remove(fname)\n\n out2 = model.predict(x)\n assert_allclose(out, out2, atol=1e-05)\n\n\n@keras_test\ndef test_saving_without_compilation():\n model = Sequential()\n model.add(Dense(2, input_dim=3))\n model.add(Dense(3))\n model.compile(loss='mse', optimizer='sgd', metrics=['acc'])\n\n _, fname = tempfile.mkstemp('.h5')\n save_model(model, fname)\n model = load_model(fname)\n os.remove(fname)\n\n\n@keras_test\ndef test_saving_right_after_compilation():\n model = Sequential()\n model.add(Dense(2, input_dim=3))\n model.add(Dense(3))\n model.compile(loss='mse', optimizer='sgd', metrics=['acc'])\n model.model._make_train_function()\n\n _, fname = tempfile.mkstemp('.h5')\n save_model(model, fname)\n model = load_model(fname)\n os.remove(fname)\n\n\n@keras_test\ndef test_loading_weights_by_name():\n \"\"\"\n test loading model weights by name on:\n - sequential model\n \"\"\"\n\n # test with 
custom optimizer, loss\n custom_opt = optimizers.rmsprop\n custom_loss = objectives.mse\n\n # sequential model\n model = Sequential()\n model.add(Dense(2, input_dim=3, name=\"rick\"))\n model.add(Dense(3, name=\"morty\"))\n model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])\n\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n out = model.predict(x)\n old_weights = [layer.get_weights() for layer in model.layers]\n _, fname = tempfile.mkstemp('.h5')\n\n model.save_weights(fname)\n\n # delete and recreate model\n del(model)\n model = Sequential()\n model.add(Dense(2, input_dim=3, name=\"rick\"))\n model.add(Dense(3, name=\"morty\"))\n model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])\n\n # load weights from first model\n model.load_weights(fname, by_name=True)\n os.remove(fname)\n\n out2 = model.predict(x)\n assert_allclose(out, out2, atol=1e-05)\n for i in range(len(model.layers)):\n new_weights = model.layers[i].get_weights()\n for j in range(len(new_weights)):\n assert_allclose(old_weights[i][j], new_weights[j], atol=1e-05)\n\n\n@keras_test\ndef test_loading_weights_by_name_2():\n \"\"\"\n test loading model weights by name on:\n - both sequential and functional api models\n - different architecture with shared names\n \"\"\"\n\n # test with custom optimizer, loss\n custom_opt = optimizers.rmsprop\n custom_loss = objectives.mse\n\n # sequential model\n model = Sequential()\n model.add(Dense(2, input_dim=3, name=\"rick\"))\n model.add(Dense(3, name=\"morty\"))\n model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])\n\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n out = model.predict(x)\n old_weights = [layer.get_weights() for layer in model.layers]\n _, fname = tempfile.mkstemp('.h5')\n\n model.save_weights(fname)\n\n # delete and recreate model using Functional API\n del(model)\n data = Input(shape=(3,))\n rick = Dense(2, name=\"rick\")(data)\n jerry = Dense(3, name=\"jerry\")(rick) # add 2 layers (but maintain shapes)\n jessica = Dense(2, name=\"jessica\")(jerry)\n morty = Dense(3, name=\"morty\")(jessica)\n\n model = Model(input=[data], output=[morty])\n model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])\n\n # load weights from first model\n model.load_weights(fname, by_name=True)\n os.remove(fname)\n\n out2 = model.predict(x)\n assert np.max(np.abs(out - out2)) > 1e-05\n\n rick = model.layers[1].get_weights()\n jerry = model.layers[2].get_weights()\n jessica = model.layers[3].get_weights()\n morty = model.layers[4].get_weights()\n\n assert_allclose(old_weights[0][0], rick[0], atol=1e-05)\n assert_allclose(old_weights[0][1], rick[1], atol=1e-05)\n assert_allclose(old_weights[1][0], morty[0], atol=1e-05)\n assert_allclose(old_weights[1][1], morty[1], atol=1e-05)\n assert_allclose(np.zeros_like(jerry[1]), jerry[1]) # biases init to 0\n assert_allclose(np.zeros_like(jessica[1]), jessica[1]) # biases init to 0\n\n\n# a function to be called from the Lambda layer\ndef square_fn(x):\n return x * x\n\n\n@keras_test\ndef test_saving_lambda_custom_objects():\n input = Input(shape=(3,))\n x = Lambda(lambda x: square_fn(x), output_shape=(3,))(input)\n output = Dense(3)(x)\n\n model = Model(input, output)\n model.compile(loss=objectives.MSE,\n optimizer=optimizers.RMSprop(lr=0.0001),\n metrics=[metrics.categorical_accuracy])\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n out = 
model.predict(x)\n _, fname = tempfile.mkstemp('.h5')\n save_model(model, fname)\n\n model = load_model(fname, custom_objects={'square_fn': square_fn})\n os.remove(fname)\n\n out2 = model.predict(x)\n assert_allclose(out, out2, atol=1e-05)\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n" ]
[ [ "numpy.random.random", "numpy.zeros_like", "numpy.abs", "numpy.testing.assert_allclose" ] ]
argallab/model_based_shared_control
[ "ff42226b6345266f35a32021c7d0b44cc5948ec1" ]
[ "src/model_based_shared_control/scripts/collect_data_script.py" ]
[ "#!/usr/bin/env python\n\nimport gym\nimport rospy\nfrom sensor_msgs.msg import Joy\nfrom std_msgs.msg import String\nfrom model_based_shared_control.msg import State\nfrom pyglet.window import key\nimport numpy as np\n\nclass LunarLander():\n\n def __init__(self):\n # initalize node\n rospy.init_node('lunar_lander')\n\n # register shutdown hook\n rospy.on_shutdown(self.shutdown_hook)\n self.called_shutdown = False\n\n self.user_id = str(rospy.get_param('user_id'))\n self.user_id = self.user_id.zfill(2)\n self.main_joystick = rospy.get_param('main_joystick')\n self.inverted = rospy.get_param('inverted')\n self.data_path = rospy.get_param('data_path')\n\n # keep track of current keystroke\n self.user_actions = [0, 0]\n\n # build environment\n self.env = gym.make('LunarLanderMultiFire-v0')\n self.env.reset()\n self.env.render()\n\n # set up keystroke hooks\n self.env.viewer.window.on_key_press = self.key_press\n self.terminate = False\n\n # set up joy subscriber\n rospy.Subscriber('/joy', Joy, self.joy_callback)\n\n # set up state and shutdown publishers\n self.state_pub = rospy.Publisher('/state', State, queue_size=1)\n self.shutdown_pub = rospy.Publisher('/shutdown', String, queue_size=1)\n state_msg = State()\n done = False\n\n # set up goal locations\n self.goal_x_list = [10]\n self.goal_y_list = [6]\n self.goal_x_idx = 0\n\n # run system with input from user\n r = rospy.Rate(10)\n self.total_reward, self.total_steps, self.trial_idx = 0, 0, 1\n while not rospy.is_shutdown():\n if self.check_if_success():\n self.trial_idx += 1\n self.env.reset()\n if self.env.legs[0].ground_contact or self.env.legs[1].ground_contact or done:\n self.trial_idx += 1\n self.env.reset()\n done = False\n else:\n # get user input\n main_thruster, side_thruster = self.user_actions\n # write message\n state_msg.x = self.env.lander.position.x - self.goal_x_list[0]\n state_msg.y = self.env.lander.position.y - self.goal_y_list[0]\n state_msg.theta = self.env.lander.angle\n state_msg.x_dot = self.env.lander.linearVelocity.x\n state_msg.y_dot = self.env.lander.linearVelocity.y\n state_msg.theta_dot = self.env.lander.angularVelocity\n state_msg.u_1 = main_thruster\n state_msg.u_2 = side_thruster\n # publish message\n self.state_pub.publish(state_msg)\n # take step\n observation, reward, done, info = self.env.step(np.array([main_thruster, side_thruster]))\n self.total_reward += reward\n self.total_steps += 1\n\n if self.terminate == True:\n self.shutdown_hook()\n print('Terminating early')\n break\n\n # update screen and keep time\n self.env.render()\n r.sleep()\n\n def check_if_success(self):\n dist = np.sqrt(np.power((self.env.lander.position.x - self.env.goal_x), 2) + np.power((self.env.lander.position.y - self.env.goal_y), 2))\n x_vel = np.sqrt(np.power(self.env.lander.linearVelocity.x, 2))\n y_vel = np.sqrt(np.power(self.env.lander.linearVelocity.y,2))\n a_vel = np.sqrt(np.power(self.env.lander.angularVelocity,2))\n if dist < 0.9 and x_vel < 1.5 and y_vel < 1 and a_vel < 0.3:\n return True\n else:\n return False\n\n def shutdown_hook(self):\n if not self.called_shutdown:\n print('Shutting down')\n self.called_shutdown = True\n # store koopman operator\n koopman_filepath = self.data_path + 'models/' + self.user_id # koopman_operator.hpp sets file extension\n self.shutdown_pub.publish(koopman_filepath)\n print('Saved Koopman model.')\n\n def joy_callback(self, data):\n invert = 1\n if self.inverted:\n invert = -1\n if self.main_joystick == 'right':\n self.user_actions = [data.axes[3], invert*data.axes[0]]\n elif 
self.main_joystick == 'left':\n self.user_actions = [data.axes[1], invert*data.axes[2]]\n\n def key_press(self, k, mod):\n if k == key.SPACE:\n self.terminate = True\n\nif __name__=='__main__':\n ll = LunarLander()\n" ]
[ [ "numpy.array", "numpy.power" ] ]
mikoar/DAV_project
[ "8346e2c0b3171b7ddcf576eda0261533dbc6c826" ]
[ "scripts/germany_mobility.py" ]
[ "import plotly\nimport plotly.graph_objs as go\nimport pandas as pd\nimport sys\n\nmobility_germany = pd.read_csv(\"../data/mobility_germany.csv\")\n\nmobility_germany = mobility_germany.loc[mobility_germany.sub_region_1.isnull(), :]\n\ncolors = (['indianred']*2+['lightsalmon']*5)*12 + ['indianred']\n\nfig = go.Figure()\n\nfig.add_traces(go.Bar(\n x=mobility_germany.date,\n y=mobility_germany.retail_and_recreation_percent_change_from_baseline,\n hovertemplate= \"Date: %{x}<br>\" + \\\n \"% change in mobility: %{y}<br>\" + \\\n \"<extra></extra>\",\n marker_color=colors \n))\n\nfig.add_traces(go.Bar(\n x=mobility_germany.date,\n y=mobility_germany.grocery_and_pharmacy_percent_change_from_baseline,\n hovertemplate= \"Date: %{x}<br>\" + \\\n \"% change in mobility: %{y}<br>\" + \\\n \"<extra></extra>\",\n visible = False,\n marker_color=colors \n))\n\nfig.add_traces(go.Bar(\n x=mobility_germany.date,\n y=mobility_germany.parks_percent_change_from_baseline,\n hovertemplate= \"Date: %{x}<br>\" + \\\n \"% change in mobility: %{y}<br>\" + \\\n \"<extra></extra>\",\n visible = False,\n marker_color=colors \n))\n\n\nfig.add_traces(go.Bar(\n x=mobility_germany.date,\n y=mobility_germany.transit_stations_percent_change_from_baseline,\n hovertemplate= \"Date: %{x}<br>\" + \\\n \"% change in mobility: %{y}<br>\" + \\\n \"<extra></extra>\",\n visible = False,\n marker_color=colors \n))\n\nfig.add_traces(go.Bar(\n x=mobility_germany.date,\n y=mobility_germany.workplaces_percent_change_from_baseline,\n hovertemplate= \"Date: %{x}<br>\" + \\\n \"% change in mobility: %{y}<br>\" + \\\n \"<extra></extra>\",\n visible = False,\n marker_color=colors \n))\n\nfig.add_traces(go.Bar(\n x=mobility_germany.date,\n y=mobility_germany.residential_percent_change_from_baseline,\n hovertemplate= \"Date: %{x}<br>\" + \\\n \"% change in mobility: %{y}<br>\" + \\\n \"<extra></extra>\",\n visible = False,\n marker_color=colors \n))\n\nupdatemenus = list([\n dict(active=1,\n yanchor=\"top\",\n x=0.2,\n y=1.1,\n buttons=list([\n dict(label='Retail & Recreation',\n method='update',\n args=[{'visible': [True, False, False, False, False, False]},\n {'title': 'Retail & Recreation Mobility Change From Baseline'}]),\n dict(label='Grocery & Pharmacy',\n method='update',\n args=[{'visible': [False, True, False, False, False, False]},\n {'title': 'Grocery & Pharmacy Mobility Change From Baseline'}]),\n dict(label='Parks',\n method='update',\n args=[{'visible': [False, False, True, False, False, False]},\n {'title': 'Parks Mobility Change From Baseline'}]),\n dict(label='Transit Stations',\n method='update',\n args=[{'visible': [False, False, False, True, False, False]},\n {'title': 'Transit Stations Mobility Change From Baseline'}]),\n dict(label='Workplaces',\n method='update',\n args=[{'visible': [False, False, False, False, True, False]},\n {'title': 'Workplaces Mobility Change From Baseline'}]),\n dict(label='Residential',\n method='update',\n args=[{'visible': [False, False, False, False, False, True]},\n {'title': 'Residential Mobility Change From Baseline'}]),\n ]),\n )\n ])\n\nfig.update_layout(\n updatemenus = updatemenus,\n title={\n 'text': \"Mobility report\",\n 'x':0.5,\n 'y':0.92,\n 'xanchor': 'center',\n 'yanchor': 'top',\n \"font\": {\"size\": 20}})\n\nargs = sys.argv\nif len(args)>1:\n if args[1] == \"1\":\n name = args[0].split(\".\")[0]\n path = \"../plots/\"\n fig.write_html(\"{}{}.html\".format(path, name))\n print(\"The plot was saved to {}{}.html\".format(path, name))\n else:\n fig.show()\nelse:\n fig.show()" ]
[ [ "pandas.read_csv" ] ]
thomaskeefe/py_jive
[ "81f741afe2ef5f3d3dd006b7619690d6cb5ae09c" ]
[ "jive/utils.py" ]
[ "import numpy as np\nfrom scipy.sparse import issparse\nfrom scipy.sparse.linalg import svds\nfrom scipy.linalg import svd as full_svd\n\nfrom jive.lazymatpy.interface import LinearOperator\nfrom jive.lazymatpy.convert2scipy import convert2scipy\n\n\ndef svd_wrapper(X, rank=None):\n \"\"\"\n Computes the (possibly partial) SVD of a matrix. Handles the case where\n X is either dense or sparse.\n\n Parameters\n ----------\n X: array-like, shape (N, D)\n\n rank: rank of the desired SVD (required for sparse matrices)\n\n Output\n ------\n U, D, V\n\n U: array-like, shape (N, rank)\n Orthonormal matrix of left singular vectors.\n\n D: list, shape (rank, )\n Singular values in non-increasing order (e.g. D[0] is the largest).\n\n V: array-like, shape (D, rank)\n Orthonormal matrix of right singular vectors\n\n \"\"\"\n full = False\n if rank is None or rank == min(X.shape):\n full = True\n\n if isinstance(X, LinearOperator):\n scipy_svds = svds(convert2scipy(X), rank)\n U, D, V = fix_scipy_svds(scipy_svds)\n\n elif issparse(X) or not full:\n assert rank <= min(X.shape) - 1 # svds cannot compute the full svd\n scipy_svds = svds(X, rank)\n U, D, V = fix_scipy_svds(scipy_svds)\n\n else:\n U, D, V = full_svd(X, full_matrices=False)\n V = V.T\n\n if rank:\n U = U[:, :rank]\n D = D[:rank]\n V = V[:, :rank]\n\n return U, D, V\n\n\ndef fix_scipy_svds(scipy_svds):\n \"\"\"\n scipy.sparse.linalg.svds orders the singular values backwards,\n this function fixes this insanity and returns the singular values\n in decreasing order\n\n Parameters\n ----------\n scipy_svds: the out put from scipy.sparse.linalg.svds\n\n Output\n ------\n U, D, V\n ordered in decreasing singular values\n \"\"\"\n U, D, V = scipy_svds\n\n sv_reordering = np.argsort(-D)\n\n U = U[:, sv_reordering]\n D = D[sv_reordering]\n V = V.T[:, sv_reordering]\n\n return U, D, V\n\n\ndef centering(X, method='mean'):\n \"\"\"\n Mean centers columns of a matrix.\n\n Parameters\n ----------\n X: array-like, shape (n_samples, n_features)\n The input matrix.\n\n method: str, None\n How to center.\n\n Output\n ------\n X_centered, center\n\n X_centered: array-like, shape (n_samples, n_features)\n The centered version of X whose columns have mean zero.\n\n center: array-like, shape (n_features, )\n The column means of X.\n \"\"\"\n\n if type(method) == bool and method:\n method = 'mean'\n\n if issparse(X):\n raise NotImplementedError\n # X_centered = MeanCentered(blocks[bn], centers_[bn])\n else:\n if method == 'mean':\n center = np.array(X.mean(axis=0)).reshape(-1)\n X_centered = X - center\n else:\n center = None\n X_centered = X\n\n return X_centered, center\n" ]
[ [ "scipy.sparse.issparse", "scipy.sparse.linalg.svds", "numpy.argsort", "scipy.linalg.svd" ] ]
birkealine/konstanz-open-data-api
[ "5e7267020dd7db7592e1738d23f83990f9a92424" ]
[ "opencity/fetcher/xls_fetcher.py" ]
[ "import pandas as pd\nimport xlrd\n\nclass xlsFetcher(object):\n \"\"\"\n xlsFetcher: fetches xls files\n \"\"\"\n\n def __init__(self):\n self.flag_final = True\n\n def parse_xls(self, url):\n \"\"\"\n parses data from url to dataframe\n\n PARAMETERS:\n -----------\n url: String\n data id based link\n\n RETURNS:\n\t -----------\n DataFrame: data for url\n \"\"\"\n try:\n df = pd.read_excel(url, sheet_name=None)\n return df\n except:\n self.flag_final = False\n return pd.DataFrame()\n\n def load_data(self, url):\n \"\"\"\n function to load data set\n\n PARAMETERS:\n\t -----------\n url: String\n\t\t\tdata id based link\n\n RETURNS:\n\t -----------\n DataFrame: data for url\n Boolean: flag_final (true if success)\n \"\"\"\n return self.parse_xls(url), self.flag_final\n" ]
[ [ "pandas.read_excel", "pandas.DataFrame" ] ]
jeanveau/rasa_core
[ "953d54f057c19ef10b9c71cb8fcfc2d13dbefc65" ]
[ "rasa/core/test.py" ]
[ "import argparse\nimport asyncio\nimport json\nimport logging\nimport os\nimport typing\nimport warnings\nfrom collections import defaultdict, namedtuple\nfrom typing import Any, Dict, List, Optional, Text, Tuple\n\nfrom rasa.core.events import (\n ActionExecuted, UserUttered,\n ActionExecutionRejected)\n\nif typing.TYPE_CHECKING:\n from rasa.core.agent import Agent\n from rasa.core.trackers import DialogueStateTracker\n\nlogger = logging.getLogger(__name__)\n\nStoryEvalution = namedtuple(\"StoryEvaluation\",\n \"evaluation_store \"\n \"failed_stories \"\n \"action_list \"\n \"in_training_data_fraction\")\n\n\ndef create_argument_parser():\n \"\"\"Create argument parser for the evaluate script.\"\"\"\n import rasa.core.cli.arguments\n\n import rasa.core.cli.train\n from rasa.core import cli\n\n parser = argparse.ArgumentParser(\n description='evaluates a dialogue model')\n parent_parser = argparse.ArgumentParser(add_help=False)\n cli.test.add_evaluation_arguments(parent_parser)\n cli.arguments.add_model_and_story_group(parent_parser,\n allow_pretrained_model=False)\n rasa.core.cli.arguments.add_logging_option_arguments(parent_parser)\n subparsers = parser.add_subparsers(help='mode', dest='mode')\n subparsers.add_parser('default',\n help='default mode: evaluate a dialogue'\n ' model',\n parents=[parent_parser])\n subparsers.add_parser('compare',\n help='compare mode: evaluate multiple'\n ' dialogue models to compare '\n 'policies',\n parents=[parent_parser])\n\n return parser\n\n\nclass EvaluationStore(object):\n \"\"\"Class storing action, intent and entity predictions and targets.\"\"\"\n\n def __init__(\n self,\n action_predictions: Optional[List[str]] = None,\n action_targets: Optional[List[str]] = None,\n intent_predictions: Optional[List[str]] = None,\n intent_targets: Optional[List[str]] = None,\n entity_predictions: Optional[List[Dict[Text, Any]]] = None,\n entity_targets: Optional[List[Dict[Text, Any]]] = None\n ) -> None:\n self.action_predictions = action_predictions or []\n self.action_targets = action_targets or []\n self.intent_predictions = intent_predictions or []\n self.intent_targets = intent_targets or []\n self.entity_predictions = entity_predictions or []\n self.entity_targets = entity_targets or []\n\n def add_to_store(\n self,\n action_predictions: Optional[List[str]] = None,\n action_targets: Optional[List[str]] = None,\n intent_predictions: Optional[List[str]] = None,\n intent_targets: Optional[List[str]] = None,\n entity_predictions: Optional[List[Dict[Text, Any]]] = None,\n entity_targets: Optional[List[Dict[Text, Any]]] = None\n ) -> None:\n \"\"\"Add items or lists of items to the store\"\"\"\n for k, v in locals().items():\n if k != 'self' and v:\n attr = getattr(self, k)\n if isinstance(v, list):\n attr.extend(v)\n else:\n attr.append(v)\n\n def merge_store(self, other: 'EvaluationStore') -> None:\n \"\"\"Add the contents of other to self\"\"\"\n self.add_to_store(action_predictions=other.action_predictions,\n action_targets=other.action_targets,\n intent_predictions=other.intent_predictions,\n intent_targets=other.intent_targets,\n entity_predictions=other.entity_predictions,\n entity_targets=other.entity_targets)\n\n def has_prediction_target_mismatch(self):\n return (self.intent_predictions != self.intent_targets or\n self.entity_predictions != self.entity_targets or\n self.action_predictions != self.action_targets)\n\n def serialise_targets(self,\n include_actions=True,\n include_intents=True,\n include_entities=False):\n targets = []\n if 
include_actions:\n targets += self.action_targets\n if include_intents:\n targets += self.intent_targets\n if include_entities:\n targets += self.entity_targets\n\n return [json.dumps(t) if isinstance(t, dict) else t for t in targets]\n\n def serialise_predictions(self,\n include_actions=True,\n include_intents=True,\n include_entities=False):\n predictions = []\n\n if include_actions:\n predictions += self.action_predictions\n if include_intents:\n predictions += self.intent_predictions\n if include_entities:\n predictions += self.entity_predictions\n\n return [json.dumps(t) if isinstance(t, dict) else t\n for t in predictions]\n\n\nclass WronglyPredictedAction(ActionExecuted):\n \"\"\"The model predicted the wrong action.\n\n Mostly used to mark wrong predictions and be able to\n dump them as stories.\"\"\"\n\n type_name = \"wrong_action\"\n\n def __init__(self, correct_action, predicted_action,\n policy, confidence, timestamp=None):\n self.predicted_action = predicted_action\n super(WronglyPredictedAction, self).__init__(correct_action,\n policy,\n confidence,\n timestamp=timestamp)\n\n def as_story_string(self):\n return \"{} <!-- predicted: {} -->\".format(self.action_name,\n self.predicted_action)\n\n\nclass EndToEndUserUtterance(UserUttered):\n \"\"\"End-to-end user utterance.\n\n Mostly used to print the full end-to-end user message in the\n `failed_stories.md` output file.\"\"\"\n\n def as_story_string(self, e2e=True):\n return super(EndToEndUserUtterance, self).as_story_string(e2e=True)\n\n\nclass WronglyClassifiedUserUtterance(UserUttered):\n \"\"\"The NLU model predicted the wrong user utterance.\n\n Mostly used to mark wrong predictions and be able to\n dump them as stories.\"\"\"\n\n type_name = \"wrong_utterance\"\n\n def __init__(self,\n text,\n correct_intent,\n correct_entities,\n parse_data=None,\n timestamp=None,\n input_channel=None,\n predicted_intent=None,\n predicted_entities=None):\n self.predicted_intent = predicted_intent\n self.predicted_entities = predicted_entities\n\n intent = {\"name\": correct_intent}\n\n super(WronglyClassifiedUserUtterance, self).__init__(text,\n intent,\n correct_entities,\n parse_data,\n timestamp,\n input_channel)\n\n def as_story_string(self, e2e=True):\n from rasa.core.events import md_format_message\n correct_message = md_format_message(self.text,\n self.intent,\n self.entities)\n predicted_message = md_format_message(self.text,\n self.predicted_intent,\n self.predicted_entities)\n return (\"{}: {} <!-- predicted: {}: {} -->\"\n \"\").format(self.intent.get(\"name\"),\n correct_message,\n self.predicted_intent,\n predicted_message)\n\n\nasync def _generate_trackers(resource_name, agent,\n max_stories=None,\n use_e2e=False):\n from rasa.core.training.generator import TrainingDataGenerator\n\n from rasa.core import training\n story_graph = await training.extract_story_graph(\n resource_name, agent.domain, agent.interpreter, use_e2e)\n g = TrainingDataGenerator(story_graph, agent.domain,\n use_story_concatenation=False,\n augmentation_factor=0,\n tracker_limit=max_stories)\n return g.generate()\n\n\ndef _clean_entity_results(entity_results):\n return [{k: r[k] for k in (\"start\", \"end\", \"entity\", \"value\") if k in r}\n for r in entity_results]\n\n\ndef _collect_user_uttered_predictions(event,\n partial_tracker,\n fail_on_prediction_errors):\n from rasa.core.utils import (\n pad_list_to_size)\n\n user_uttered_eval_store = EvaluationStore()\n\n intent_gold = event.parse_data.get(\"true_intent\")\n predicted_intent = 
event.parse_data.get(\"intent\").get(\"name\")\n if predicted_intent is None:\n predicted_intent = \"None\"\n user_uttered_eval_store.add_to_store(intent_predictions=predicted_intent,\n intent_targets=intent_gold)\n\n entity_gold = event.parse_data.get(\"true_entities\")\n predicted_entities = event.parse_data.get(\"entities\")\n\n if entity_gold or predicted_entities:\n if len(entity_gold) > len(predicted_entities):\n predicted_entities = pad_list_to_size(predicted_entities,\n len(entity_gold),\n \"None\")\n elif len(predicted_entities) > len(entity_gold):\n entity_gold = pad_list_to_size(entity_gold,\n len(predicted_entities),\n \"None\")\n\n user_uttered_eval_store.add_to_store(\n entity_targets=_clean_entity_results(entity_gold),\n entity_predictions=_clean_entity_results(predicted_entities)\n )\n\n if user_uttered_eval_store.has_prediction_target_mismatch():\n partial_tracker.update(\n WronglyClassifiedUserUtterance(\n event.text, intent_gold,\n user_uttered_eval_store.entity_predictions,\n event.parse_data,\n event.timestamp,\n event.input_channel,\n predicted_intent,\n user_uttered_eval_store.entity_targets)\n )\n if fail_on_prediction_errors:\n raise ValueError(\n \"NLU model predicted a wrong intent. Failed Story:\"\n \" \\n\\n{}\".format(partial_tracker.export_stories()))\n else:\n end_to_end_user_utterance = EndToEndUserUtterance(\n event.text, event.intent, event.entities)\n partial_tracker.update(end_to_end_user_utterance)\n\n return user_uttered_eval_store\n\n\ndef _emulate_form_rejection(processor, partial_tracker):\n from rasa.core.policies import FormPolicy\n if partial_tracker.active_form.get(\"name\"):\n for p in processor.policy_ensemble.policies:\n if isinstance(p, FormPolicy):\n # emulate form rejection\n partial_tracker.update(ActionExecutionRejected(\n partial_tracker.active_form[\"name\"]))\n # check if unhappy path is covered by the train stories\n if not p.state_is_unhappy(partial_tracker, processor.domain):\n # this state is not covered by the stories\n del partial_tracker.events[-1]\n partial_tracker.active_form['rejected'] = False\n\n\ndef _collect_action_executed_predictions(processor, partial_tracker, event,\n fail_on_prediction_errors):\n from rasa.core.policies import FormPolicy\n\n action_executed_eval_store = EvaluationStore()\n\n gold = event.action_name\n\n action, policy, confidence = processor.predict_next_action(partial_tracker)\n predicted = action.name()\n\n if predicted != gold and FormPolicy.__name__ in policy:\n # FormPolicy predicted wrong action\n # but it might be Ok if form action is rejected\n _emulate_form_rejection(processor, partial_tracker)\n # try again\n action, policy, confidence = processor.predict_next_action(\n partial_tracker)\n predicted = action.name()\n\n action_executed_eval_store.add_to_store(action_predictions=predicted,\n action_targets=gold)\n\n if action_executed_eval_store.has_prediction_target_mismatch():\n partial_tracker.update(WronglyPredictedAction(gold, predicted,\n event.policy,\n event.confidence,\n event.timestamp))\n if fail_on_prediction_errors:\n error_msg = (\"Model predicted a wrong action. Failed Story: \"\n \"\\n\\n{}\".format(partial_tracker.export_stories()))\n if FormPolicy.__name__ in policy:\n error_msg += (\"FormAction is not run during \"\n \"evaluation therefore it is impossible to know \"\n \"if validation failed or this story is wrong. 
\"\n \"If the story is correct, add it to the \"\n \"training stories and retrain.\")\n raise ValueError(error_msg)\n else:\n partial_tracker.update(event)\n\n return action_executed_eval_store, policy, confidence\n\n\ndef _predict_tracker_actions(tracker, agent: 'Agent',\n fail_on_prediction_errors=False,\n use_e2e=False):\n from rasa.core.trackers import DialogueStateTracker\n\n processor = agent.create_processor()\n tracker_eval_store = EvaluationStore()\n\n events = list(tracker.events)\n\n partial_tracker = DialogueStateTracker.from_events(tracker.sender_id,\n events[:1],\n agent.domain.slots)\n\n tracker_actions = []\n\n for event in events[1:]:\n if isinstance(event, ActionExecuted):\n action_executed_result, policy, confidence = \\\n _collect_action_executed_predictions(\n processor, partial_tracker, event,\n fail_on_prediction_errors\n )\n tracker_eval_store.merge_store(action_executed_result)\n tracker_actions.append(\n {\"action\": action_executed_result.action_targets[0],\n \"predicted\": action_executed_result.action_predictions[0],\n \"policy\": policy,\n \"confidence\": confidence}\n )\n elif use_e2e and isinstance(event, UserUttered):\n user_uttered_result = \\\n _collect_user_uttered_predictions(\n event, partial_tracker, fail_on_prediction_errors)\n\n tracker_eval_store.merge_store(user_uttered_result)\n else:\n partial_tracker.update(event)\n\n return tracker_eval_store, partial_tracker, tracker_actions\n\n\ndef _in_training_data_fraction(action_list):\n \"\"\"Given a list of action items, returns the fraction of actions\n\n that were predicted using one of the Memoization policies.\"\"\"\n from rasa.core.policies import SimplePolicyEnsemble\n\n in_training_data = [\n a[\"action\"] for a in action_list\n if not SimplePolicyEnsemble.is_not_memo_policy(a[\"policy\"])\n ]\n\n return len(in_training_data) / len(action_list)\n\n\ndef collect_story_predictions(\n completed_trackers: List['DialogueStateTracker'],\n agent: 'Agent',\n fail_on_prediction_errors: bool = False,\n use_e2e: bool = False\n) -> Tuple[StoryEvalution, int]:\n \"\"\"Test the stories from a file, running them through the stored model.\"\"\"\n from rasa_nlu.test import get_evaluation_metrics\n from tqdm import tqdm\n\n story_eval_store = EvaluationStore()\n failed = []\n correct_dialogues = []\n num_stories = len(completed_trackers)\n\n logger.info(\"Evaluating {} stories\\n\"\n \"Progress:\".format(num_stories))\n\n action_list = []\n\n for tracker in tqdm(completed_trackers):\n tracker_results, predicted_tracker, tracker_actions = \\\n _predict_tracker_actions(tracker, agent,\n fail_on_prediction_errors, use_e2e)\n\n story_eval_store.merge_store(tracker_results)\n\n action_list.extend(tracker_actions)\n\n if tracker_results.has_prediction_target_mismatch():\n # there is at least one wrong prediction\n failed.append(predicted_tracker)\n correct_dialogues.append(0)\n else:\n correct_dialogues.append(1)\n\n logger.info(\"Finished collecting predictions.\")\n with warnings.catch_warnings():\n from sklearn.exceptions import UndefinedMetricWarning\n\n warnings.simplefilter(\"ignore\", UndefinedMetricWarning)\n report, precision, f1, accuracy = get_evaluation_metrics(\n [1] * len(completed_trackers), correct_dialogues)\n\n in_training_data_fraction = _in_training_data_fraction(action_list)\n\n log_evaluation_table([1] * len(completed_trackers),\n \"END-TO-END\" if use_e2e else \"CONVERSATION\",\n report, precision, f1, accuracy,\n in_training_data_fraction,\n include_report=False)\n\n return 
(StoryEvalution(evaluation_store=story_eval_store,\n failed_stories=failed,\n action_list=action_list,\n in_training_data_fraction=in_training_data_fraction),\n num_stories)\n\n\ndef log_failed_stories(failed, out_directory):\n \"\"\"Take stories as a list of dicts.\"\"\"\n if not out_directory:\n return\n with open(os.path.join(out_directory, 'failed_stories.md'), 'w',\n encoding=\"utf-8\") as f:\n if len(failed) == 0:\n f.write(\"<!-- All stories passed -->\")\n else:\n for failure in failed:\n f.write(failure.export_stories())\n f.write(\"\\n\\n\")\n\n\nasync def test(stories: Text,\n agent: 'Agent',\n max_stories: Optional[int] = None,\n out_directory: Optional[Text] = None,\n fail_on_prediction_errors: bool = False,\n use_e2e: bool = False):\n \"\"\"Run the evaluation of the stories, optionally plot the results.\"\"\"\n from rasa_nlu.test import get_evaluation_metrics\n\n completed_trackers = await _generate_trackers(stories, agent,\n max_stories, use_e2e)\n\n story_evaluation, _ = collect_story_predictions(completed_trackers, agent,\n fail_on_prediction_errors,\n use_e2e)\n\n evaluation_store = story_evaluation.evaluation_store\n\n with warnings.catch_warnings():\n from sklearn.exceptions import UndefinedMetricWarning\n\n warnings.simplefilter(\"ignore\", UndefinedMetricWarning)\n report, precision, f1, accuracy = get_evaluation_metrics(\n evaluation_store.serialise_targets(),\n evaluation_store.serialise_predictions()\n )\n\n if out_directory:\n plot_story_evaluation(evaluation_store.action_targets,\n evaluation_store.action_predictions,\n report, precision, f1, accuracy,\n story_evaluation.in_training_data_fraction,\n out_directory)\n\n log_failed_stories(story_evaluation.failed_stories, out_directory)\n\n return {\n \"report\": report,\n \"precision\": precision,\n \"f1\": f1,\n \"accuracy\": accuracy,\n \"actions\": story_evaluation.action_list,\n \"in_training_data_fraction\":\n story_evaluation.in_training_data_fraction,\n \"is_end_to_end_evaluation\": use_e2e\n }\n\n\ndef log_evaluation_table(golds, name,\n report, precision, f1, accuracy,\n in_training_data_fraction,\n include_report=True): # pragma: no cover\n \"\"\"Log the sklearn evaluation metrics.\"\"\"\n logger.info(\"Evaluation Results on {} level:\".format(name))\n logger.info(\"\\tCorrect: {} / {}\"\n \"\".format(int(len(golds) * accuracy), len(golds)))\n logger.info(\"\\tF1-Score: {:.3f}\".format(f1))\n logger.info(\"\\tPrecision: {:.3f}\".format(precision))\n logger.info(\"\\tAccuracy: {:.3f}\".format(accuracy))\n logger.info(\"\\tIn-data fraction: {:.3g}\"\n \"\".format(in_training_data_fraction))\n\n if include_report:\n logger.info(\"\\tClassification report: \\n{}\".format(report))\n\n\ndef plot_story_evaluation(test_y, predictions,\n report, precision, f1, accuracy,\n in_training_data_fraction,\n out_directory):\n \"\"\"Plot the results of story evaluation\"\"\"\n from sklearn.metrics import confusion_matrix\n from sklearn.utils.multiclass import unique_labels\n import matplotlib.pyplot as plt\n from rasa_nlu.test import plot_confusion_matrix\n\n log_evaluation_table(test_y, \"ACTION\",\n report, precision, f1, accuracy,\n in_training_data_fraction,\n include_report=True)\n\n cnf_matrix = confusion_matrix(test_y, predictions)\n\n plot_confusion_matrix(cnf_matrix,\n classes=unique_labels(test_y, predictions),\n title='Action Confusion matrix')\n\n fig = plt.gcf()\n fig.set_size_inches(int(20), int(20))\n fig.savefig(os.path.join(out_directory, \"story_confmat.pdf\"),\n bbox_inches='tight')\n\n\nasync def 
compare(models: Text,\n stories_file: Text,\n output: Text) -> None:\n \"\"\"Evaluates multiple trained models on a test set.\"\"\"\n from rasa.core.agent import Agent\n import rasa_nlu.utils as nlu_utils\n from rasa.core import utils\n\n num_correct = defaultdict(list)\n\n for run in nlu_utils.list_subdirectories(models):\n num_correct_run = defaultdict(list)\n\n for model in sorted(nlu_utils.list_subdirectories(run)):\n logger.info(\"Evaluating model {}\".format(model))\n\n agent = Agent.load(model)\n\n completed_trackers = await _generate_trackers(stories_file, agent)\n\n story_eval_store, no_of_stories = \\\n collect_story_predictions(completed_trackers,\n agent)\n\n failed_stories = story_eval_store.failed_stories\n policy_name = ''.join(\n [i for i in os.path.basename(model) if not i.isdigit()])\n num_correct_run[policy_name].append(no_of_stories -\n len(failed_stories))\n\n for k, v in num_correct_run.items():\n num_correct[k].append(v)\n\n utils.dump_obj_as_json_to_file(os.path.join(output, 'results.json'),\n num_correct)\n\n\ndef plot_curve(output: Text, no_stories: List[int]) -> None:\n \"\"\"Plot the results from run_comparison_evaluation.\n\n Args:\n output: Output directory to save resulting plots to\n no_stories: Number of stories per run\n \"\"\"\n import matplotlib.pyplot as plt\n import numpy as np\n from rasa.core import utils\n\n ax = plt.gca()\n\n # load results from file\n data = utils.read_json_file(os.path.join(output, 'results.json'))\n x = no_stories\n\n # compute mean of all the runs for keras/embed policies\n for label in data.keys():\n if len(data[label]) == 0:\n continue\n mean = np.mean(data[label], axis=0)\n std = np.std(data[label], axis=0)\n ax.plot(x, mean, label=label, marker='.')\n ax.fill_between(x,\n [m - s for m, s in zip(mean, std)],\n [m + s for m, s in zip(mean, std)],\n color='#6b2def',\n alpha=0.2)\n ax.legend(loc=4)\n ax.set_xlabel(\"Number of stories present during training\")\n ax.set_ylabel(\"Number of correct test stories\")\n plt.savefig(os.path.join(output, 'model_comparison_graph.pdf'),\n format='pdf')\n plt.show()\n\n\ndef main():\n from rasa.core.agent import Agent\n from rasa.core.interpreter import NaturalLanguageInterpreter\n from rasa.core.utils import (\n AvailableEndpoints, set_default_subparser)\n import rasa_nlu.utils as nlu_utils\n import rasa.core.cli\n from rasa.core import utils\n\n loop = asyncio.get_event_loop()\n\n # Running as standalone python application\n arg_parser = create_argument_parser()\n set_default_subparser(arg_parser, 'default')\n cmdline_arguments = arg_parser.parse_args()\n\n logging.basicConfig(level=cmdline_arguments.loglevel)\n _endpoints = AvailableEndpoints.read_endpoints(cmdline_arguments.endpoints)\n\n if cmdline_arguments.output:\n nlu_utils.create_dir(cmdline_arguments.output)\n\n if not cmdline_arguments.core:\n raise ValueError(\"you must provide a core model directory to evaluate \"\n \"using -d / --core\")\n if cmdline_arguments.mode == 'default':\n\n _interpreter = NaturalLanguageInterpreter.create(cmdline_arguments.nlu,\n _endpoints.nlu)\n\n _agent = Agent.load(cmdline_arguments.core, interpreter=_interpreter)\n\n stories = loop.run_until_complete(\n rasa.core.cli.train.stories_from_cli_args(cmdline_arguments))\n\n loop.run_until_complete(\n test(stories, _agent, cmdline_arguments.max_stories,\n cmdline_arguments.output,\n cmdline_arguments.fail_on_prediction_errors,\n cmdline_arguments.e2e))\n\n elif cmdline_arguments.mode == 'compare':\n compare(cmdline_arguments.core,\n 
cmdline_arguments.stories,\n cmdline_arguments.output)\n\n story_n_path = os.path.join(cmdline_arguments.core, 'num_stories.json')\n\n number_of_stories = utils.read_json_file(story_n_path)\n plot_curve(cmdline_arguments.output, number_of_stories)\n\n logger.info(\"Finished evaluation\")\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "sklearn.utils.multiclass.unique_labels", "matplotlib.pyplot.gcf", "matplotlib.pyplot.gca", "sklearn.metrics.confusion_matrix", "matplotlib.pyplot.show", "numpy.std", "numpy.mean" ] ]
wolfenfeld/Palantiri
[ "719d78b891bf8ac8e90f8eab1d55c57b3b7bbb70" ]
[ "palantiri/ClassificationPlotHandlers.py" ]
[ "\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, roc_curve, auc\n\n\nimport plotly.graph_objs as go\nimport plotly.figure_factory as ff\nfrom plotly.offline import iplot\n\nfrom palantiri.BasePlotHandlers import PlotHandler\n\n\nclass ClassifierPlotHandler(PlotHandler):\n \"\"\" Handles all the plots related of the chosen classifier. \"\"\"\n\n def __init__(self, dataset, trained_classifier, **params):\n \"\"\"\n Initialization function\n :param dataset: the dataset in a dict format with the following keys:\n 'data' - numpy array with all the data points.\n 'target' - the label of the corresponding data point.\n 'target_names' - the label name.\n\n :param trained_classifier: sklearn classifier (trained / fitted).\n In order to plot the ROC plot - the classifier should have the predict_proba ability.\n :param params: other params\n \"\"\"\n\n self._dataset = dataset\n self._trained_classifier = trained_classifier\n\n self._n_classes = len(set(dataset['target']))\n\n if hasattr(self._dataset, 'target_names'):\n self.class_names = self._dataset['target_names']\n else:\n self.class_names = ['Class {0}'.format(i) for i in range(self.n_classes)]\n\n # Score of the predicted target store.\n if hasattr(self._trained_classifier, 'predict_proba'):\n self._predicted_target_score = self._trained_classifier.predict_proba(self._dataset['data'])\n else:\n self._predicted_target_score = None\n\n self._confusion_matrix = None\n self.confusion_matrix_colorscale = 'Viridis'\n\n self.prediction_figure = None\n self.roc_figure = None\n self.confusion_matrix_figure = None\n\n super(ClassifierPlotHandler, self).__init__(**params)\n\n @classmethod\n def from_pandas_dataframe(cls, dataframe, trained_classifier, **params):\n \"\"\"\n Constructing the handler from a pandas dataframe.\n :param dataframe: the dataframe form which the handler is constructed.\n The 'target' column should be included in the dataframe.\n :param trained_classifier: sklearn classifier (trained / fitted).\n :param params: other params.\n :return: returns the classifier plot handler object.\n \"\"\"\n\n assert 'target' in dataframe.columns.values, 'target values not in dataframe'\n\n dataset = dict()\n dataset['data'] = dataframe.drop('target', axis=1).values\n dataset['target'] = dataframe['target'].values\n dataset['feature_names'] = dataframe.drop('target', axis=1).columns.values\n return cls(dataset, trained_classifier, **params)\n\n @property\n def trained_classifier(self):\n \"\"\"\n The trained classifier .\n :return: The classifier in the sklearn format.\n \"\"\"\n return self._trained_classifier\n\n @property\n def dataset(self):\n \"\"\"\n The dataset\n :return: The dataset as a dictionary\n \"\"\"\n return self._dataset\n\n @dataset.setter\n def dataset(self, dataset):\n \"\"\"\n The dataset setter.\n :param dataset: the new dataset\n \"\"\"\n self._dataset = dataset\n\n @property\n def predicted_target_score(self):\n \"\"\"\n The predicted score - available if classifier has the predict_proba functionality.\n :return: The predicted score.\n \"\"\"\n return self._predicted_target_score\n\n @property\n def confusion_matrix(self):\n \"\"\"\n The confusion matrix.\n :return: The confusion matrix as a numpy array.\n \"\"\"\n return self._confusion_matrix\n\n @property\n def n_classes(self):\n \"\"\"\n The number of classes.\n :return: An int representing the number of classes.\n \"\"\"\n return self._n_classes\n\n def build_confusion_matrix(self, normalize=False):\n \"\"\"\n Building the confusion matrix\n :param 
normalize: if True confusion matrix is normalized.\n \"\"\"\n\n prediction = self.trained_classifier.predict(self._dataset['data'])\n\n self._confusion_matrix = confusion_matrix(self._dataset['target'], prediction)\n\n if normalize:\n self._confusion_matrix = \\\n self._confusion_matrix.astype('float') / self._confusion_matrix.sum(axis=1)[:, np.newaxis]\n else:\n self._confusion_matrix = self._confusion_matrix\n\n def build_confusion_matrix_figure(self, figure_layout):\n \"\"\"\n Builds the confusion matrix figure in confusion_matrix_figure.\n :param figure_layout: figure layout - plot.ly layout object.\n \"\"\"\n\n if not self._confusion_matrix:\n self.build_confusion_matrix()\n\n cm = np.flipud(self._confusion_matrix)\n x = list(self.class_names)\n y = list(reversed(self.class_names))\n\n self.confusion_matrix_figure = ff.create_annotated_heatmap(z=cm, x=x, y=y,\n colorscale=self.confusion_matrix_colorscale)\n\n self.confusion_matrix_figure['layout'].update(figure_layout)\n\n def plot_confusion_matrix(self, figure_layout=None):\n \"\"\"\n Plotting the confusion matrix figure with plot.ly's iplot function.\n :param figure_layout: figure layout - plot.ly layout object.\n \"\"\"\n\n if not figure_layout:\n figure_layout = go.Layout(\n xaxis={'title': 'Confusion Matrix <br /><br />Predicted Value'},\n yaxis={'title': 'True Value'})\n\n if not self.confusion_matrix_figure:\n self.build_confusion_matrix_figure(figure_layout)\n else:\n self.confusion_matrix_figure['layout'].update(figure_layout)\n\n iplot(self.confusion_matrix_figure)\n\n def build_roc_figure(self, figure_layout=go.Layout()):\n \"\"\"\n Building the ROC curve figure of the classifier.\n :param figure_layout: figure layout - plot.ly layout object.\n \"\"\"\n\n data = list()\n\n if self.n_classes < 3:\n # False positive rate and true positive rate - computed from roc_curve()\n fpr, tpr, _ = roc_curve(self.dataset['target'], self.predicted_target_score[:, 1])\n\n # Area under curve.\n roc_auc = auc(fpr, tpr)\n\n # Updating the data list.\n data.append(go.Scatter(x=fpr,\n y=tpr,\n hoverinfo='y',\n mode='lines',\n line=dict(color='darkorange'),\n name='ROC curve (area = %0.2f)' % roc_auc))\n else:\n\n # False Positive, True Positive rates and Area Under Curve values for each class.\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n\n for i in range(self.n_classes):\n fpr[i], tpr[i], _ = roc_curve((self.dataset['target'] == i).astype(float),\n self.predicted_target_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n data.append(go.Scatter(x=fpr[i],\n y=tpr[i],\n hoverinfo='y',\n mode='lines',\n name='ROC curve of class {0} (area = {1:0.2f})'''.format(\n self.class_names[i], roc_auc[i])))\n\n # Diagonal\n data.append(go.Scatter(x=[0, 1], y=[0, 1],\n mode='lines',\n hoverinfo='skip',\n line=dict(color='navy', dash='dash'),\n showlegend=False))\n\n self.roc_figure = go.Figure(data=data, layout=figure_layout)\n\n def plot_roc(self, figure_layout=None):\n \"\"\"\n Plotting the ROC curve figure with plot.ly's iplot function.\n :param figure_layout: figure layout - plot.ly Layout object.\n \"\"\"\n\n if not figure_layout:\n figure_layout = go.Layout(title=dict(text='ROC Curve', x=0.5),\n xaxis=dict(title='False Positive Rate'),\n yaxis=dict(title='True Positive Rate'))\n\n if not self.roc_figure:\n self.build_roc_figure(figure_layout=figure_layout)\n else:\n self.roc_figure['layout'].update(figure_layout)\n\n iplot(self.roc_figure)\n\n def build_prediction_figure(self, figure_layout):\n \"\"\"\n Building the classifier prediction figure.\n 
:param figure_layout: figure layout - plot.ly Layout object.\n \"\"\"\n\n pass\n\n def plot_prediction(self, figure_layout=None):\n \"\"\"\n Plotting the prediction figure with plot.ly's iplot function.\n :param figure_layout: figure layout - plot.ly Layout object.\n \"\"\"\n\n if not figure_layout:\n figure_layout = go.Layout(title=dict(text='Classifier Prediction', x=0.5))\n\n if not self.prediction_figure:\n self.build_prediction_figure(figure_layout=figure_layout)\n else:\n self.prediction_figure['layout'].update(figure_layout)\n\n iplot(self.prediction_figure)\n\n def save_prediction_figure(self, file_name):\n \"\"\"\n Saving the prediction figure as an html file.\n :param file_name: the html file name.\n \"\"\"\n\n self.save_figure(self.prediction_figure, file_name)\n\n def save_roc_figure(self, file_name):\n \"\"\"\n Saving the ROC curve figure as an html file.\n :param file_name: the html file name.\n \"\"\"\n\n self.save_figure(self.roc_figure, file_name)\n\n def save_confusion_matrix_figure(self, file_name):\n \"\"\"\n Saving the confusion matrix figure as an html file.\n :param file_name: the html file name.\n \"\"\"\n\n self.save_figure(self.confusion_matrix_figure, file_name)\n\n\nclass TwoDimensionalClassifierPlotHandler(ClassifierPlotHandler):\n \"\"\" Handles all the plots related of the chosen classifier on 2D. \"\"\"\n\n def __init__(self, dataset, trained_classifier, **params):\n \"\"\"\n The initialization function of the 2D classifier plot handler.\n :param dataframe: the dataframe form which the handler is constructed.\n :param trained_classifier: sklearn classifier (trained / fitted).\n :param params: other params.\n \"\"\"\n\n dataset['data'] = dataset['data'][:, :2]\n\n super(TwoDimensionalClassifierPlotHandler, self).__init__(dataset, trained_classifier, **params)\n\n def build_prediction_figure(self, figure_layout=go.Layout(), step_size=0.01):\n \"\"\"\n Building the classifier prediction figure.\n :param figure_layout: figure layout - plot.ly Layout object.\n :param step_size: Plot resolution.\n \"\"\"\n\n data = list()\n\n x_min, x_max = self.dataset['data'][:, 0].min() - 1, self.dataset['data'][:, 0].max() + 1\n y_min, y_max = self.dataset['data'][:, 1].min() - 1, self.dataset['data'][:, 1].max() + 1\n\n x = np.arange(x_min, x_max, step_size)\n y = np.arange(y_min, y_max, step_size)\n x_mesh, y_mesh = np.meshgrid(x, y)\n\n z = self.trained_classifier.predict(np.column_stack((x_mesh.ravel(), y_mesh.ravel())))\n\n z = z.reshape(x_mesh.shape)\n\n data.append(go.Contour(x=x, y=y, z=z,\n showscale=False,\n hoverinfo='skip',\n colorscale='Viridis'))\n\n data.append(go.Scatter(x=self.dataset['data'][:, 0],\n y=self.dataset['data'][:, 1],\n text=[self.class_names[i] for i in self.dataset['target']],\n hoverinfo='text',\n mode='markers',\n marker=dict(color=self.dataset['target'],\n showscale=False,\n colorscale='Reds',\n line=dict(color='black', width=1))))\n\n if 'feature_names' in self.dataset.keys():\n figure_layout['xaxis'].update({'title': self.dataset['feature_names'][0]})\n figure_layout['yaxis'].update({'title': self.dataset['feature_names'][1]})\n\n self.prediction_figure = go.Figure(data=data, layout=figure_layout)\n\n\nclass ThreeDimensionalClassifierPlotHandler(ClassifierPlotHandler):\n \"\"\" Handles all the plots related of the chosen classifier on 3D. 
\"\"\"\n\n def __init__(self, dataset, trained_classifier, **params):\n \"\"\"\n The initialization function of the 3D classifier plot handler.\n :param dataframe: the dataframe form which the handler is constructed.\n :param trained_classifier: sklearn classifier (trained / fitted).\n :param params: other params.\n \"\"\"\n\n dataset['data'] = dataset['data'][:, :3]\n\n super(ThreeDimensionalClassifierPlotHandler, self).__init__(dataset, trained_classifier, **params)\n\n def build_prediction_figure(self, figure_layout=go.Layout()):\n \"\"\"\n Plotting the classifier prediction and saving the figure.\n :param figure_layout: figure layout - plot.ly Layout object.\n \"\"\"\n\n labels = self.trained_classifier.predict(self.dataset['data'])\n\n data = list()\n\n for label in set(labels):\n\n data_points = self.dataset['data'][np.in1d(labels, np.asarray(label))]\n\n data.append(go.Scatter3d(x=data_points[:, 0],\n y=data_points[:, 1],\n z=data_points[:, 2],\n text=self.class_names[label],\n hoverinfo='text',\n showlegend=True,\n name=self.class_names[label],\n mode='markers',\n marker=dict(\n line=dict(color='black', width=1))))\n\n if 'feature_names' in self.dataset.keys():\n figure_layout['scene'].update(\n dict(xaxis={'title': self.dataset['feature_names'][0]},\n yaxis={'title': self.dataset['feature_names'][1]},\n zaxis={'title': self.dataset['feature_names'][2]}))\n\n self.prediction_figure = go.Figure(data=data, layout=figure_layout)\n" ]
[ [ "numpy.flipud", "sklearn.metrics.roc_curve", "sklearn.metrics.auc", "numpy.asarray", "sklearn.metrics.confusion_matrix", "numpy.arange", "numpy.meshgrid" ] ]
SBCV/Open3D
[ "d335451e2b56897c3c8e37f68e1d41eee9faf5e9" ]
[ "python/test/test_color_map.py" ]
[ "import open3d as o3d\nimport numpy as np\nimport re\nimport os\nimport sys\nfrom open3d_test import download_fountain_dataset\n\n\ndef get_file_list(path, extension=None):\n\n def sorted_alphanum(file_list_ordered):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [\n convert(c) for c in re.split('([0-9]+)', key)\n ]\n return sorted(file_list_ordered, key=alphanum_key)\n\n if extension is None:\n file_list = [\n path + f\n for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))\n ]\n else:\n file_list = [\n path + f\n for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f)) and\n os.path.splitext(f)[1] == extension\n ]\n file_list = sorted_alphanum(file_list)\n return file_list\n\n\ndef test_color_map():\n path = download_fountain_dataset()\n depth_image_path = get_file_list(os.path.join(path, \"depth/\"),\n extension=\".png\")\n color_image_path = get_file_list(os.path.join(path, \"image/\"),\n extension=\".jpg\")\n assert (len(depth_image_path) == len(color_image_path))\n\n rgbd_images = []\n for i in range(len(depth_image_path)):\n depth = o3d.io.read_image(os.path.join(depth_image_path[i]))\n color = o3d.io.read_image(os.path.join(color_image_path[i]))\n rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n color, depth, convert_rgb_to_intensity=False)\n rgbd_images.append(rgbd_image)\n\n camera = o3d.io.read_pinhole_camera_trajectory(\n os.path.join(path, \"scene/key.log\"))\n mesh = o3d.io.read_triangle_mesh(\n os.path.join(path, \"scene\", \"integrated.ply\"))\n\n # Computes averaged color without optimization\n option = o3d.pipelines.color_map.ColorMapOptimizationOption()\n option.maximum_iteration = 0\n with o3d.utility.VerbosityContextManager(\n o3d.utility.VerbosityLevel.Debug) as cm:\n o3d.pipelines.color_map.color_map_optimization(mesh, rgbd_images,\n camera, option)\n\n # Rigid Optimization\n option.maximum_iteration = 5\n option.non_rigid_camera_coordinate = False\n with o3d.utility.VerbosityContextManager(\n o3d.utility.VerbosityLevel.Debug) as cm:\n o3d.pipelines.color_map.color_map_optimization(mesh, rgbd_images,\n camera, option)\n\n # Non-rigid Optimization\n option.maximum_iteration = 5\n option.non_rigid_camera_coordinate = True\n with o3d.utility.VerbosityContextManager(\n o3d.utility.VerbosityLevel.Debug) as cm:\n o3d.pipelines.color_map.color_map_optimization(mesh, rgbd_images,\n camera, option)\n\n # Black box test with hard-coded result values. The results of\n # color_map_optimization are deterministic. This test ensures the refactored\n # code produces the same output. This is only valid for using exactly the\n # same inputs and optimization options.\n vertex_colors = np.asarray(mesh.vertex_colors)\n assert vertex_colors.shape == (536872, 3)\n # We need to account for the acceptable variation in the least significant bit\n # which can occur with different JPEG libraries. The test value is pretty much\n # exact with libjpeg-turbo, but not with the original libjpeg.\n np.testing.assert_allclose(np.mean(vertex_colors, axis=0),\n [0.40307181, 0.37264626, 0.5436129],\n rtol=1. / 256.)\n" ]
[ [ "numpy.asarray", "numpy.mean" ] ]
mvoicer/cbic-2021-learning-preferences
[ "9ddd5449d5cae5a8f1cdafc6fa34b13ee83076b2" ]
[ "data_preparation.py" ]
[ "import pandas as pd\r\nimport numpy as np\r\n\r\n\r\ndef create_subsample(df_var, df_pref, nobj, index):\r\n \"\"\"\r\n Create sub-dataframes with the features (alternatives) and target (value in the objective space).\r\n :param df_var:\r\n :param df_pref:\r\n :param nobj:\r\n :param index:\r\n :return:\r\n \"\"\"\r\n\r\n # Create a df_aux that receive the features concatenated (objectives) and targets (preference)\r\n sub_df = pd.DataFrame(np.zeros((len(index), df_var.shape[1]*2 + nobj)))\r\n cont = 0\r\n for i in index:\r\n for j in index:\r\n # Concatenate the two rows - i.e. values of the objectives\r\n # and the preference between the two objectives\r\n sub_df.loc[cont] = pd.concat([df_var.loc[i], df_var.loc[j], df_pref.loc[i, j]], axis = 0, ignore_index = True)\r\n cont += 1\r\n return sub_df\r\n\r\ndef merge_matrices(idx_N_Q, preference_matrix, ml_predicted):\r\n \"\"\"\r\n Replace the predicted values in the preference matrix to calculate\r\n if the rankings (predicted vs preference) are equal or not.\r\n :param idx_N_Q: N-Q index\r\n :param preference_matrix: preference matrix\r\n :param ml_predicted: ranking obtained with the ML method\r\n :return: dataframe merged with the real values and the predicted values\r\n \"\"\"\r\n df_merged = preference_matrix.copy()\r\n nobj = ml_predicted.shape[1]\r\n\r\n # Gera todas as combinações do N-Q\r\n comb_idx = []\r\n for i in idx_N_Q:\r\n for k in idx_N_Q:\r\n comb_idx.append(tuple([i, k]))\r\n\r\n results = pd.DataFrame()\r\n x = 0\r\n for _ in range(0, df_merged.shape[1], df_merged.shape[0]):\r\n m = df_merged.iloc[:, nobj:nobj+df_merged.shape[0]].to_numpy()\r\n\r\n for i, idx in enumerate(comb_idx):\r\n m[idx] = ml_predicted.values[i, x]\r\n x += 1\r\n m = pd.DataFrame(m)\r\n results = pd.concat([results, m], ignore_index=False, axis=1)\r\n return results\r\n" ]
[ [ "pandas.DataFrame", "pandas.concat" ] ]
HarshTrivedi/optuna
[ "9ea2ebd690127ca7af6f3df7b53807c648733a95" ]
[ "tests/integration_tests/lightgbm_tuner_tests/test_optimize.py" ]
[ "import contextlib\nfrom tempfile import TemporaryDirectory\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import List\nfrom typing import Optional\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\n\nimport optuna\nfrom optuna.integration._lightgbm_tuner.optimize import _BaseTuner\nfrom optuna.integration._lightgbm_tuner.optimize import _OptunaObjective\nfrom optuna.integration._lightgbm_tuner.optimize import _OptunaObjectiveCV\nfrom optuna.integration._lightgbm_tuner.optimize import LightGBMTuner\nfrom optuna.integration._lightgbm_tuner.optimize import LightGBMTunerCV\nimport optuna.integration.lightgbm as lgb\nfrom optuna import type_checking\n\nif type_checking.TYPE_CHECKING:\n from typing import Union # NOQA\n\n from optuna.study import Study # NOQA\n\n\[email protected]\ndef turnoff_train(metric: Optional[str] = \"binary_logloss\") -> Generator[None, None, None]:\n\n unexpected_value = 0.5\n dummy_num_iterations = 1234\n\n class DummyBooster(object):\n def __init__(self):\n # type: () -> None\n\n self.best_score = {\n \"valid_0\": {metric: unexpected_value},\n }\n\n def current_iteration(self):\n # type: () -> int\n\n return dummy_num_iterations\n\n dummy_booster = DummyBooster()\n\n with mock.patch(\"lightgbm.train\", return_value=dummy_booster):\n yield\n\n\[email protected]\ndef turnoff_cv(metric: Optional[str] = \"binary_logloss\") -> Generator[None, None, None]:\n\n unexpected_value = 0.5\n dummy_results = {\"{}-mean\".format(metric): [unexpected_value]}\n\n with mock.patch(\"lightgbm.cv\", return_value=dummy_results):\n yield\n\n\nclass TestOptunaObjective(object):\n def test_init_(self):\n # type: () -> None\n\n target_param_names = [\"learning_rate\"] # Invalid parameter name.\n\n with pytest.raises(NotImplementedError) as execinfo:\n _OptunaObjective(target_param_names, {}, None, {}, 0, \"tune_learning_rate\", None)\n\n assert execinfo.type is NotImplementedError\n\n def test_call(self):\n # type: () -> None\n\n target_param_names = [\"lambda_l1\"]\n lgbm_params = {} # type: Dict[str, Any]\n train_set = lgb.Dataset(None)\n val_set = lgb.Dataset(None)\n\n lgbm_kwargs = {\"valid_sets\": val_set}\n best_score = -np.inf\n\n with turnoff_train():\n objective = _OptunaObjective(\n target_param_names,\n lgbm_params,\n train_set,\n lgbm_kwargs,\n best_score,\n \"tune_lambda_l1\",\n None,\n )\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=10)\n\n assert study.best_value == 0.5\n\n\nclass TestOptunaObjectiveCV(object):\n def test_call(self) -> None:\n target_param_names = [\"lambda_l1\"]\n lgbm_params = {} # type: Dict[str, Any]\n train_set = lgb.Dataset(None)\n lgbm_kwargs = {} # type: Dict[str, Any]\n best_score = -np.inf\n\n with turnoff_cv():\n objective = _OptunaObjectiveCV(\n target_param_names,\n lgbm_params,\n train_set,\n lgbm_kwargs,\n best_score,\n \"tune_lambda_l1\",\n None,\n )\n study = optuna.create_study(direction=\"minimize\")\n study.optimize(objective, n_trials=10)\n\n assert study.best_value == 0.5\n\n\nclass TestBaseTuner(object):\n def test_get_booster_best_score(self):\n # type: () -> None\n\n expected_value = 1.0\n\n class DummyBooster(object):\n def __init__(self):\n # type: () -> None\n\n self.best_score = {\"valid_0\": {\"binary_logloss\": expected_value}}\n\n booster = DummyBooster()\n dummy_dataset = lgb.Dataset(None)\n\n tuner = _BaseTuner(lgbm_kwargs=dict(valid_sets=dummy_dataset))\n val_score = tuner._get_booster_best_score(booster)\n assert 
val_score == expected_value\n\n def test_higher_is_better(self):\n # type: () -> None\n\n for metric in [\n \"auc\",\n \"ndcg\",\n \"lambdarank\",\n \"rank_xendcg\",\n \"xendcg\",\n \"xe_ndcg\",\n \"xe_ndcg_mart\",\n \"xendcg_mart\",\n \"map\",\n \"mean_average_precision\",\n ]:\n tuner = _BaseTuner(lgbm_params={\"metric\": metric})\n assert tuner.higher_is_better()\n\n for metric in [\"rmsle\", \"rmse\", \"binary_logloss\"]:\n tuner = _BaseTuner(lgbm_params={\"metric\": metric})\n assert not tuner.higher_is_better()\n\n def test_get_booster_best_score__using_valid_names_as_str(self):\n # type: () -> None\n\n expected_value = 1.0\n\n class DummyBooster(object):\n def __init__(self):\n # type: () -> None\n\n self.best_score = {\"dev\": {\"binary_logloss\": expected_value}}\n\n booster = DummyBooster()\n dummy_dataset = lgb.Dataset(None)\n\n tuner = _BaseTuner(lgbm_kwargs={\"valid_names\": \"dev\", \"valid_sets\": dummy_dataset,})\n val_score = tuner._get_booster_best_score(booster)\n assert val_score == expected_value\n\n def test_get_booster_best_score__using_valid_names_as_list(self):\n # type: () -> None\n\n unexpected_value = 0.5\n expected_value = 1.0\n\n class DummyBooster(object):\n def __init__(self):\n # type: () -> None\n\n self.best_score = {\n \"train\": {\"binary_logloss\": unexpected_value},\n \"val\": {\"binary_logloss\": expected_value},\n }\n\n booster = DummyBooster()\n dummy_train_dataset = lgb.Dataset(None)\n dummy_val_dataset = lgb.Dataset(None)\n\n tuner = _BaseTuner(\n lgbm_kwargs={\n \"valid_names\": [\"train\", \"val\"],\n \"valid_sets\": [dummy_train_dataset, dummy_val_dataset],\n }\n )\n val_score = tuner._get_booster_best_score(booster)\n assert val_score == expected_value\n\n def test_compare_validation_metrics(self):\n # type: () -> None\n\n for metric in [\n \"auc\",\n \"ndcg\",\n \"lambdarank\",\n \"rank_xendcg\",\n \"xendcg\",\n \"xe_ndcg\",\n \"xe_ndcg_mart\",\n \"xendcg_mart\",\n \"map\",\n \"mean_average_precision\",\n ]:\n tuner = _BaseTuner(lgbm_params={\"metric\": metric})\n assert tuner.compare_validation_metrics(0.5, 0.1)\n assert not tuner.compare_validation_metrics(0.5, 0.5)\n assert not tuner.compare_validation_metrics(0.1, 0.5)\n\n for metric in [\"rmsle\", \"rmse\", \"binary_logloss\"]:\n tuner = _BaseTuner(lgbm_params={\"metric\": metric})\n assert not tuner.compare_validation_metrics(0.5, 0.1)\n assert not tuner.compare_validation_metrics(0.5, 0.5)\n assert tuner.compare_validation_metrics(0.1, 0.5)\n\n @pytest.mark.parametrize(\n \"metric, eval_at_param, expected\",\n [\n (\"auc\", {\"eval_at\": 5}, \"auc\"),\n (\"accuracy\", {\"eval_at\": 5}, \"accuracy\"),\n (\"rmsle\", {\"eval_at\": 5}, \"rmsle\"),\n (\"rmse\", {\"eval_at\": 5}, \"rmse\"),\n (\"binary_logloss\", {\"eval_at\": 5}, \"binary_logloss\"),\n (\"ndcg\", {\"eval_at\": 5}, \"ndcg@5\"),\n (\"ndcg\", {\"ndcg_at\": 5}, \"ndcg@5\"),\n (\"ndcg\", {\"ndcg_eval_at\": 5}, \"ndcg@5\"),\n (\"ndcg\", {\"eval_at\": [20]}, \"ndcg@20\"),\n (\"ndcg\", {\"eval_at\": [10, 20]}, \"ndcg@10\"),\n (\"ndcg\", {}, \"ndcg@1\"),\n (\"map\", {\"eval_at\": 5}, \"map@5\"),\n (\"map\", {\"eval_at\": [20]}, \"map@20\"),\n (\"map\", {\"eval_at\": [10, 20]}, \"map@10\"),\n (\"map\", {}, \"map@1\"),\n ],\n )\n def test_metric_with_eval_at(self, metric, eval_at_param, expected):\n # type: (str, Dict[str, Union[int, List[int]]], str) -> None\n\n params = {\"metric\": metric} # type: Dict[str, Union[str, int, List[int]]]\n params.update(eval_at_param)\n tuner = _BaseTuner(lgbm_params=params)\n assert 
tuner._metric_with_eval_at(metric) == expected\n\n def test_metric_with_eval_at_error(self):\n # type: () -> None\n\n tuner = _BaseTuner(lgbm_params={\"metric\": \"ndcg\", \"eval_at\": \"1\"})\n with pytest.raises(ValueError):\n tuner._metric_with_eval_at(\"ndcg\")\n\n\nclass TestLightGBMTuner(object):\n def _get_tuner_object(self, params={}, train_set=None, kwargs_options={}, study=None):\n # type: (Dict[str, Any], lgb.Dataset, Dict[str, Any], Optional[Study]) -> lgb.LightGBMTuner\n\n # Required keyword arguments.\n dummy_dataset = lgb.Dataset(None)\n\n kwargs = dict(\n num_boost_round=5, early_stopping_rounds=2, valid_sets=dummy_dataset, study=study\n )\n kwargs.update(kwargs_options)\n\n runner = lgb.LightGBMTuner(params, train_set, **kwargs)\n return runner\n\n def test_no_eval_set_args(self):\n # type: () -> None\n\n params = {} # type: Dict[str, Any]\n train_set = lgb.Dataset(None)\n with pytest.raises(ValueError) as excinfo:\n lgb.LightGBMTuner(params, train_set, num_boost_round=5, early_stopping_rounds=2)\n\n assert excinfo.type == ValueError\n assert str(excinfo.value) == \"`valid_sets` is required.\"\n\n @pytest.mark.parametrize(\n \"metric, study_direction\",\n [\n (\"auc\", \"minimize\"),\n (\"mse\", \"maximize\"),\n (None, \"maximize\"), # The default metric is binary_logloss.\n ],\n )\n def test_inconsistent_study_direction(self, metric: str, study_direction: str) -> None:\n\n params = {} # type: Dict[str, Any]\n if metric is not None:\n params[\"metric\"] = metric\n train_set = lgb.Dataset(None)\n valid_set = lgb.Dataset(None)\n study = optuna.create_study(direction=study_direction)\n with pytest.raises(ValueError) as excinfo:\n lgb.LightGBMTuner(\n params,\n train_set,\n valid_sets=[train_set, valid_set],\n num_boost_round=5,\n early_stopping_rounds=2,\n study=study,\n )\n\n assert excinfo.type == ValueError\n assert str(excinfo.value).startswith(\"Study direction is inconsistent with the metric\")\n\n def test_with_minimum_required_args(self):\n # type: () -> None\n\n runner = self._get_tuner_object()\n assert \"num_boost_round\" in runner.lgbm_kwargs\n assert \"num_boost_round\" not in runner.auto_options\n assert runner.lgbm_kwargs[\"num_boost_round\"] == 5\n\n def test__parse_args_wrapper_args(self):\n # type: () -> None\n\n params = {} # type: Dict[str, Any]\n train_set = lgb.Dataset(None)\n val_set = lgb.Dataset(None)\n kwargs = dict(\n num_boost_round=12,\n early_stopping_rounds=10,\n valid_sets=val_set,\n time_budget=600,\n sample_size=1000,\n )\n runner = lgb.LightGBMTuner(params, train_set, **kwargs)\n new_args = [\"time_budget\", \"time_budget\", \"sample_size\"]\n for new_arg in new_args:\n assert new_arg not in runner.lgbm_kwargs\n assert new_arg in runner.auto_options\n\n @pytest.mark.parametrize(\n \"metric, study_direction, expected\",\n [(\"auc\", \"maximize\", -np.inf), (\"mse\", \"minimize\", np.inf),],\n )\n def test_best_score(self, metric: str, study_direction: str, expected: float) -> None:\n with turnoff_train(metric=metric):\n study = optuna.create_study(direction=study_direction)\n runner = self._get_tuner_object(\n params=dict(lambda_l1=0.0, metric=metric), kwargs_options={}, study=study,\n )\n assert runner.best_score == expected\n runner.tune_regularization_factors()\n assert runner.best_score == 0.5\n\n def test_best_params(self) -> None:\n unexpected_value = 20 # out of scope.\n\n with turnoff_train():\n study = optuna.create_study()\n runner = self._get_tuner_object(\n params=dict(lambda_l1=unexpected_value,), kwargs_options={}, 
study=study,\n )\n assert runner.best_params[\"lambda_l1\"] == unexpected_value\n runner.tune_regularization_factors()\n assert runner.best_params[\"lambda_l1\"] != unexpected_value\n\n def test_sample_train_set(self):\n # type: () -> None\n\n sample_size = 3\n\n X_trn = np.random.uniform(10, size=50).reshape((10, 5))\n y_trn = np.random.randint(2, size=10)\n train_dataset = lgb.Dataset(X_trn, label=y_trn)\n runner = self._get_tuner_object(\n train_set=train_dataset, kwargs_options=dict(sample_size=sample_size)\n )\n runner.sample_train_set()\n\n # Workaround for mypy.\n if not type_checking.TYPE_CHECKING:\n runner.train_subset.construct() # Cannot get label before construct `lgb.Dataset`.\n assert runner.train_subset.get_label().shape[0] == sample_size\n\n def test_time_budget(self) -> None:\n unexpected_value = 1.1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(\n params=dict(\n feature_fraction=unexpected_value, # set default as unexpected value.\n ),\n kwargs_options=dict(time_budget=0,),\n )\n assert len(runner.study.trials) == 0\n # No trials run because `time_budget` is set to zero.\n runner.tune_feature_fraction()\n assert runner.lgbm_params[\"feature_fraction\"] == unexpected_value\n assert len(runner.study.trials) == 0\n\n def test_tune_feature_fraction(self):\n # type: () -> None\n\n unexpected_value = 1.1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(\n params=dict(\n feature_fraction=unexpected_value, # set default as unexpected value.\n ),\n )\n assert len(runner.study.trials) == 0\n runner.tune_feature_fraction()\n\n assert runner.lgbm_params[\"feature_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 7\n\n def test_tune_num_leaves(self):\n # type: () -> None\n\n unexpected_value = 1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(params=dict(num_leaves=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_num_leaves()\n\n assert runner.lgbm_params[\"num_leaves\"] != unexpected_value\n assert len(runner.study.trials) == 20\n\n def test_tune_num_leaves_negative_max_depth(self):\n # type: () -> None\n\n params = {\n \"metric\": \"binary_logloss\",\n \"max_depth\": -1,\n } # type: Dict[str, Any]\n X_trn = np.random.uniform(10, size=(10, 5))\n y_trn = np.random.randint(2, size=10)\n train_dataset = lgb.Dataset(X_trn, label=y_trn)\n valid_dataset = lgb.Dataset(X_trn, label=y_trn)\n\n runner = lgb.LightGBMTuner(\n params,\n train_dataset,\n num_boost_round=3,\n early_stopping_rounds=2,\n valid_sets=valid_dataset,\n )\n runner.tune_num_leaves()\n assert len(runner.study.trials) == 20\n\n def test_tune_bagging(self):\n # type: () -> None\n\n unexpected_value = 1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(params=dict(bagging_fraction=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_bagging()\n\n assert runner.lgbm_params[\"bagging_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 10\n\n def test_tune_feature_fraction_stage2(self):\n # type: () -> None\n\n unexpected_value = 0.5\n\n with turnoff_train():\n runner = self._get_tuner_object(params=dict(feature_fraction=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_feature_fraction_stage2()\n\n assert runner.lgbm_params[\"feature_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 6\n\n def test_tune_regularization_factors(self):\n # type: () -> None\n\n unexpected_value = 20 # out of scope.\n\n with 
turnoff_train():\n runner = self._get_tuner_object(\n params=dict(lambda_l1=unexpected_value,), # set default as unexpected value.\n )\n assert len(runner.study.trials) == 0\n runner.tune_regularization_factors()\n\n assert runner.lgbm_params[\"lambda_l1\"] != unexpected_value\n assert len(runner.study.trials) == 20\n\n def test_tune_min_data_in_leaf(self):\n # type: () -> None\n\n unexpected_value = 1 # out of scope.\n\n with turnoff_train():\n runner = self._get_tuner_object(\n params=dict(\n min_child_samples=unexpected_value, # set default as unexpected value.\n ),\n )\n assert len(runner.study.trials) == 0\n runner.tune_min_data_in_leaf()\n\n assert runner.lgbm_params[\"min_child_samples\"] != unexpected_value\n assert len(runner.study.trials) == 5\n\n def test_when_a_step_does_not_improve_best_score(self):\n # type: () -> None\n\n params = {} # type: Dict\n valid_data = np.zeros((10, 10))\n valid_sets = lgb.Dataset(valid_data)\n\n tuner = LightGBMTuner(params, None, valid_sets=valid_sets)\n assert not tuner.higher_is_better()\n\n with mock.patch(\"lightgbm.train\"), mock.patch.object(\n _BaseTuner, \"_get_booster_best_score\", return_value=0.9\n ):\n tuner.tune_feature_fraction()\n\n assert \"feature_fraction\" in tuner.best_params\n assert tuner.best_score == 0.9\n\n # Assume that tuning `num_leaves` doesn't improve the `best_score`.\n with mock.patch(\"lightgbm.train\"), mock.patch.object(\n _BaseTuner, \"_get_booster_best_score\", return_value=1.1\n ):\n tuner.tune_num_leaves()\n\n def test_resume_run(self) -> None:\n params = {\"verbose\": -1} # type: Dict\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n tuner = LightGBMTuner(params, dataset, valid_sets=dataset, study=study)\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=1.0):\n tuner.tune_regularization_factors()\n\n n_trials = len(study.trials)\n assert n_trials == len(study.trials)\n\n tuner2 = LightGBMTuner(params, dataset, valid_sets=dataset, study=study)\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=1.0):\n tuner2.tune_regularization_factors()\n assert n_trials == len(study.trials)\n\n def test_get_best_booster(self) -> None:\n unexpected_value = 20 # out of scope.\n\n params = {\"verbose\": -1, \"lambda_l1\": unexpected_value} # type: Dict\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n tuner = LightGBMTuner(params, dataset, valid_sets=dataset, study=study)\n\n with pytest.raises(ValueError):\n tuner.get_best_booster()\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=0.0):\n tuner.tune_regularization_factors()\n\n best_booster = tuner.get_best_booster()\n assert best_booster.params[\"lambda_l1\"] != unexpected_value\n\n # TODO(toshihikoyanase): Remove this check when LightGBMTuner.best_booster is removed.\n with pytest.warns(DeprecationWarning):\n tuner.best_booster\n\n tuner2 = LightGBMTuner(params, dataset, valid_sets=dataset, study=study)\n\n # Resumed study does not have the best booster.\n with pytest.raises(ValueError):\n tuner2.get_best_booster()\n\n def test_best_booster_with_model_dir(self) -> None:\n params = {\"verbose\": -1} # type: Dict\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n with TemporaryDirectory() as tmpdir:\n tuner = LightGBMTuner(\n params, dataset, valid_sets=dataset, study=study, model_dir=tmpdir\n )\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=0.0):\n 
tuner.tune_regularization_factors()\n\n best_booster = tuner.get_best_booster()\n\n tuner2 = LightGBMTuner(\n params, dataset, valid_sets=dataset, study=study, model_dir=tmpdir\n )\n best_booster2 = tuner2.get_best_booster()\n\n assert best_booster.params == best_booster2.params\n\n @pytest.mark.parametrize(\"direction, overall_best\", [(\"minimize\", 1), (\"maximize\", 2),])\n def test_create_stepwise_study(self, direction: str, overall_best: int) -> None:\n\n tuner = LightGBMTuner({}, None, valid_sets=lgb.Dataset(np.zeros((10, 10))))\n\n def objective(trial: optuna.trial.Trial, value: float) -> float:\n\n trial.set_system_attr(\n optuna.integration._lightgbm_tuner.optimize._STEP_NAME_KEY,\n \"step{:.0f}\".format(value),\n )\n return trial.suggest_uniform(\"x\", value, value)\n\n study = optuna.create_study(direction=direction)\n study_step1 = tuner._create_stepwise_study(study, \"step1\")\n\n with pytest.raises(ValueError):\n study_step1.best_trial\n\n study_step1.optimize(lambda t: objective(t, 1), n_trials=1)\n\n study_step2 = tuner._create_stepwise_study(study, \"step2\")\n\n # `study` has a trial, but `study_step2` has no trials.\n with pytest.raises(ValueError):\n study_step2.best_trial\n\n study_step2.optimize(lambda t: objective(t, 2), n_trials=2)\n\n assert len(study_step1.trials) == 1\n assert len(study_step2.trials) == 2\n assert len(study.trials) == 3\n\n assert study_step1.best_trial.value == 1\n assert study_step2.best_trial.value == 2\n assert study.best_trial.value == overall_best\n\n def test_optuna_callback(self) -> None:\n params = {\"verbose\": -1} # type: Dict[str, Any]\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n callback_mock = mock.MagicMock()\n\n study = optuna.create_study()\n tuner = LightGBMTuner(\n params, dataset, valid_sets=dataset, study=study, optuna_callbacks=[callback_mock],\n )\n\n with mock.patch.object(_BaseTuner, \"_get_booster_best_score\", return_value=1.0):\n tuner._tune_params([\"num_leaves\"], 10, optuna.samplers.TPESampler(), \"num_leaves\")\n\n assert callback_mock.call_count == 10\n\n\nclass TestLightGBMTunerCV(object):\n def _get_tunercv_object(\n self,\n params: Dict[str, Any] = {},\n train_set: lgb.Dataset = None,\n kwargs_options: Dict[str, Any] = {},\n study: Optional[optuna.study.Study] = None,\n ) -> LightGBMTunerCV:\n\n # Required keyword arguments.\n kwargs = dict(\n num_boost_round=5, early_stopping_rounds=2, study=study\n ) # type: Dict[str, Any]\n kwargs.update(kwargs_options)\n\n runner = LightGBMTunerCV(params, train_set, **kwargs)\n return runner\n\n @pytest.mark.parametrize(\n \"metric, study_direction\",\n [\n (\"auc\", \"minimize\"),\n (\"mse\", \"maximize\"),\n (None, \"maximize\"), # The default metric is binary_logloss.\n ],\n )\n def test_inconsistent_study_direction(self, metric: str, study_direction: str) -> None:\n\n params = {} # type: Dict[str, Any]\n if metric is not None:\n params[\"metric\"] = metric\n train_set = lgb.Dataset(None)\n study = optuna.create_study(direction=study_direction)\n with pytest.raises(ValueError) as excinfo:\n LightGBMTunerCV(\n params, train_set, num_boost_round=5, early_stopping_rounds=2, study=study,\n )\n\n assert excinfo.type == ValueError\n assert str(excinfo.value).startswith(\"Study direction is inconsistent with the metric\")\n\n def test_with_minimum_required_args(self) -> None:\n\n runner = self._get_tunercv_object()\n assert \"num_boost_round\" in runner.lgbm_kwargs\n assert \"num_boost_round\" not in runner.auto_options\n assert runner.lgbm_kwargs[\"num_boost_round\"] == 
5\n\n def test_tune_feature_fraction(self) -> None:\n unexpected_value = 1.1 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(\n params=dict(\n feature_fraction=unexpected_value, # set default as unexpected value.\n ),\n )\n assert len(runner.study.trials) == 0\n runner.tune_feature_fraction()\n\n assert runner.lgbm_params[\"feature_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 7\n\n def test_tune_num_leaves(self) -> None:\n unexpected_value = 1 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(params=dict(num_leaves=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_num_leaves()\n\n assert runner.lgbm_params[\"num_leaves\"] != unexpected_value\n assert len(runner.study.trials) == 20\n\n def test_tune_bagging(self) -> None:\n unexpected_value = 1 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(params=dict(bagging_fraction=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_bagging()\n\n assert runner.lgbm_params[\"bagging_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 10\n\n def test_tune_feature_fraction_stage2(self) -> None:\n unexpected_value = 0.5\n\n with turnoff_cv():\n runner = self._get_tunercv_object(params=dict(feature_fraction=unexpected_value))\n assert len(runner.study.trials) == 0\n runner.tune_feature_fraction_stage2()\n\n assert runner.lgbm_params[\"feature_fraction\"] != unexpected_value\n assert len(runner.study.trials) == 6\n\n def test_tune_regularization_factors(self) -> None:\n unexpected_value = 20 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(\n params=dict(lambda_l1=unexpected_value,), # set default as unexpected value.\n )\n assert len(runner.study.trials) == 0\n runner.tune_regularization_factors()\n\n assert runner.lgbm_params[\"lambda_l1\"] != unexpected_value\n assert len(runner.study.trials) == 20\n\n def test_tune_min_data_in_leaf(self) -> None:\n unexpected_value = 1 # out of scope.\n\n with turnoff_cv():\n runner = self._get_tunercv_object(\n params=dict(\n min_child_samples=unexpected_value, # set default as unexpected value.\n ),\n )\n assert len(runner.study.trials) == 0\n runner.tune_min_data_in_leaf()\n\n assert runner.lgbm_params[\"min_child_samples\"] != unexpected_value\n assert len(runner.study.trials) == 5\n\n def test_resume_run(self) -> None:\n params = {\"verbose\": -1} # type: Dict\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n study = optuna.create_study()\n tuner = LightGBMTunerCV(params, dataset, study=study)\n\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner.tune_regularization_factors()\n\n n_trials = len(study.trials)\n assert n_trials == len(study.trials)\n\n tuner2 = LightGBMTuner(params, dataset, valid_sets=dataset, study=study)\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner2.tune_regularization_factors()\n assert n_trials == len(study.trials)\n\n def test_optuna_callback(self) -> None:\n params = {\"verbose\": -1} # type: Dict[str, Any]\n dataset = lgb.Dataset(np.zeros((10, 10)))\n\n callback_mock = mock.MagicMock()\n\n study = optuna.create_study()\n tuner = LightGBMTunerCV(params, dataset, study=study, optuna_callbacks=[callback_mock],)\n\n with mock.patch.object(_OptunaObjectiveCV, \"_get_cv_scores\", return_value=[1.0]):\n tuner._tune_params([\"num_leaves\"], 10, optuna.samplers.TPESampler(), \"num_leaves\")\n\n assert callback_mock.call_count == 
10\n" ]
[ [ "numpy.random.uniform", "numpy.random.randint", "numpy.zeros" ] ]
CAVED123/mol-cycle-gan
[ "feb8d7504d0078798ee70d6d5cda3f37b4f7a903" ]
[ "decode.py" ]
[ "import argparse\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport tqdm\n\nfrom jtvae import (Vocab,\n JTNNVAE)\n\n\nclass Options:\n def __init__(self,\n jtvae_path=\"./jtvae/\",\n hidden_size=450,\n latent_size=56,\n depth=3,\n jtnn_model_path=\"molvae/MPNVAE-h450-L56-d3-beta0.005/model.iter-4\",\n vocab_path=\"data/zinc/vocab.txt\"):\n self.jtvae_path = jtvae_path\n self.vocab_path = os.path.join(jtvae_path, vocab_path)\n self.hidden_size = hidden_size\n self.latent_size = latent_size\n self.depth = depth\n self.model_path = os.path.join(jtvae_path, jtnn_model_path)\n\n\ndef load_model(opts):\n vocab = [x.strip(\"\\r\\n \") for x in open(opts.vocab_path)]\n vocab = Vocab(vocab)\n\n hidden_size = int(opts.hidden_size)\n latent_size = int(opts.latent_size)\n depth = int(opts.depth)\n\n model = JTNNVAE(vocab, hidden_size, latent_size, depth)\n model.load_state_dict(torch.load(opts.model_path))\n\n return model.cuda()\n\n\ndef decode_from_jtvae(data_path, opts, model):\n smiles_df = pd.read_csv(data_path, index_col=0)\n mols = smiles_df.values\n returned_smiles = []\n\n tree_dims = int(opts.latent_size / 2)\n\n for i in tqdm.tqdm(range(mols.shape[0])):\n tree_vec = np.expand_dims(mols[i, 0:tree_dims], 0)\n mol_vec = np.expand_dims(mols[i, tree_dims:], 0)\n tree_vec = torch.autograd.Variable(torch.from_numpy(tree_vec).cuda().float())\n mol_vec = torch.autograd.Variable(torch.from_numpy(mol_vec).cuda().float())\n smi = model.decode(tree_vec, mol_vec, prob_decode=False)\n returned_smiles.append(smi)\n\n return returned_smiles\n\n\ndef decode(jtvae_path_tuple,\n jtvae_setting_tuple,\n encoding_data_tuple):\n jtvae_path, jtnn_model_path, vocab_path = jtvae_path_tuple\n hidden_size, latent_size, depth = jtvae_setting_tuple\n data_path, file_to_encode, save_name = encoding_data_tuple\n\n path_A_to_B = os.path.join(data_path, file_to_encode + 'A_to_B.csv')\n path_B_to_A = os.path.join(data_path, file_to_encode + 'B_to_A.csv')\n\n save_path_A_to_B = os.path.join(data_path, save_name + 'A_to_B.csv')\n save_path_B_to_A = os.path.join(data_path, save_name + 'B_to_A.csv')\n\n opts = Options(jtvae_path=jtvae_path,\n hidden_size=hidden_size,\n latent_size=latent_size,\n depth=depth,\n jtnn_model_path=jtnn_model_path,\n vocab_path=vocab_path)\n model = load_model(opts)\n\n smiles_A_to_B = decode_from_jtvae(path_A_to_B, opts, model)\n smiles_B_to_A = decode_from_jtvae(path_B_to_A, opts, model)\n\n df_to_save_A_to_B = pd.DataFrame(smiles_A_to_B, columns=['SMILES'])\n df_to_save_B_to_A = pd.DataFrame(smiles_B_to_A, columns=['SMILES'])\n\n df_to_save_A_to_B.to_csv(save_path_A_to_B, index=False)\n df_to_save_B_to_A.to_csv(save_path_B_to_A, index=False)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--jtvae_path\", default=\"./jtvae/\")\n parser.add_argument(\"--jtnn_model_path\", default=\"molvae/MPNVAE-h450-L56-d3-beta0.005/model.iter-4\")\n parser.add_argument(\"--vocab_path\", default=\"data/zinc/vocab.txt\")\n\n parser.add_argument(\"--hidden_size\", default=450, type=int)\n parser.add_argument(\"--latent_size\", default=56, type=int)\n parser.add_argument(\"--depth\", default=3, type=int)\n\n parser.add_argument(\"--data_path\", default=\"./data/results/aromatic_rings/\")\n parser.add_argument(\"--file_to_encode\", default=\"X_cycle_GAN_encoded_\")\n parser.add_argument(\"--save_name\", default=\"smiles_list_\")\n\n args = parser.parse_args()\n\n jtvae_path_tuple = (args.jtvae_path, args.jtnn_model_path, args.vocab_path)\n jtvae_setting_tuple = 
(args.hidden_size, args.latent_size, args.depth)\n encoding_data_tuple = (args.data_path, args.file_to_encode, args.save_name)\n\n decode(jtvae_path_tuple,\n jtvae_setting_tuple,\n encoding_data_tuple)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "torch.load", "pandas.read_csv", "pandas.DataFrame", "numpy.expand_dims", "torch.from_numpy" ] ]
CoastalHydrodynamicsLab/python-ctd
[ "af67120346eb0d0d506628a6584601cf3daf7f6a" ]
[ "tests/test_plotting.py" ]
[ "from pathlib import Path\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytest\n\nimport ctd\n\n\nmatplotlib.use(\"Agg\")\n\ndata_path = Path(__file__).parent.joinpath(\"data\")\n\n\ndef _assert_is_valid_plot_return_object(objs):\n if isinstance(objs, np.ndarray):\n for el in objs.flat:\n assert isinstance(el, plt.Axes), (\n \"one of 'objs' is not a \"\n \"matplotlib Axes instance, \"\n \"type encountered {0!r}\"\n \"\".format(el.__class__.__name__)\n )\n else:\n assert isinstance(objs, (plt.Artist, tuple, dict)), (\n \"objs is neither an ndarray of Artist instances nor a \"\n 'single Artist instance, tuple, or dict, \"objs\" is a {0!r} '\n \"\".format(objs.__class__.__name__)\n )\n\n\ndef _check_plot_works(f, *args, **kwargs):\n ax = f(*args, **kwargs)\n\n _assert_is_valid_plot_return_object(ax)\n plt.close()\n\n\n# BasicPlotting.\[email protected]\ndef xbt():\n yield ctd.from_edf(data_path.joinpath(\"XBT.EDF.zip\"))\n plt.close(\"all\")\n\n\[email protected]\ndef fsi():\n yield ctd.from_fsi(data_path.joinpath(\"FSI.txt.gz\"), skiprows=9)\n plt.close(\"all\")\n\n\[email protected]\ndef cnv():\n yield ctd.from_cnv(data_path.joinpath(\"small.cnv.bz2\"))\n plt.close(\"all\")\n\n\ndef test_xbt_plot(xbt):\n _check_plot_works(xbt[\"temperature\"].plot_cast)\n\n\ndef test_cnv_temperature(cnv):\n _check_plot_works(cnv[\"t090C\"].plot_cast)\n" ]
[ [ "matplotlib.use", "matplotlib.pyplot.close" ] ]
Gavin666Github/chineseocr
[ "0cfd0ff28984c01b192646acfc63f8ec26d0752b" ]
[ "detector/detectors.py" ]
[ "#coding:utf-8\nfrom detector.other import normalize\nimport numpy as np\nfrom detector.utils.cython_nms import nms as cython_nms\ntry:\n    from detector.utils.gpu_nms import gpu_nms\nexcept:\n    gpu_nms = cython_nms\n\ndef nms(dets, thresh):\n    if dets.shape[0] == 0:\n        return []\n\n    try:\n        return gpu_nms(dets, thresh, device_id=0)\n    except:\n        return cython_nms(dets, thresh)\n\n\n\nfrom detector.text_proposal_connector import TextProposalConnector\n\nclass TextDetector:\n    \"\"\"\n    Detect text from an image\n    \"\"\"\n    def __init__(self, MAX_HORIZONTAL_GAP=30, MIN_V_OVERLAPS=0.6, MIN_SIZE_SIM=0.6):\n        \"\"\"\n        pass\n        \"\"\"\n        self.text_proposal_connector = TextProposalConnector(MAX_HORIZONTAL_GAP, MIN_V_OVERLAPS, MIN_SIZE_SIM)\n\n    def detect(self, text_proposals, scores, size,\n               TEXT_PROPOSALS_MIN_SCORE=0.7,\n               TEXT_PROPOSALS_NMS_THRESH=0.3,\n               TEXT_LINE_NMS_THRESH=0.3,\n               MIN_RATIO=1.0,\n               LINE_MIN_SCORE=0.8,\n               TEXT_PROPOSALS_WIDTH=5,\n               MIN_NUM_PROPOSALS=1\n               ):\n        \"\"\"\n        Detecting texts from an image\n        :return: the bounding boxes of the detected texts\n        @@param:TEXT_PROPOSALS_MIN_SCORE:TEXT_PROPOSALS_MIN_SCORE=0.7  # score threshold for filtering character boxes\n        @@param:TEXT_PROPOSALS_NMS_THRESH:TEXT_PROPOSALS_NMS_THRESH=0.3  # NMS threshold for removing duplicate character boxes\n        @@param:TEXT_LINE_NMS_THRESH:TEXT_LINE_NMS_THRESH=0.3  # NMS threshold for removing duplicate text lines\n        @@param:MIN_RATIO:MIN_RATIO=1.0  # 0.01, width/height ratio\n        @@param:LINE_MIN_SCORE:  # confidence threshold for a text line\n        @@param:TEXT_PROPOSALS_WIDTH  # default minimum width of each character box\n        @@param:MIN_NUM_PROPOSALS,MIN_NUM_PROPOSALS=1  # minimum number of character proposals per line\n\n        \"\"\"\n        #text_proposals, scores=self.text_proposal_detector.detect(im, cfg.MEAN)\n        keep_inds = np.where(scores > TEXT_PROPOSALS_MIN_SCORE)[0]\n\n        text_proposals, scores = text_proposals[keep_inds], scores[keep_inds]\n\n        sorted_indices = np.argsort(scores.ravel())[::-1]\n        text_proposals, scores = text_proposals[sorted_indices], scores[sorted_indices]\n\n        # nms for text proposals\n        if len(text_proposals) > 0:\n            keep_inds = nms(np.hstack((text_proposals, scores)), TEXT_PROPOSALS_NMS_THRESH)  # NMS removes duplicate boxes\n            text_proposals, scores = text_proposals[keep_inds], scores[keep_inds]\n\n            scores = normalize(scores)\n\n            text_lines = self.text_proposal_connector.get_text_lines(text_proposals, scores, size)  # merge proposals into text lines\n            return text_lines\n        else:\n            return []\n\n" ]
[ [ "numpy.hstack", "numpy.where" ] ]
Chris19920210/Swin-Transformer-TF
[ "ad9a35fd8c6303fbfa988c7a96845f96560c1184" ]
[ "swintransformer/data_processing/train_validation_split.py" ]
[ "import tensorflow as tf\nimport pathlib\nimport os\nimport numpy as np\nimport shutil\n\nflags = tf.compat.v1.flags\nflags.DEFINE_string('input', './input', 'Directory to input.')\nflags.DEFINE_string('output', './output', 'Directory to output. ')\nflags.DEFINE_float('ratio', 0.2, 'ratio')\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n    data_root = pathlib.Path(FLAGS.input)\n    os.mkdir(os.path.join(FLAGS.output, \"train\"))\n    os.mkdir(os.path.join(FLAGS.output, \"test\"))\n\n    all_image_paths = list(data_root.glob('*/*'))\n\n    for item in data_root.glob('*/'):\n        if item.is_dir():\n            test_dir = os.path.join(FLAGS.output, \"test\", item.name)\n            train_dir = os.path.join(FLAGS.output, \"train\", item.name)\n\n            if not os.path.exists(test_dir):\n                os.makedirs(test_dir)\n            if not os.path.exists(train_dir):\n                os.makedirs(train_dir)\n\n    for path in all_image_paths:\n        input_path = path.absolute()\n        parent_name = path.parent.name\n        file_name = path.name\n        if np.random.uniform() < FLAGS.ratio:\n            shutil.copy(input_path, os.path.join(FLAGS.output, \"test\", parent_name, file_name))\n        else:\n            shutil.copy(input_path, os.path.join(FLAGS.output, \"train\", parent_name, file_name))\n\n    print(\"Split done!\")\n\n\nif __name__ == \"__main__\":\n    main(0)\n\n\n\n\n\n\n\n\n\n" ]
[ [ "numpy.random.uniform" ] ]
CianciarusoCataldo/nn-object-detector
[ "a1a51f3e8ff295851759664c9155f8ceecb33256" ]
[ "detector/object_detection/keras_retinanet/initializers.py" ]
[ "\nimport keras\n\nimport numpy as np\nimport math\n\n\nclass PriorProbability(keras.initializers.Initializer):\n    \"\"\"\n    Initializer applies a prior probability.\n    \"\"\"\n\n    def __init__(self, probability=0.01):\n        self.probability = probability\n\n    def get_config(self):\n        return {\n            'probability': self.probability\n        }\n\n    def __call__(self, shape, dtype=None):\n        # set bias to -log((1 - p)/p) for foreground\n        result = np.ones(shape, dtype=dtype) * -math.log((1 - self.probability) / self.probability)\n\n        return result\n" ]
[ [ "numpy.ones" ] ]
MaxU11/playground
[ "240182f88836e860fc144a82e98c0f4028294334" ]
[ "pommerman/cli/Tournament.py" ]
[ "import csv\nimport os\nimport random\nimport time\nimport atexit\nimport numpy as np\nfrom datetime import datetime\n\nfrom pommerman.agents.abstract_mcts_skeleton import AbstractMCTSSkeleton\nfrom pommerman import utility\nfrom pommerman import constants\nfrom pommerman import make\n\n\ndef run(env, agent_names, config, render, do_sleep, record_pngs_dir=None, record_json_dir=None):\n '''Runs a game'''\n if record_pngs_dir and not os.path.isdir(record_pngs_dir):\n os.makedirs(record_pngs_dir)\n if record_json_dir and not os.path.isdir(record_json_dir):\n os.makedirs(record_json_dir)\n\n obs = env.reset()\n done = False\n\n observations = []\n\n steps = 0\n while not done:\n if render:\n env.render(\n record_pngs_dir=record_pngs_dir,\n record_json_dir=record_json_dir,\n do_sleep=do_sleep)\n if render is False and record_json_dir:\n env.save_json(record_json_dir)\n time.sleep(1.0 / env._render_fps)\n actions = env.act(obs)\n steps += 1\n obs, reward, done, info = env.step(actions)\n\n if max(reward) > 0 and done is False:\n raise ValueError('Why?????????????????????')\n observations.append(obs)\n\n if render:\n env.render(\n record_pngs_dir=record_pngs_dir,\n record_json_dir=record_json_dir,\n do_sleep=do_sleep)\n print(f'game end: reward={reward}')\n if do_sleep:\n time.sleep(5)\n else:\n time.sleep(5)\n env.render(close=True)\n\n if render is False and record_json_dir:\n env.save_json(record_json_dir)\n time.sleep(1.0 / env._render_fps)\n\n if record_json_dir:\n finished_at = datetime.now().isoformat()\n utility.join_json_state(record_json_dir, agent_names, finished_at,\n config, info)\n\n return info, steps, observations, reward\n\n\ndef run_tournament(tournament_name, agent_pool1, agent_pool2, match_count, AllVsAll=True, create_csv=True, get_observations=False, only_one_side=False, last_num=0, seed=None, csv_dir=None):\n '''Wrapper to help start the game'''\n config = 'OneVsOne-v0'\n record_pngs_dir = None #f'C:/tmp/Results/PNGS'\n record_json_dir = None #f'C:/tmp/Results/JSON'\n if csv_dir == None:\n csv_dir = f'C:/tmp/Results/CSV'\n game_state_file = None\n render_mode = 'human'\n do_sleep = False\n render = False\n\n match_observations = None\n if get_observations:\n match_observations = []\n\n if create_csv and not os.path.isdir(csv_dir):\n os.makedirs(csv_dir)\n game_details = [['p1','p2','result','winner','time','steps', 'add_info_p1', 'add_info_p2']]\n write_csv_pos = 0\n\n duel_num = 0\n if AllVsAll:\n total_duels = len(agent_pool1) * len(agent_pool2)\n else:\n total_duels = len(agent_pool1)\n game_num = 0\n if only_one_side:\n total_games = total_duels * match_count\n else:\n total_games = total_duels * match_count * 2\n tot_wins = tot_tie = tot_loss = 0\n p1_num = 0\n for p1_a in agent_pool1:\n if AllVsAll:\n tmp_agent_pool2 = agent_pool2\n else:\n tmp_agent_pool2 = [agent_pool2[p1_num]]\n p1_num += 1\n for p2_a in tmp_agent_pool2:\n duel_num += 1\n print(f'Duel {duel_num}/{total_duels}: {p1_a[0]} vs {p2_a[0]}')\n\n wins = ties = loss = 0\n side = 2\n if only_one_side:\n side = 1\n for d in range(side):\n if d == 0:\n agents = [p1_a[1](**p1_a[2]), p2_a[1](**p2_a[2])]\n agent_names = [p1_a[0], p2_a[0]]\n else:\n agents = [p2_a[1](**p2_a[2]), p1_a[1](**p1_a[2])]\n agent_names = [p2_a[0], p1_a[0]]\n\n env = make(config, agents, game_state_file, render_mode=render_mode)\n if seed is None:\n # Pick a random seed between 0 and 2^31 - 1\n seed = random.randint(0, np.iinfo(np.int32).max)\n np.random.seed(seed)\n random.seed(seed)\n env.seed(seed)\n\n m_wins = m_ties = m_loss = 0\n 
for i in range(match_count):\n game_num += 1\n\n if game_num > last_num:\n record_pngs_dir_ = None\n record_json_dir_ = None\n if record_pngs_dir:\n record_pngs_dir_ = f'{record_pngs_dir}/{tournament_name}/{agent_names[0]}_vs_{agent_names[1]}_{i+1}'\n if record_json_dir:\n record_json_dir_ = f'{record_json_dir}/{tournament_name}/{agent_names[0]}_vs_{agent_names[1]}_{i+1}'\n\n while True:\n try:\n start = time.time()\n info, steps, observations, reward = run(env, agent_names, config, render, do_sleep, record_pngs_dir_, record_json_dir_)\n break\n except Exception as e:\n print(f'\\n\\skipped game due to error:\\n{e}\\n')\n\n if match_observations:\n match_observations.append((observations, reward))\n\n total_time = time.time() - start\n winner = -1\n if info['result'] == constants.Result.Win:\n winner = int(info['winners'][0])\n if winner == 0: m_wins += 1\n else: m_loss += 1\n else:\n m_ties += 1\n\n agent_info_1 = {}\n agent_info_2 = {}\n if isinstance(agents[0], AbstractMCTSSkeleton):\n agents[0].get_agent_info(agent_info_1)\n if isinstance(agents[1], AbstractMCTSSkeleton):\n agents[1].get_agent_info(agent_info_2)\n\n game_details.append([agent_names[0], agent_names[1], info['result'], winner, total_time, steps, agent_info_1, agent_info_2])\n\n print(f\"-- {game_num} / {total_games} Result: \", game_details[-1])\n\n ties += m_ties\n if d == 0:\n wins += m_wins\n loss += m_loss\n else:\n wins += m_loss\n loss += m_wins\n\n env.close\n del env\n\n print(f'Result from {p1_a[0]} vs {p2_a[0]}: {wins} p1, {ties} ties, {loss} p2')\n tot_wins += wins\n tot_tie += ties\n tot_loss += loss\n\n if create_csv:\n f = open(f'{csv_dir}/{tournament_name}.csv', 'a')\n with f:\n writer = csv.writer(f, delimiter=';')\n while write_csv_pos < len(game_details):\n writer.writerow(game_details[write_csv_pos])\n write_csv_pos += 1\n\n if get_observations:\n return match_observations\n else:\n return tot_wins, tot_tie, tot_loss\n\n\ndef run_single_match(agent1, agent2, render=False, seed=None):\n '''Wrapper to help start the game'''\n config = 'OneVsOne-v0'\n record_pngs_dir = None #f'C:/tmp/Results/PNGS'\n record_json_dir = None #f'C:/tmp/Results/JSON'\n game_state_file = None\n render_mode = 'human'\n do_sleep = False\n\n agents = [agent1, agent2]\n\n env = make(config, agents, game_state_file, render_mode=render_mode)\n if seed is None:\n # Pick a random seed between 0 and 2^31 - 1\n seed = random.randint(0, np.iinfo(np.int32).max)\n np.random.seed(seed)\n random.seed(seed)\n env.seed(seed)\n\n record_pngs_dir_ = None\n record_json_dir_ = None\n\n info, steps, observations, reward = run(env, None, config, render, do_sleep, record_pngs_dir_, record_json_dir_)\n\n env.close\n del env\n\n return reward" ]
[ [ "numpy.iinfo", "numpy.random.seed" ] ]
JiwonCocoder/da-faster-rcnn-PyTorch-master
[ "9ba9ffac51f1926bec4950b5f8cf3556250ddb59" ]
[ "batch_demo.py" ]
[ "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport _init_paths\nimport os\nimport sys\nimport numpy as np\nimport argparse\nimport pprint\nimport pdb\nimport time\nimport cv2\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dset\n# from scipy.misc import imread\nfrom imageio import imread\nfrom roi_data_layer.roidb import combined_roidb\nfrom roi_data_layer.roibatchLoader import roibatchLoader\nfrom model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\nfrom model.rpn.bbox_transform import clip_boxes\nfrom model.nms.nms_wrapper import nms\nfrom model.rpn.bbox_transform import bbox_transform_inv\nfrom model.utils.net_utils import save_net, load_net, vis_detections\nfrom model.utils.blob import im_list_to_blob\nfrom model.faster_rcnn.vgg16 import vgg16\nfrom model.faster_rcnn.resnet import resnet\nimport pdb\n\ntry:\n xrange # Python 2\nexcept NameError:\n xrange = range # Python 3\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')\n parser.add_argument('--dataset', dest='dataset',\n help='training dataset',\n default='pascal_voc', type=str)\n parser.add_argument('--cfg', dest='cfg_file',\n help='optional config file',\n default='cfgs/vgg16.yml', type=str)\n parser.add_argument('--net', dest='net',\n help='vgg16, res50, res101, res152',\n default='res101', type=str)\n parser.add_argument('--set', dest='set_cfgs',\n help='set config keys', default=None,\n nargs=argparse.REMAINDER)\n parser.add_argument('--load_dir', dest='load_dir',\n help='directory to load models',\n default=\"/srv/share/jyang375/models\")\n parser.add_argument('--image_dir', dest='image_dir',\n help='directory to load images for demo',\n default=\"images\")\n parser.add_argument('--cuda', dest='cuda',\n help='whether use CUDA',\n action='store_true')\n parser.add_argument('--mGPUs', dest='mGPUs',\n help='whether use multiple GPUs',\n action='store_true')\n parser.add_argument('--cag', dest='class_agnostic',\n help='whether perform class_agnostic bbox regression',\n action='store_true')\n parser.add_argument('--parallel_type', dest='parallel_type',\n help='which part of model to parallel, 0: all, 1: model before roi pooling',\n default=0, type=int)\n parser.add_argument('--checksession', dest='checksession',\n help='checksession to load model',\n default=1, type=int)\n parser.add_argument('--checkepoch', dest='checkepoch',\n help='checkepoch to load network',\n default=1, type=int)\n parser.add_argument('--checkpoint', dest='checkpoint',\n help='checkpoint to load network',\n default=10021, type=int)\n parser.add_argument('--bs', dest='batch_size',\n help='batch_size',\n default=1, type=int)\n parser.add_argument('--vis', dest='vis',\n help='visualization mode',\n action='store_true')\n parser.add_argument('--webcam_num', dest='webcam_num',\n help='webcam ID number',\n default=-1, type=int)\n\n args = parser.parse_args()\n return args\n\n\nlr = cfg.TRAIN.LEARNING_RATE\nmomentum = cfg.TRAIN.MOMENTUM\nweight_decay = 
cfg.TRAIN.WEIGHT_DECAY\n\n\ndef _get_image_blob(im):\n \"\"\"Converts an image into a network input.\n Arguments:\n im (ndarray): a color image in BGR order\n Returns:\n blob (ndarray): a data blob holding an image pyramid\n im_scale_factors (list): list of image scales (relative to im) used\n in the image pyramid\n \"\"\"\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n for target_size in cfg.TEST.SCALES:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale_factors.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n\n return blob, np.array(im_scale_factors)\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n\n print('Called with args:')\n print(args)\n\n if args.cfg_file is not None:\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n cfg.USE_GPU_NMS = args.cuda\n\n print('Using config:')\n pprint.pprint(cfg)\n np.random.seed(cfg.RNG_SEED)\n\n # train set\n # -- Note: Use validation set and disable the flipped to enable faster loading.\n\n # input_dir = args.load_dir + \"/\" + args.net + \"/\" + args.dataset\n # if not os.path.exists(input_dir):\n # raise Exception('There is no input directory for loading network from ' + input_dir)\n # load_name = os.path.join(input_dir,\n # 'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))\n load_name = args.load_dir\n pascal_classes = np.asarray(['__background__',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor'])\n\n # initilize the network here.\n if args.net == 'vgg16':\n fasterRCNN = vgg16(pascal_classes, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res101':\n fasterRCNN = resnet(pascal_classes, 101, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res50':\n fasterRCNN = resnet(pascal_classes, 50, pretrained=False, class_agnostic=args.class_agnostic)\n elif args.net == 'res152':\n fasterRCNN = resnet(pascal_classes, 152, pretrained=False, class_agnostic=args.class_agnostic)\n else:\n print(\"network is not defined\")\n pdb.set_trace()\n\n fasterRCNN.create_architecture()\n\n print(\"load checkpoint %s\" % (load_name))\n if args.cuda > 0:\n checkpoint = torch.load(load_name)\n else:\n checkpoint = torch.load(load_name, map_location=(lambda storage, loc: storage))\n fasterRCNN.load_state_dict(checkpoint['model'])\n if 'pooling_mode' in checkpoint.keys():\n cfg.POOLING_MODE = checkpoint['pooling_mode']\n\n print('load model successfully!')\n\n # pdb.set_trace()\n\n print(\"load checkpoint %s\" % (load_name))\n\n # initilize the tensor holder here.\n im_data = torch.FloatTensor(1)\n im_info = torch.FloatTensor(1)\n num_boxes = torch.LongTensor(1)\n gt_boxes = torch.FloatTensor(1)\n\n # ship to cuda\n if args.cuda > 0:\n im_data = im_data.cuda()\n im_info = im_info.cuda()\n num_boxes = num_boxes.cuda()\n gt_boxes = 
gt_boxes.cuda()\n\n # make variable\n im_data = Variable(im_data, volatile=True)\n im_info = Variable(im_info, volatile=True)\n num_boxes = Variable(num_boxes, volatile=True)\n gt_boxes = Variable(gt_boxes, volatile=True)\n\n if args.cuda > 0:\n cfg.CUDA = True\n\n if args.cuda > 0:\n fasterRCNN.cuda()\n\n fasterRCNN.eval()\n\n start = time.time()\n max_per_image = 100\n thresh = 0.05\n vis = True\n\n webcam_num = args.webcam_num\n # Set up webcam or get image directories\n if webcam_num >= 0:\n cap = cv2.VideoCapture(webcam_num)\n num_images = 0\n else:\n imglist = os.listdir(args.image_dir)\n num_images = len(imglist)\n\n s = time.time()\n print('Loaded Photo: {} images.'.format(num_images))\n\n while (num_images >= 0):\n total_tic = time.time()\n if webcam_num == -1:\n num_images -= 1\n\n\n im_file = os.path.join(args.image_dir, imglist[num_images])\n # im = cv2.imread(im_file)\n im_in = np.array(imread(im_file))\n if len(im_in.shape) == 2:\n im_in = im_in[:, :, np.newaxis]\n im_in = np.concatenate((im_in, im_in, im_in), axis=2) # one channel to three same channel\n # rgb -> bgr\n im = im_in[:, :, ::-1]\n\n blobs, im_scales = _get_image_blob(im)\n assert len(im_scales) == 1, \"Only single-image batch implemented\"\n im_blob = blobs\n im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)\n\n im_data_pt = torch.from_numpy(im_blob)\n im_data_pt = im_data_pt.permute(0, 3, 1, 2)\n im_info_pt = torch.from_numpy(im_info_np)\n\n im_data.data.resize_(im_data_pt.size()).copy_(im_data_pt)\n im_info.data.resize_(im_info_pt.size()).copy_(im_info_pt)\n gt_boxes.data.resize_(1, 1, 5).zero_()\n num_boxes.data.resize_(1).zero_()\n\n # pdb.set_trace()\n det_tic = time.time()\n\n rois, cls_prob, bbox_pred, \\\n rpn_loss_cls, rpn_loss_box, \\\n RCNN_loss_cls, RCNN_loss_bbox, \\\n rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)\n\n scores = cls_prob.data\n boxes = rois.data[:, :, 1:5]\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred.data\n if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:\n # Optionally normalize targets by a precomputed mean and stdev\n if args.class_agnostic:\n if args.cuda > 0:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n\n box_deltas = box_deltas.view(1, -1, 4)\n else:\n if args.cuda > 0:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()\n else:\n box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS) \\\n + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS)\n box_deltas = box_deltas.view(1, -1, 4 * len(pascal_classes))\n\n pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)\n pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n pred_boxes /= im_scales[0]\n\n scores = scores.squeeze()\n pred_boxes = pred_boxes.squeeze()\n det_toc = time.time()\n detect_time = det_toc - det_tic\n misc_tic = time.time()\n if vis:\n im2show = np.copy(im)\n for j in xrange(1, len(pascal_classes)):\n inds = torch.nonzero(scores[:, j] > thresh).view(-1)\n # if there is det\n if inds.numel() > 0:\n 
cls_scores = scores[:, j][inds]\n _, order = torch.sort(cls_scores, 0, True)\n if args.class_agnostic:\n cls_boxes = pred_boxes[inds, :]\n else:\n cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]\n\n cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)\n # cls_dets = torch.cat((cls_boxes, cls_scores), 1)\n cls_dets = cls_dets[order]\n keep = nms(cls_dets, cfg.TEST.NMS, force_cpu=not cfg.USE_GPU_NMS)\n cls_dets = cls_dets[keep.view(-1).long()]\n if vis:\n im2show = vis_detections(im2show, pascal_classes[j], cls_dets.cpu().numpy(), 0.5)\n\n misc_toc = time.time()\n nms_time = misc_toc - misc_tic\n\n if webcam_num == -1:\n sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \\r' \\\n .format(num_images + 1, len(imglist), detect_time, nms_time))\n sys.stdout.flush()\n\n if vis and webcam_num == -1:\n # cv2.imshow('test', im2show)\n # cv2.waitKey(0)\n result_path = os.path.join(args.image_dir, imglist[num_images][:-4] + \"_det.jpg\")\n cv2.imwrite(result_path, im2show)\n else:\n im2showRGB = cv2.cvtColor(im2show, cv2.COLOR_BGR2RGB)\n cv2.imshow(\"frame\", im2showRGB)\n total_toc = time.time()\n total_time = total_toc - total_tic\n frame_rate = 1 / total_time\n print('Frame rate:', frame_rate)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n e = time.time()\n print(e - s, 's')\n\n if webcam_num >= 0:\n cap.release()\n cv2.destroyAllWindows()\n" ]
[ [ "numpy.tile", "torch.FloatTensor", "torch.nonzero", "torch.load", "torch.autograd.Variable", "numpy.random.seed", "numpy.asarray", "numpy.copy", "numpy.max", "torch.from_numpy", "numpy.min", "numpy.array", "numpy.concatenate", "torch.LongTensor", "numpy.round", "torch.sort" ] ]
stupiding/insightface
[ "85a3b65c07b39e7ad02aabddd6cb6529baf4e605" ]
[ "recognition/eval/verification.py" ]
[ "\"\"\"Helper for evaluation on the Labeled Faces in the Wild dataset \n\"\"\"\n\n# MIT License\n# \n# Copyright (c) 2016 David Sandberg\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport argparse\nimport sys\nimport numpy as np\nfrom scipy import misc\nfrom sklearn.model_selection import KFold\nfrom scipy import interpolate\nimport sklearn\nimport cv2\nimport math\nimport datetime\nimport pickle\nfrom sklearn.decomposition import PCA\nimport mxnet as mx\nfrom mxnet import ndarray as nd\n\n\nclass LFold:\n def __init__(self, n_splits = 2, shuffle = False):\n self.n_splits = n_splits\n if self.n_splits>1:\n self.k_fold = KFold(n_splits = n_splits, shuffle = shuffle)\n\n def split(self, indices):\n if self.n_splits>1:\n return self.k_fold.split(indices)\n else:\n return [(indices, indices)]\n\n\ndef calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10, pca = 0):\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = LFold(n_splits=nrof_folds, shuffle=False)\n \n tprs = np.zeros((nrof_folds,nrof_thresholds))\n fprs = np.zeros((nrof_folds,nrof_thresholds))\n accuracy = np.zeros((nrof_folds))\n indices = np.arange(nrof_pairs)\n #print('pca', pca)\n \n if pca==0:\n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff),1)\n \n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n #print('train_set', train_set)\n #print('test_set', test_set)\n if pca>0:\n print('doing pca on', fold_idx)\n embed1_train = embeddings1[train_set]\n embed2_train = embeddings2[train_set]\n _embed_train = np.concatenate( (embed1_train, embed2_train), axis=0 )\n #print(_embed_train.shape)\n pca_model = PCA(n_components=pca)\n pca_model.fit(_embed_train)\n embed1 = pca_model.transform(embeddings1)\n embed2 = pca_model.transform(embeddings2)\n embed1 = sklearn.preprocessing.normalize(embed1)\n embed2 = sklearn.preprocessing.normalize(embed2)\n #print(embed1.shape, embed2.shape)\n diff = np.subtract(embed1, embed2)\n dist = np.sum(np.square(diff),1)\n \n # Find the best threshold for the fold\n acc_train = np.zeros((nrof_thresholds))\n for threshold_idx, threshold in enumerate(thresholds):\n _, _, acc_train[threshold_idx] = 
calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])\n best_threshold_index = np.argmax(acc_train)\n #print('threshold', thresholds[best_threshold_index])\n for threshold_idx, threshold in enumerate(thresholds):\n tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])\n _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])\n \n tpr = np.mean(tprs,0)\n fpr = np.mean(fprs,0)\n return tpr, fpr, accuracy\n\ndef calculate_accuracy(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n tp = np.sum(np.logical_and(predict_issame, actual_issame))\n fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))\n fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))\n \n tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)\n fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)\n acc = float(tp+tn)/dist.size\n return tpr, fpr, acc\n\n\n \ndef calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = LFold(n_splits=nrof_folds, shuffle=False)\n \n val = np.zeros(nrof_folds)\n far = np.zeros(nrof_folds)\n \n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff),1)\n indices = np.arange(nrof_pairs)\n \n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n \n # Find the threshold that gives FAR = far_target\n far_train = np.zeros(nrof_thresholds)\n for threshold_idx, threshold in enumerate(thresholds):\n _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])\n if np.max(far_train)>=far_target:\n f = interpolate.interp1d(far_train, thresholds, kind='slinear')\n threshold = f(far_target)\n else:\n threshold = 0.0\n \n val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])\n \n val_mean = np.mean(val)\n far_mean = np.mean(far)\n val_std = np.std(val)\n return val_mean, val_std, far_mean\n\n\ndef calculate_val_far(threshold, dist, actual_issame):\n predict_issame = np.less(dist, threshold)\n true_accept = np.sum(np.logical_and(predict_issame, actual_issame))\n false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))\n n_same = np.sum(actual_issame)\n n_diff = np.sum(np.logical_not(actual_issame))\n #print(true_accept, false_accept)\n #print(n_same, n_diff)\n val = float(true_accept) / float(n_same)\n far = float(false_accept) / float(n_diff)\n return val, far\n\ndef evaluate(embeddings, actual_issame, nrof_folds=10, pca = 0):\n # Calculate evaluation metrics\n thresholds = np.arange(0, 4, 0.01)\n embeddings1 = embeddings[0::2]\n embeddings2 = embeddings[1::2]\n tpr, fpr, accuracy = calculate_roc(thresholds, embeddings1, embeddings2,\n np.asarray(actual_issame), nrof_folds=nrof_folds, pca = pca)\n thresholds = np.arange(0, 4, 0.001)\n val, val_std, far = calculate_val(thresholds, embeddings1, embeddings2,\n np.asarray(actual_issame), 1e-3, nrof_folds=nrof_folds)\n return tpr, fpr, accuracy, val, val_std, far\n\ndef load_bin(path, image_size):\n bins, issame_list = pickle.load(open(path, 
'rb'))\n data_list = []\n for flip in [0,1]:\n data = nd.empty((len(issame_list)*2, 3, image_size[0], image_size[1]))\n data_list.append(data)\n for i in xrange(len(issame_list)*2):\n _bin = bins[i]\n img = mx.image.imdecode(_bin)\n if img.shape[1]!=image_size[0]:\n img = mx.image.resize_short(img, image_size[0])\n img = nd.transpose(img, axes=(2, 0, 1))\n for flip in [0,1]:\n if flip==1:\n img = mx.ndarray.flip(data=img, axis=2)\n data_list[flip][i][:] = img\n if i%1000==0:\n print('loading bin', i)\n print(data_list[0].shape)\n return (data_list, issame_list)\n\ndef test(data_set, mx_model, batch_size, nfolds=10, data_extra = None, label_shape = None):\n print('testing verification..')\n data_list = data_set[0]\n issame_list = data_set[1]\n model = mx_model\n embeddings_list = []\n if data_extra is not None:\n _data_extra = nd.array(data_extra)\n time_consumed = 0.0\n if label_shape is None:\n _label = nd.ones( (batch_size,) )\n else:\n _label = nd.ones( label_shape )\n for i in xrange( len(data_list) ):\n data = data_list[i]\n embeddings = None\n ba = 0\n while ba<data.shape[0]:\n bb = min(ba+batch_size, data.shape[0])\n count = bb-ba\n _data = nd.slice_axis(data, axis=0, begin=bb-batch_size, end=bb)\n #print(_data.shape, _label.shape)\n time0 = datetime.datetime.now()\n if data_extra is None:\n db = mx.io.DataBatch(data=(_data,), label=(_label,))\n else:\n db = mx.io.DataBatch(data=(_data,_data_extra), label=(_label,))\n model.forward(db, is_train=False)\n net_out = model.get_outputs()\n #_arg, _aux = model.get_params()\n #__arg = {}\n #for k,v in _arg.iteritems():\n # __arg[k] = v.as_in_context(_ctx)\n #_arg = __arg\n #_arg[\"data\"] = _data.as_in_context(_ctx)\n #_arg[\"softmax_label\"] = _label.as_in_context(_ctx)\n #for k,v in _arg.iteritems():\n # print(k,v.context)\n #exe = sym.bind(_ctx, _arg ,args_grad=None, grad_req=\"null\", aux_states=_aux)\n #exe.forward(is_train=False)\n #net_out = exe.outputs\n _embeddings = net_out[0].asnumpy()\n time_now = datetime.datetime.now()\n diff = time_now - time0\n time_consumed+=diff.total_seconds()\n #print(_embeddings.shape)\n if embeddings is None:\n embeddings = np.zeros( (data.shape[0], _embeddings.shape[1]) )\n embeddings[ba:bb,:] = _embeddings[(batch_size-count):,:]\n ba = bb\n embeddings_list.append(embeddings)\n\n _xnorm = 0.0\n _xnorm_cnt = 0\n for embed in embeddings_list:\n for i in xrange(embed.shape[0]):\n _em = embed[i]\n _norm=np.linalg.norm(_em)\n #print(_em.shape, _norm)\n _xnorm+=_norm\n _xnorm_cnt+=1\n _xnorm /= _xnorm_cnt\n\n embeddings = embeddings_list[0].copy()\n embeddings = sklearn.preprocessing.normalize(embeddings)\n acc1 = 0.0\n std1 = 0.0\n #_, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=10)\n #acc1, std1 = np.mean(accuracy), np.std(accuracy)\n\n #print('Validation rate: %2.5f+-%2.5f @ FAR=%2.5f' % (val, val_std, far))\n #embeddings = np.concatenate(embeddings_list, axis=1)\n embeddings = embeddings_list[0] + embeddings_list[1]\n embeddings = sklearn.preprocessing.normalize(embeddings)\n print(embeddings.shape)\n print('infer time', time_consumed)\n _, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=nfolds)\n acc2, std2 = np.mean(accuracy), np.std(accuracy)\n return acc1, std1, acc2, std2, _xnorm, embeddings_list\n\ndef test_badcase(data_set, mx_model, batch_size, name='', data_extra = None, label_shape = None):\n print('testing verification badcase..')\n data_list = data_set[0]\n issame_list = data_set[1]\n model = mx_model\n embeddings_list = []\n 
if data_extra is not None:\n _data_extra = nd.array(data_extra)\n time_consumed = 0.0\n if label_shape is None:\n _label = nd.ones( (batch_size,) )\n else:\n _label = nd.ones( label_shape )\n for i in xrange( len(data_list) ):\n data = data_list[i]\n embeddings = None\n ba = 0\n while ba<data.shape[0]:\n bb = min(ba+batch_size, data.shape[0])\n count = bb-ba\n _data = nd.slice_axis(data, axis=0, begin=bb-batch_size, end=bb)\n #print(_data.shape, _label.shape)\n time0 = datetime.datetime.now()\n if data_extra is None:\n db = mx.io.DataBatch(data=(_data,), label=(_label,))\n else:\n db = mx.io.DataBatch(data=(_data,_data_extra), label=(_label,))\n model.forward(db, is_train=False)\n net_out = model.get_outputs()\n _embeddings = net_out[0].asnumpy()\n time_now = datetime.datetime.now()\n diff = time_now - time0\n time_consumed+=diff.total_seconds()\n if embeddings is None:\n embeddings = np.zeros( (data.shape[0], _embeddings.shape[1]) )\n embeddings[ba:bb,:] = _embeddings[(batch_size-count):,:]\n ba = bb\n embeddings_list.append(embeddings)\n embeddings = embeddings_list[0] + embeddings_list[1]\n embeddings = sklearn.preprocessing.normalize(embeddings)\n thresholds = np.arange(0, 4, 0.01)\n actual_issame = np.asarray(issame_list)\n nrof_folds = 10\n embeddings1 = embeddings[0::2]\n embeddings2 = embeddings[1::2]\n assert(embeddings1.shape[0] == embeddings2.shape[0])\n assert(embeddings1.shape[1] == embeddings2.shape[1])\n nrof_pairs = min(len(actual_issame), embeddings1.shape[0])\n nrof_thresholds = len(thresholds)\n k_fold = LFold(n_splits=nrof_folds, shuffle=False)\n \n tprs = np.zeros((nrof_folds,nrof_thresholds))\n fprs = np.zeros((nrof_folds,nrof_thresholds))\n accuracy = np.zeros((nrof_folds))\n indices = np.arange(nrof_pairs)\n \n diff = np.subtract(embeddings1, embeddings2)\n dist = np.sum(np.square(diff),1)\n data = data_list[0]\n\n pouts = []\n nouts = []\n \n for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):\n \n # Find the best threshold for the fold\n acc_train = np.zeros((nrof_thresholds))\n #print(train_set)\n #print(train_set.__class__)\n for threshold_idx, threshold in enumerate(thresholds):\n p2 = dist[train_set]\n p3 = actual_issame[train_set]\n _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, p2, p3)\n best_threshold_index = np.argmax(acc_train)\n for threshold_idx, threshold in enumerate(thresholds):\n tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])\n _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])\n best_threshold = thresholds[best_threshold_index]\n for iid in test_set:\n ida = iid*2\n idb = ida+1\n asame = actual_issame[iid]\n _dist = dist[iid]\n violate = _dist - best_threshold\n if not asame:\n violate *= -1.0\n if violate>0.0:\n imga = data[ida].asnumpy().transpose( (1,2,0) )[...,::-1] #to bgr\n imgb = data[idb].asnumpy().transpose( (1,2,0) )[...,::-1]\n #print(imga.shape, imgb.shape, violate, asame, _dist)\n if asame:\n pouts.append( (imga, imgb, _dist, best_threshold, ida) )\n else:\n nouts.append( (imga, imgb, _dist, best_threshold, ida) )\n\n \n tpr = np.mean(tprs,0)\n fpr = np.mean(fprs,0)\n acc = np.mean(accuracy)\n pouts = sorted(pouts, key = lambda x: x[2], reverse=True)\n nouts = sorted(nouts, key = lambda x: x[2], reverse=False)\n print(len(pouts), len(nouts))\n print('acc', acc)\n gap = 10\n image_shape = (112,224,3)\n out_dir = \"./badcases\"\n if not 
os.path.exists(out_dir):\n os.makedirs(out_dir)\n if len(nouts)>0:\n threshold = nouts[0][3]\n else:\n threshold = pouts[-1][3]\n \n for item in [(pouts, 'positive(false_negative).png'), (nouts, 'negative(false_positive).png')]:\n cols = 4\n rows = 8000\n outs = item[0]\n if len(outs)==0:\n continue\n #if len(outs)==9:\n # cols = 3\n # rows = 3\n\n _rows = int(math.ceil(len(outs)/cols))\n rows = min(rows, _rows)\n hack = {}\n\n if name.startswith('cfp') and item[1].startswith('pos'):\n hack = {0:'manual/238_13.jpg.jpg', 6:'manual/088_14.jpg.jpg', 10:'manual/470_14.jpg.jpg', 25:'manual/238_13.jpg.jpg', 28:'manual/143_11.jpg.jpg'}\n\n filename = item[1]\n if len(name)>0:\n filename = name+\"_\"+filename\n filename = os.path.join(out_dir, filename)\n img = np.zeros( (image_shape[0]*rows+20, image_shape[1]*cols+(cols-1)*gap, 3), dtype=np.uint8 )\n img[:,:,:] = 255\n text_color = (0,0,153)\n text_color = (255,178,102)\n text_color = (153,255,51)\n for outi, out in enumerate(outs):\n row = outi//cols\n col = outi%cols\n if row==rows:\n break\n imga = out[0].copy()\n imgb = out[1].copy()\n if outi in hack:\n idx = out[4]\n print('noise idx',idx)\n aa = hack[outi]\n imgb = cv2.imread(aa)\n #if aa==1:\n # imgb = cv2.transpose(imgb)\n # imgb = cv2.flip(imgb, 1)\n #elif aa==3:\n # imgb = cv2.transpose(imgb)\n # imgb = cv2.flip(imgb, 0)\n #else:\n # for ii in xrange(2):\n # imgb = cv2.transpose(imgb)\n # imgb = cv2.flip(imgb, 1)\n dist = out[2]\n _img = np.concatenate( (imga, imgb), axis=1 )\n k = \"%.3f\"%dist\n #print(k)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(_img,k,(80,image_shape[0]//2+7), font, 0.6, text_color, 2)\n #_filename = filename+\"_%d.png\"%outi\n #cv2.imwrite(_filename, _img)\n img[row*image_shape[0]:(row+1)*image_shape[0], (col*image_shape[1]+gap*col):((col+1)*image_shape[1]+gap*col),:] = _img\n #threshold = outs[0][3]\n font = cv2.FONT_HERSHEY_SIMPLEX\n k = \"threshold: %.3f\"%threshold\n cv2.putText(img,k,(img.shape[1]//2-70,img.shape[0]-5), font, 0.6, text_color, 2)\n cv2.imwrite(filename, img)\n\ndef dumpR(data_set, mx_model, batch_size, name='', data_extra = None, label_shape = None):\n print('dump verification embedding..')\n data_list = data_set[0]\n issame_list = data_set[1]\n model = mx_model\n embeddings_list = []\n if data_extra is not None:\n _data_extra = nd.array(data_extra)\n time_consumed = 0.0\n if label_shape is None:\n _label = nd.ones( (batch_size,) )\n else:\n _label = nd.ones( label_shape )\n for i in xrange( len(data_list) ):\n data = data_list[i]\n embeddings = None\n ba = 0\n while ba<data.shape[0]:\n bb = min(ba+batch_size, data.shape[0])\n count = bb-ba\n _data = nd.slice_axis(data, axis=0, begin=bb-batch_size, end=bb)\n #print(_data.shape, _label.shape)\n time0 = datetime.datetime.now()\n if data_extra is None:\n db = mx.io.DataBatch(data=(_data,), label=(_label,))\n else:\n db = mx.io.DataBatch(data=(_data,_data_extra), label=(_label,))\n model.forward(db, is_train=False)\n net_out = model.get_outputs()\n _embeddings = net_out[0].asnumpy()\n time_now = datetime.datetime.now()\n diff = time_now - time0\n time_consumed+=diff.total_seconds()\n if embeddings is None:\n embeddings = np.zeros( (data.shape[0], _embeddings.shape[1]) )\n embeddings[ba:bb,:] = _embeddings[(batch_size-count):,:]\n ba = bb\n embeddings_list.append(embeddings)\n embeddings = embeddings_list[0] + embeddings_list[1]\n embeddings = sklearn.preprocessing.normalize(embeddings)\n actual_issame = np.asarray(issame_list)\n outname = os.path.join('temp.bin')\n with open(outname, 'wb') 
as f:\n pickle.dump((embeddings, issame_list), f, protocol=pickle.HIGHEST_PROTOCOL)\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='do verification')\n # general\n parser.add_argument('--data-dir', default='', help='')\n parser.add_argument('--model', default='../model/softmax,50', help='path to load model.')\n parser.add_argument('--target', default='lfw,cfp_ff,cfp_fp,agedb_30', help='test targets.')\n parser.add_argument('--gpu', default=0, type=int, help='gpu id')\n parser.add_argument('--batch-size', default=32, type=int, help='')\n parser.add_argument('--max', default='', type=str, help='')\n parser.add_argument('--mode', default=0, type=int, help='')\n parser.add_argument('--nfolds', default=10, type=int, help='')\n args = parser.parse_args()\n sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))\n import face_image\n\n prop = face_image.load_property(args.data_dir)\n image_size = prop.image_size\n print('image_size', image_size)\n ctx = mx.gpu(args.gpu)\n nets = []\n vec = args.model.split(',')\n prefix = args.model.split(',')[0]\n epochs = []\n if len(vec)==1:\n pdir = os.path.dirname(prefix)\n for fname in os.listdir(pdir):\n if not fname.endswith('.params'):\n continue\n _file = os.path.join(pdir, fname)\n if _file.startswith(prefix):\n epoch = int(fname.split('.')[0].split('-')[1])\n epochs.append(epoch)\n epochs = sorted(epochs, reverse=True)\n if len(args.max)>0:\n _max = [int(x) for x in args.max.split(',')]\n assert len(_max)==2\n if len(epochs)>_max[1]:\n epochs = epochs[_max[0]:_max[1]]\n\n else:\n epochs = [int(x) for x in vec[1].split('|')]\n print('model number', len(epochs))\n time0 = datetime.datetime.now()\n for epoch in epochs:\n print('loading',prefix, epoch)\n sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)\n #arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)\n all_layers = sym.get_internals()\n sym = all_layers['fc1_output']\n model = mx.mod.Module(symbol=sym, context=ctx, label_names = None)\n #model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])\n model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))])\n model.set_params(arg_params, aux_params)\n nets.append(model)\n time_now = datetime.datetime.now()\n diff = time_now - time0\n print('model loading time', diff.total_seconds())\n\n ver_list = []\n ver_name_list = []\n for name in args.target.split(','):\n path = os.path.join(args.data_dir,name+\".bin\")\n if os.path.exists(path):\n print('loading.. ', name)\n data_set = load_bin(path, image_size)\n ver_list.append(data_set)\n ver_name_list.append(name)\n\n if args.mode==0:\n for i in xrange(len(ver_list)):\n results = []\n for model in nets:\n acc1, std1, acc2, std2, xnorm, embeddings_list = test(ver_list[i], model, args.batch_size, args.nfolds)\n print('[%s]XNorm: %f' % (ver_name_list[i], xnorm))\n print('[%s]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], acc1, std1))\n print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], acc2, std2))\n results.append(acc2)\n print('Max of [%s] is %1.5f' % (ver_name_list[i], np.max(results)))\n elif args.mode==1:\n model = nets[0]\n test_badcase(ver_list[0], model, args.batch_size, args.target)\n else:\n model = nets[0]\n dumpR(ver_list[0], model, args.batch_size, args.target)\n\n\n" ]
[ [ "numpy.sum", "scipy.interpolate.interp1d", "numpy.subtract", "numpy.less", "numpy.asarray", "numpy.logical_and", "numpy.logical_not", "sklearn.model_selection.KFold", "sklearn.preprocessing.normalize", "numpy.mean", "numpy.zeros", "numpy.argmax", "numpy.arange", "numpy.max", "numpy.std", "numpy.square", "numpy.linalg.norm", "sklearn.decomposition.PCA", "numpy.concatenate" ] ]
Brlaney/python-matrix-fem
[ "4dd0e1ee4db994cef896c57a0522fc48529a2d01" ]
[ "beam11.py" ]
[ "# beam11.py\n# UNITS: METRIC (meters & kN)\nfrom lib.beams import *\nimport numpy as np\n\n# Node coordinates (modeled with 4 nodes)\nnodes = np.array([[0], [4], [6], [11]])\n\n# Member/element connection matrix\nmembers = np.array([[1, 2], [2, 3], [3, 4]])\n\n# Pre-define arrays to contain each members\nn = len(nodes) # number of nodes\nm = len(members) # number of members\nL = [] # length in meters\nE = np.repeat(1, 5) # Modulus of elasticity kPa\nI = np.repeat(1, 5) # Moment of inertia m^4\n\nKl = [] # Will contain each elems local [k] (global coords)\n\n# 1 => Un-restrained global degrees of freedom\ndgf = np.array([0, 0, 1, 1, 0, 1, 0, 1])\nfg = np.array([[2, -100]]) # External forces (kN)\nKg = np.zeros((2*n, 2*n)) # global stiffness matrix\n# ds = np.array([]) # Initial displacements\n\n# fixed-end moment vector for members 1 and 2\nfem = np.array([0, 0, 0, 0, 0, 0])\n\nnewKg = KgBeam(nodes, members, n, m, L, E, I,\n Kl, dgf, fg, Kg, fem)\n\nprint(newKg)\n" ]
[ [ "numpy.array", "numpy.repeat", "numpy.zeros" ] ]
phww/Andrew.Ng-ML-Study
[ "f0a84814bd538b0e0ec0d2a64d89338b730d1632" ]
[ "code-homework/ML/ex1_Linear Regression/ex1_batch.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndata = pd.read_csv(\"ex1data1.txt\",names = ['population','profit'])\nx = data.population\ny = data.profit\n\"初始化,所有变量都是matrix\"\ndf = data.copy()#因为insert会改变原数组,所以先复制一份,坑1.\ndf.insert(0,\"one\",1)\nX = df.iloc[:,0:df.shape[1]-1]\ny = df.iloc[:,df.shape[1]-1:df.shape[1]]#df.iloc[:,-1]是个一维数组(series),reshape(97,1)都不行,坑2.\ntheta = np.zeros(X.shape[1])\ny = np.matrix(y)\nX = np.matrix(X)\nx = np.matrix(x)\nx = x.T #行向量/列向量傻傻分不清 坑3\ntheta = np.matrix(theta)\nH = X*(theta.T)\n\"\"\"计算代价\"\"\"\ndef costfunction(X,y,H):\n n = np.power((H-y),2)\n return np.sum(n)/(2*len(X))\n\"\"\"批量梯度下降\"\"\"\nalpha = 0.01\nm = len(X)\ntimes = 1000\ndef gradient_descent(theta,X,y,alpha,m,H,times):\n thetas_0 = [0]\n thetas_1 = [0]\n cost = [costfunction(X,y,H)]\n for i in range(times):\n H = X*theta.T\n erro = H - y\n temp = np.matrix([0,0])\n temp = theta - erro.T * X * alpha/m #矩阵运算是精髓,临时变量很重要.坑4\n thetas_0.append(temp[0,0])\n thetas_1.append(temp[0,1])\n theta = temp\n cost.append(costfunction(X,y,H))\n return theta,cost,thetas_0,thetas_1\nfinal_theta,cost,thetas_0,thetas_1= gradient_descent(theta,X,y,alpha,m,H,times)\nprint(final_theta,'\\n',cost,'\\n',thetas_0,'\\n',thetas_1)\n\"\"\"绘图\"\"\"\nfig,(ax1,ax2) = plt.subplots(2,1)\nH = final_theta * X.T\nH = H.T\nax1.plot(x,H,c = 'r',label = 'Prediction')\nax1.scatter(data.population,data.profit,label = 'data')\nax1.legend(loc = 2)\nax2.plot(cost)\nax1.set_xlabel('population')\nax1.set_ylabel('profit')\nax1.set_title('relationship between population and profit'.title())\nax2.set_xlabel('times')\nax2.set_ylabel('cost')\nax2.set_title('how does cost changed'.title())\nfig.subplots_adjust(hspace = 0.8)\nplt.show()" ]
[ [ "numpy.sum", "numpy.zeros", "pandas.read_csv", "numpy.matrix", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "numpy.power" ] ]
hydrocode-de/RUINSapp
[ "2dd0f8b0b0ed04e95ef2ace9154414b1f83a89dc" ]
[ "ruins/processing/sdm.py" ]
[ "# BIAS CORRECTION\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import gamma\nfrom scipy.stats import norm\nfrom scipy.signal import detrend\n\n'''\nScaled distribution mapping for climate data\n\nThis is a excerpt from pyCAT and the method after Switanek et al. (2017) containing the functions to perform a relative and absolute bias correction on climate data.\n(cc) [email protected], July 2020\n\nIt is intended to be used on pandas time series at single locations/pixels.\n\nSwitanek, M. B., P. A. Troch, C. L. Castro, A. Leuprecht, H.-I. Chang, R. Mukherjee, and E. M. C. Demaria (2017), Scaled distribution mapping: a bias correction method that preserves raw climate model projected changes, Hydrol. Earth Syst. Sci., 21(6), 2649–2666, https://doi.org/10.5194/hess-21-2649-2017\n'''\n\n\ndef relSDM(obs, mod, sce, cdf_threshold=0.9999999, lower_limit=0.1):\n '''relative scaled distribution mapping assuming a gamma distributed parameter (with lower limit zero)\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n cdf_threshold :: upper and lower threshold of CDF\n lower_limit :: lower limit of data signal (values below will be masked!)\n\n returns corrected timeseries\n tested with pandas series.\n '''\n\n obs_r = obs[obs >= lower_limit]\n mod_r = mod[mod >= lower_limit]\n sce_r = sce[sce >= lower_limit]\n\n obs_fr = 1. * len(obs_r) / len(obs)\n mod_fr = 1. * len(mod_r) / len(mod)\n sce_fr = 1. * len(sce_r) / len(sce)\n sce_argsort = np.argsort(sce)\n\n obs_gamma = gamma.fit(obs_r, floc=0)\n mod_gamma = gamma.fit(mod_r, floc=0)\n sce_gamma = gamma.fit(sce_r, floc=0)\n\n obs_cdf = gamma.cdf(np.sort(obs_r), *obs_gamma)\n mod_cdf = gamma.cdf(np.sort(mod_r), *mod_gamma)\n obs_cdf[obs_cdf > cdf_threshold] = cdf_threshold\n mod_cdf[mod_cdf > cdf_threshold] = cdf_threshold\n\n expected_sce_raindays = min(int(np.round(len(sce) * obs_fr * sce_fr / mod_fr)), len(sce))\n sce_cdf = gamma.cdf(np.sort(sce_r), *sce_gamma)\n sce_cdf[sce_cdf > cdf_threshold] = cdf_threshold\n\n # interpolate cdf-values for obs and mod to the length of the scenario\n obs_cdf_intpol = np.interp(np.linspace(1, len(obs_r), len(sce_r)), np.linspace(1, len(obs_r), len(obs_r)), obs_cdf)\n mod_cdf_intpol = np.interp(np.linspace(1, len(mod_r), len(sce_r)), np.linspace(1, len(mod_r), len(mod_r)), mod_cdf)\n\n # adapt the observation cdfs\n obs_inverse = 1. / (1 - obs_cdf_intpol)\n mod_inverse = 1. / (1 - mod_cdf_intpol)\n sce_inverse = 1. / (1 - sce_cdf)\n adapted_cdf = 1 - 1. / (obs_inverse * sce_inverse / mod_inverse)\n adapted_cdf[adapted_cdf < 0.] 
= 0.\n\n # correct by adapted observation cdf-values\n xvals = gamma.ppf(np.sort(adapted_cdf), *obs_gamma) * gamma.ppf(sce_cdf, *sce_gamma) / gamma.ppf(sce_cdf,\n *mod_gamma)\n\n # interpolate to the expected length of future raindays\n correction = np.zeros(len(sce))\n if len(sce_r) > expected_sce_raindays:\n xvals = np.interp(np.linspace(1, len(sce_r), expected_sce_raindays), np.linspace(1, len(sce_r), len(sce_r)),\n xvals)\n else:\n xvals = np.hstack((np.zeros(expected_sce_raindays - len(sce_r)), xvals))\n\n correction[sce_argsort[-expected_sce_raindays:]] = xvals\n\n return pd.Series(correction, index=sce.index)\n\n\ndef absSDM(obs, mod, sce, cdf_threshold=0.9999999):\n '''absolute scaled distribution mapping assuming a normal distributed parameter\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n cdf_threshold :: upper and lower threshold of CDF\n\n returns corrected timeseries\n tested with pandas series.\n '''\n\n obs_len = len(obs)\n mod_len = len(mod)\n sce_len = len(sce)\n obs_mean = np.mean(obs)\n mod_mean = np.mean(mod)\n smean = np.mean(sce)\n odetrend = detrend(obs)\n mdetrend = detrend(mod)\n sdetrend = detrend(sce)\n\n obs_norm = norm.fit(odetrend)\n mod_norm = norm.fit(mdetrend)\n sce_norm = norm.fit(sdetrend)\n\n sce_diff = sce - sdetrend\n sce_argsort = np.argsort(sdetrend)\n\n obs_cdf = norm.cdf(np.sort(odetrend), *obs_norm)\n mod_cdf = norm.cdf(np.sort(mdetrend), *mod_norm)\n sce_cdf = norm.cdf(np.sort(sdetrend), *sce_norm)\n obs_cdf = np.maximum(np.minimum(obs_cdf, cdf_threshold), 1 - cdf_threshold)\n mod_cdf = np.maximum(np.minimum(mod_cdf, cdf_threshold), 1 - cdf_threshold)\n sce_cdf = np.maximum(np.minimum(sce_cdf, cdf_threshold), 1 - cdf_threshold)\n\n # interpolate cdf-values for obs and mod to the length of the scenario\n obs_cdf_intpol = np.interp(np.linspace(1, obs_len, sce_len), np.linspace(1, obs_len, obs_len), obs_cdf)\n mod_cdf_intpol = np.interp(np.linspace(1, mod_len, sce_len), np.linspace(1, mod_len, mod_len), mod_cdf)\n\n # adapt the observation cdfs\n # split the tails of the cdfs around the center\n obs_cdf_shift = obs_cdf_intpol - .5\n mod_cdf_shift = mod_cdf_intpol - .5\n sce_cdf_shift = sce_cdf - .5\n obs_inverse = 1. / (.5 - np.abs(obs_cdf_shift))\n mod_inverse = 1. / (.5 - np.abs(mod_cdf_shift))\n sce_inverse = 1. / (.5 - np.abs(sce_cdf_shift))\n adapted_cdf = np.sign(obs_cdf_shift) * (1. - 1. 
/ (obs_inverse * sce_inverse / mod_inverse))\n adapted_cdf[adapted_cdf < 0] += 1.\n adapted_cdf = np.maximum(np.minimum(adapted_cdf, cdf_threshold), 1 - cdf_threshold)\n\n xvals = norm.ppf(np.sort(adapted_cdf), *obs_norm) \\\n + obs_norm[-1] / mod_norm[-1] \\\n * (norm.ppf(sce_cdf, *sce_norm) - norm.ppf(sce_cdf, *mod_norm))\n xvals -= xvals.mean()\n xvals += obs_mean + (smean - mod_mean)\n\n correction = np.zeros(sce_len)\n correction[sce_argsort] = xvals\n correction += sce_diff - smean\n\n return correction\n\n\ndef SDM(obs, mod, sce, meth='rel', cdf_threshold=0.9999999, lower_limit=0.1):\n '''scaled distribution mapping - wrapper to relative and absolute bias correction functions\n rewritten from pyCAT for 1D data\n\n obs :: observed variable time series\n mod :: modelled variable for same time series as obs\n sce :: to unbias modelled time series\n meth :: 'rel' for relative SDM, else absolute SDM will be performed\n cdf_threshold :: upper and lower threshold of CDF\n lower_limit :: lower limit of data signal (values below will be masked when meth != 'rel')\n\n The original authors suggest to use the absolute SDM for air temperature and the relative SDM for precipitation and radiation series.\n\n returns corrected timeseries\n tested with pandas series.\n '''\n\n if meth == 'rel':\n return relSDM(obs, mod, sce, cdf_threshold, lower_limit)\n else:\n return absSDM(obs, mod, sce, cdf_threshold)" ]
[ [ "scipy.signal.detrend", "scipy.stats.norm.fit", "pandas.Series", "numpy.sign", "numpy.zeros", "scipy.stats.norm.ppf", "numpy.argsort", "numpy.abs", "scipy.stats.gamma.ppf", "scipy.stats.gamma.fit", "numpy.sort", "numpy.linspace", "numpy.mean", "numpy.minimum" ] ]
IBM/oct-glaucoma-vf-estimate
[ "ea79352547f33fe05ee532ab9faad6a5e4811a76" ]
[ "python_code/train_tp_si.py" ]
[ "#!/usr/bin/struture_function1 python\n# File: train_tp.py\n# Author: Yasmeen George\n\nimport tensorflow as tf\n#from tensorflow import keras\nimport argparse\nfrom tensorpack.tfutils.summary import *\nfrom oct_dataflow_tp import *\nimport tensorflow.contrib.slim as slim\nfrom keras import backend as K\nfrom contextlib import contextmanager\ndef model_summary():\n model_vars = tf.trainable_variables()\n slim.model_analyzer.analyze_vars(model_vars, print_info=True)\nfrom tensorpack.utils.viz import stack_patches\nfrom vft_utils import perf_measures\n\ndef get_features(image,scope):\n with tf.compat.v1.variable_scope(scope):\n l = tf.layers.conv3d(image, 32, 3, padding='SAME',name='conv0')# input_shape=input_shape)\n l=tf.nn.relu(l,name='relu0')\n l = tf.layers.conv3d( l, 16, 3, padding='SAME',name = 'conv1')\n l = tf.nn.relu(l,name='relu1')\n i = 2\n name =\"\"\n for nbchannel in nfilters:\n l = tf.layers.conv3d(l, nbchannel, 3, padding='SAME',name='conv'+str(i))\n l = tf.layers.batch_normalization(l,axis=-1, momentum=0.8) # input_shape=(input_shape[0], input_shape[1], input_shape[2], nbchannel)\n l = tf.nn.relu(l,name='relu'+str(i))\n name = l.name\n l = tf.layers.max_pooling3d(l,2,2,name = 'maxpool3d'+str(i))\n i +=1\n\n return l,name\n\n\ndef get_keras_model(l):\n l = tf.layers.conv3d(l, 32, 3, padding='valid',name='conv0') # input_shape=input_shape)\n l = tf.nn.relu(l,name='relu0')\n i=1\n name = \"\"\n for nbchannel in nfilters_merged:\n l = tf.layers.conv3d(l, nbchannel, 3,padding='valid',name='conv'+str(i))\n l = tf.layers.batch_normalization(l,axis=-1, momentum=0.8)# input_shape=(input_shape[0],input_shape[1],input_shape[2],nbchannel),\n l = tf.nn.relu(l,name='relu'+str(i))\n name = l.name\n i+=1\n\n if CNN_OUT_GarWayHeathmap: # PREDICT GARWAY HEATHMAP AVERAGE REGIONS\n l = tf.reduce_mean(l, axis=[1,2,3])\n l=tf.layers.dense(l, 64, tf.nn.relu)\n l = tf.layers.dropout(l,rate=0.5)\n l = tf.layers.dense(l, out_num)\n\n else: # predict VFT THRESHOLD VALUES\n l = tf.layers.conv3d(l,1,2,padding='valid')\n l = tf.reduce_mean(l,axis=(3,4))\n #l = tf.layers.conv2d(l,10,2,padding='valid')\n #l = tf.nn.softmax(l,name='pred')\n #l = tf.math.sigmoid(l,name='pred')\n\n\n\n return l,tf.math.sigmoid(l,name='pred'),name\n\n\n@contextmanager\ndef guided_relu():\n \"\"\"\n Returns:\n A context where the gradient of :meth:`tf.nn.relu` is replaced by\n guided back-propagation, as described in the paper:\n `Striving for Simplicity: The All Convolutional Net\n <https://arxiv.org/abs/1412.6806>`_\n \"\"\"\n from tensorflow.python.ops import gen_nn_ops # noqa\n\n @tf.RegisterGradient(\"GuidedReLU\")\n def GuidedReluGrad(op, grad):\n return tf.where(0. < grad,\n gen_nn_ops.relu_grad(grad, op.outputs[0]),\n tf.zeros(grad.get_shape()))\n\n g = tf.get_default_graph()\n with g.gradient_override_map({'Relu': 'GuidedReLU'}):\n yield\n\ndef saliency_map(output, input, name=\"saliency_map\"):\n \"\"\"\n Produce a saliency map as described in the paper:\n `Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps\n <https://arxiv.org/abs/1312.6034>`_.\n The saliency map is the gradient of the max element in output w.r.t input.\n\n Returns:\n tf.Tensor: the saliency map. 
Has the same shape as input.\n \"\"\"\n max_outp = tf.reduce_max(output, 1)\n saliency_op = tf.gradients(max_outp, input)[:][0]\n return tf.identity(saliency_op, name=name)\n\n\nclass Model(ModelDesc):\n def inputs(self):\n return [tf.TensorSpec((None,)+(SHAPE, SHAPE, dpth),tf.uint8, 'input1'),\n tf.TensorSpec((None,out_num), tf.float32, 'label'),\n tf.TensorSpec((None,) + vft_shape, tf.float32, 'vft_threshold'),\n tf.TensorSpec((None,), tf.string, 'uid')]\n \n def build_graph(self, image ,label,vft_threshold,uid):\n\n image = tf.expand_dims(tf.cast(image,tf.float32),axis=-1) / 128.0 - 1\n\n f1,n1=get_features(image, 'pathway1')\n pred,sig_pred,n = get_keras_model(f1)\n\n model_summary()\n print(f1)\n print(pred)\n\n\n '''\n \n with guided_relu():\n saliency_map(pred, tf.get_default_graph().get_tensor_by_name(n1), name=\"saliency_p1\")\n saliency_map(pred, tf.get_default_graph().get_tensor_by_name(n), name=\"saliency_p5\")\n '''\n def dice_coef_loss(y_true, y_pred):\n def dice_coef(y_true, y_pred, smooth=1):\n \"\"\"\n Dice = (2*|X & Y|)/ (|X|+ |Y|)\n = 2*sum(|A*B|)/(sum(A^2)+sum(B^2))\n ref: https://arxiv.org/pdf/1606.04797v1.pdf\n \"\"\"\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n return (2. * intersection + smooth) / (\n K.sum(K.square(y_true), -1) + K.sum(K.square(y_pred), -1) + smooth)\n\n return 1 - dice_coef(y_true, y_pred)\n\n if CNN_OUT_GarWayHeathmap:\n y_true, y_pred = label, sig_pred\n else:\n y_true, y_pred = vft_threshold, sig_pred # vft_threshold[..., 1:], pred[..., 1:]\n print(y_true, y_pred)\n\n # dice_loss = dice_coef_loss(y_true, y_pred)\n # ce_loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)\n # dice_cost = tf.reduce_mean(dice_loss, name='dice_loss')\n # ce_cost = tf.reduce_mean(ce_loss, name='cross_entropy_loss')\n\n sce_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=pred)\n mse_loss = tf.keras.losses.mean_squared_error(y_true, y_pred)\n mae_loss = tf.keras.losses.MAE(y_true, y_pred)\n\n\n mse_cost = tf.reduce_mean(mse_loss, name='mean_squared_error')\n mae_cost = tf.reduce_mean(mae_loss, name='mean_absolute_error')\n sce_cost = tf.reduce_mean(sce_loss, name='sigmoid_cross_entropy')\n\n print(sce_loss, mse_loss, mae_loss)\n print(\"READUCED_MEAN\")\n print(sce_cost, mse_cost, mae_cost)\n\n # weight decay on all W\n\n wd_cost = tf.multiply(1e-4, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')\n add_moving_summary(sce_cost, mse_cost, mae_cost, wd_cost)\n\n add_param_summary(('.*/W', ['histogram'])) # monitor W\n self.cost = tf.add_n([sce_cost, wd_cost], name='cost')\n print(self.cost)\n return self.cost\n def optimizer(self):\n lr = tf.compat.v1.get_variable('learning_rate', initializer=0.01, trainable=False)\n tf.summary.scalar('learning_rate', lr)\n return tf.compat.v1.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)\n\ndef test(ds, model_path='',oct_type = 'onh',csv_save_path = 'test_results.csv',vft_type= 'THRESHOLD'):\n\n in_names = ['input1', 'label', 'vft_threshold', 'uid']\n\n pred = PredictConfig(\n session_init=SmartInit(model_path),\n model=Model(),\n input_names=in_names,\n output_names=['uid', 'vft_threshold', 'pred', 'logistic_loss', 'Mean_1', 'Mean_2'])\n\n df_result = perf_measures(ds, pred=pred, oct_type=oct_type,vft_type= vft_type)\n df_result.to_csv(csv_save_path)\n return df_result\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--out_dir', default=os.getcwd() , help='output dir name')# metavar='out_dir'\n parser.add_argument('--data_dir', 
default=None, help='data dir name') # ,metavar='data_dir'\n parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') # nargs='*' in multi mode\n parser.add_argument('--load', help='load model')\n parser.add_argument('--drop_1', default=70, help='Epoch to drop learning rate to 0.01.')#150 # nargs='*' in multi mode\n parser.add_argument('--drop_2', default=120, help='Epoch to drop learning rate to 0.001')#225\n parser.add_argument('--depth', default=20, help='The depth of densenet')# 40\n parser.add_argument('--max_epoch', default=150, help='max epoch') #300\n parser.add_argument('--task', help='task to perform: \"train\" or \"test\" or \"all\" ',\n choices=['all', 'test', 'train'], default='train')\n parser.add_argument('--oct_type', help='OCT type to use \"onh\" or \"mac\" ',\n choices=['onh', 'mac'], default='onh')\n parser.add_argument('--fold', help='fold number ',\n choices=['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'], default='1')\n parser.add_argument('--pred', help='Prediction map',\n choices=['THRESHOLD', 'PATTERN'], default='THRESHOLD')\n parser.add_argument('--load_model', help='load model directory '\n , default=None)\n parser.add_argument('--model_name', help='model name e.g. model-150000 '\n , default=None)\n args = parser.parse_args()\n\n # prepare dataset\n if args.data_dir is None:\n #octdatadir,vftdatadir='../sample_data/oct/', '/Users/gyasmeen/Desktop/Results/nyu_vft_xml/'\n base_dir = '/dccstor/aurmmaret1/Datasets/NYU/'\n octdatadir,vftdatadir = base_dir+'MAC_ONH_1pairPerVisit/MAC_ONH_1pairPerVisit/',base_dir +'nyu_vft_xml/'\n else:\n octdatadir,vftdatadir = args.data_dir +'/', args.data_dir+'/'\n\n\n # oct-vft_si_exp10\n # log_dir = '/mnt/results/structure-function-results/training-ybXjrevMg/train_log/'\n\n # oct-vft_si_exp11_linear\n # log_dir = '/mnt/results/structure-function-results/training-1HwkNRdMg/train_log/'\n\n # oct-vft_si_exp13_global\n #log_dir = '/mnt/results/structure-function-results/training-h2xVagdMg/train_log/'\n\n if args.load_model is None:\n log_dir = args.out_dir + \"/train_log\"\n else:\n log_dir = args.load_model\n #log_dir = '/mnt/results/structure-function-results/training-mocNhn5Gg/train_log/'\n\n # onh: log_dir = '/mnt/results/structure-function-results/training-SycBtn5Gg/train_log/'\n\n if args.task != 'train':\n # oct-onh-vft_si_exp11_linear (set: 3000)\n #model_path = '/mnt/results/structure-function-results/training-1HwkNRdMg/train_log/'\n #model_name = 'model-116250'\n\n # oct-mac-vft_si_exp15_vft-linear (set: 3000)\n if args.load_model is None:\n print('You must enter model path directory')\n exit()\n if args.model_name is None:\n print('You must enter model name')\n exit()\n model_path = args.load_model\n model_name = args.model_name\n\n\n dataset_test, te_batch_num = get_data(octdatadir, vftdatadir, SHAPE=SHAPE, BATCH=BATCH, task= args.task,Multi_Input=False,OCT_TYPE=args.oct_type,vft_type = args.pred)\n df_result = test(dataset_test,model_path=model_path+model_name,oct_type=args.oct_type,csv_save_path=model_path+'perf_measures_oct-'+args.oct_type+'-f'+str(args.fold)+'_input-single.csv', vft_type = args.pred)\n print('Test is finished for {} samples', len(df_result))\n elif args.task =='train':\n\n\n if args.out_dir is None:\n logger.auto_set_dir()\n else:\n logger_dir = os.path.join(log_dir)\n logger.set_logger_dir(logger_dir,action='k')\n\n dataset_train,batch_num = get_data(octdatadir,vftdatadir, SHAPE=SHAPE,BATCH=BATCH ,task=args.task,Multi_Input=False,OCT_TYPE=args.oct_type,fold = args.fold,vft_type = 
args.pred)\n\n steps_per_epoch = batch_num\n dataset_val,v_batch_num = get_data(octdatadir,vftdatadir, SHAPE=SHAPE,BATCH=BATCH ,task='val',Multi_Input=False,OCT_TYPE=args.oct_type,fold = args.fold,vft_type = args.pred)\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.9\n session = tf.Session(config=config)\n\n '''\n \n extra_display = [\"cost\"]\n extra_callbacks = [\n ProgressBar(extra_display),\n ]\n monitors = [\n ScalarPrinter(enable_step=True),\n ]\n '''\n #cfg = TrainConfig(\n cfg = AutoResumeTrainConfig(\n model=Model(),\n dataflow=dataset_train,\n callbacks=[\n PeriodicTrigger(ModelSaver(max_to_keep=10,keep_checkpoint_every_n_hours=1), every_k_epochs=5),\n InferenceRunner(\n dataset_val,\n ScalarStats(['sigmoid_cross_entropy', 'mean_squared_error', 'mean_absolute_error'])),\n #ScalarStats(['dice_loss','cross_entropy_loss','mean_squared_error','mean_absolute_error'])),\n # record GPU utilization during training\n GPUUtilizationTracker(),\n ScheduledHyperParamSetter('learning_rate',\n [(args.drop_1, 0.001), (args.drop_2, 0.0001)]),\n\n ],\n steps_per_epoch=steps_per_epoch,\n max_epoch=args.max_epoch,sess=session\n )\n\n\n print('1#1'*100)\n print(get_num_gpu())\n if get_num_gpu() <= 1:\n # single GPU:\n launch_train_with_config(cfg, SimpleTrainer())\n else:\n # multi GPU:\n launch_train_with_config(cfg, SyncMultiGPUTrainerParameterServer(get_num_gpu()))\n # \"Replicated\" multi-gpu trainer is not supported for Keras model\n # since Keras does not respect variable scopes.\n" ]
[ [ "tensorflow.summary.scalar", "tensorflow.reduce_max", "tensorflow.math.sigmoid", "tensorflow.python.ops.gen_nn_ops.relu_grad", "tensorflow.identity", "tensorflow.compat.v1.get_variable", "tensorflow.keras.losses.mean_squared_error", "tensorflow.layers.conv3d", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.add_n", "tensorflow.layers.batch_normalization", "tensorflow.contrib.slim.model_analyzer.analyze_vars", "tensorflow.cast", "tensorflow.gradients", "tensorflow.RegisterGradient", "tensorflow.keras.losses.MAE", "tensorflow.Session", "tensorflow.layers.dense", "tensorflow.layers.dropout", "tensorflow.ConfigProto", "tensorflow.compat.v1.train.MomentumOptimizer", "tensorflow.reduce_mean", "tensorflow.trainable_variables", "tensorflow.compat.v1.variable_scope", "tensorflow.get_default_graph", "tensorflow.TensorSpec", "tensorflow.nn.relu" ] ]
ocefpaf/cf-xarray
[ "7bdb033395c8959a5d8147fe148ec987c1122387" ]
[ "cf_xarray/datasets.py" ]
[ "import numpy as np\nimport xarray as xr\n\nairds = xr.tutorial.open_dataset(\"air_temperature\").isel(time=slice(4), lon=slice(50))\nairds.air.attrs[\"cell_measures\"] = \"area: cell_area\"\nairds.air.attrs[\"standard_name\"] = \"air_temperature\"\nairds.coords[\"cell_area\"] = (\n xr.DataArray(np.cos(airds.lat * np.pi / 180))\n * xr.ones_like(airds.lon)\n * 105e3\n * 110e3\n)\n\nds_no_attrs = airds.copy(deep=True)\nfor variable in ds_no_attrs.variables:\n ds_no_attrs[variable].attrs = {}\n\n\npopds = xr.Dataset()\npopds.coords[\"TLONG\"] = (\n (\"nlat\", \"nlon\"),\n np.ones((20, 30)),\n {\"units\": \"degrees_east\"},\n)\npopds.coords[\"TLAT\"] = (\n (\"nlat\", \"nlon\"),\n 2 * np.ones((20, 30)),\n {\"units\": \"degrees_north\"},\n)\npopds.coords[\"ULONG\"] = (\n (\"nlat\", \"nlon\"),\n 0.5 * np.ones((20, 30)),\n {\"units\": \"degrees_east\"},\n)\npopds.coords[\"ULAT\"] = (\n (\"nlat\", \"nlon\"),\n 2.5 * np.ones((20, 30)),\n {\"units\": \"degrees_north\"},\n)\npopds[\"UVEL\"] = (\n (\"nlat\", \"nlon\"),\n np.ones((20, 30)) * 15,\n {\"coordinates\": \"ULONG ULAT\", \"standard_name\": \"sea_water_x_velocity\"},\n)\npopds[\"TEMP\"] = (\n (\"nlat\", \"nlon\"),\n np.ones((20, 30)) * 15,\n {\"coordinates\": \"TLONG TLAT\", \"standard_name\": \"sea_water_potential_temperature\"},\n)\npopds[\"nlon\"] = (\"nlon\", np.arange(popds.sizes[\"nlon\"]), {\"axis\": \"X\"})\npopds[\"nlat\"] = (\"nlat\", np.arange(popds.sizes[\"nlat\"]), {\"axis\": \"Y\"})\n\n# This dataset has ancillary variables\n\nanc = xr.Dataset()\nanc[\"q\"] = (\n (\"x\", \"y\"),\n np.random.randn(10, 20),\n dict(\n standard_name=\"specific_humidity\",\n units=\"g/g\",\n ancillary_variables=\"q_error_limit q_detection_limit\",\n ),\n)\nanc[\"q_error_limit\"] = (\n (\"x\", \"y\"),\n np.random.randn(10, 20),\n dict(standard_name=\"specific_humidity standard_error\", units=\"g/g\"),\n)\nanc[\"q_detection_limit\"] = xr.DataArray(\n 1e-3, attrs=dict(standard_name=\"specific_humidity detection_minimum\", units=\"g/g\")\n)\n\n\nmultiple = xr.Dataset()\nmultiple.coords[\"x1\"] = (\"x1\", range(30), {\"axis\": \"X\"})\nmultiple.coords[\"y1\"] = (\"y1\", range(20), {\"axis\": \"Y\"})\nmultiple.coords[\"x2\"] = (\"x2\", range(10), {\"axis\": \"X\"})\nmultiple.coords[\"y2\"] = (\"y2\", range(5), {\"axis\": \"Y\"})\n\nmultiple[\"v1\"] = ((\"x1\", \"y1\"), np.ones((30, 20)) * 15)\nmultiple[\"v2\"] = ((\"x2\", \"y2\"), np.ones((10, 5)) * 15)\n\n\nromsds = xr.Dataset()\nromsds[\"s_rho\"] = (\n # fmt: off\n \"s_rho\",\n [-0.983333, -0.95 , -0.916667, -0.883333, -0.85 , -0.816667,\n -0.783333, -0.75 , -0.716667, -0.683333, -0.65 , -0.616667,\n -0.583333, -0.55 , -0.516667, -0.483333, -0.45 , -0.416667,\n -0.383333, -0.35 , -0.316667, -0.283333, -0.25 , -0.216667,\n -0.183333, -0.15 , -0.116667, -0.083333, -0.05 , -0.016667],\n # fmt: on\n {\n \"long_name\": \"S-coordinate at RHO-points\",\n \"valid_min\": -1.0,\n \"valid_max\": 0.0,\n \"standard_name\": \"ocean_s_coordinate_g2\",\n \"formula_terms\": \"s: s_rho C: Cs_r eta: zeta depth: h depth_c: hc\",\n \"field\": \"s_rho, scalar\",\n }\n)\nromsds.coords[\"hc\"] = 20.0\nromsds.coords[\"h\"] = 603.9\nromsds.coords[\"Vtransform\"] = 2.0\nromsds.coords[\"Cs_r\"] = (\n # fmt: off\n \"s_rho\",\n [-9.33010396e-01, -8.09234736e-01, -6.98779853e-01, -6.01008926e-01,\n -5.15058562e-01, -4.39938913e-01, -3.74609181e-01, -3.18031817e-01,\n -2.69209327e-01, -2.27207488e-01, -1.91168387e-01, -1.60316097e-01,\n -1.33957253e-01, -1.11478268e-01, -9.23404709e-02, -7.60741092e-02,\n -6.22718662e-02, 
-5.05823390e-02, -4.07037635e-02, -3.23781605e-02,\n -2.53860004e-02, -1.95414261e-02, -1.46880431e-02, -1.06952600e-02,\n -7.45515186e-03, -4.87981407e-03, -2.89916971e-03, -1.45919898e-03,\n -5.20560097e-04, -5.75774004e-05],\n # fmt: on\n)\nromsds[\"zeta\"] = (\"ocean_time\", [-0.155356, -0.127435])\nromsds[\"temp\"] = (\n (\"ocean_time\", \"s_rho\"),\n [np.linspace(20, 30, 30)] * 2,\n {\"coordinates\": \"z_rho_dummy\", \"standard_name\": \"sea_water_potential_temperature\"},\n)\nromsds[\"temp\"].encoding[\"coordinates\"] = \"s_rho\"\nromsds.coords[\"z_rho_dummy\"] = (\n (\"ocean_time\", \"s_rho\"),\n np.random.randn(2, 30),\n {\"positive\": \"up\"},\n)\n\n\n# Dataset with random data on a grid that is some sort of Mollweide projection\nXX, YY = np.mgrid[:11, :11] * 5 - 25\nXX_bnds, YY_bnds = np.mgrid[:12, :12] * 5 - 27.5\n\nR = 50\ntheta = np.arcsin(YY / (R * np.sqrt(2)))\nlat = np.rad2deg(np.arcsin((2 * theta + np.sin(2 * theta)) / np.pi))\nlon = np.rad2deg(XX * np.pi / (R * 2 * np.sqrt(2) * np.cos(theta)))\n\ntheta_bnds = np.arcsin(YY_bnds / (R * np.sqrt(2)))\nlat_vertices = np.rad2deg(np.arcsin((2 * theta_bnds + np.sin(2 * theta_bnds)) / np.pi))\nlon_vertices = np.rad2deg(XX_bnds * np.pi / (R * 2 * np.sqrt(2) * np.cos(theta_bnds)))\n\nlon_bounds = np.stack(\n (\n lon_vertices[:-1, :-1],\n lon_vertices[:-1, 1:],\n lon_vertices[1:, 1:],\n lon_vertices[1:, :-1],\n ),\n axis=0,\n)\nlat_bounds = np.stack(\n (\n lat_vertices[:-1, :-1],\n lat_vertices[:-1, 1:],\n lat_vertices[1:, 1:],\n lat_vertices[1:, :-1],\n ),\n axis=0,\n)\n\nmollwds = xr.Dataset(\n coords=dict(\n lon=xr.DataArray(\n lon,\n dims=(\"x\", \"y\"),\n attrs={\"units\": \"degrees_east\", \"bounds\": \"lon_bounds\"},\n ),\n lat=xr.DataArray(\n lat,\n dims=(\"x\", \"y\"),\n attrs={\"units\": \"degrees_north\", \"bounds\": \"lat_bounds\"},\n ),\n ),\n data_vars=dict(\n lon_bounds=xr.DataArray(\n lon_bounds, dims=(\"bounds\", \"x\", \"y\"), attrs={\"units\": \"degrees_east\"}\n ),\n lat_bounds=xr.DataArray(\n lat_bounds, dims=(\"bounds\", \"x\", \"y\"), attrs={\"units\": \"degrees_north\"}\n ),\n lon_vertices=xr.DataArray(lon_vertices, dims=(\"x_vertices\", \"y_vertices\")),\n lat_vertices=xr.DataArray(lat_vertices, dims=(\"x_vertices\", \"y_vertices\")),\n ),\n)\n\nforecast = xr.decode_cf(\n xr.Dataset.from_dict(\n {\n \"coords\": {\n \"L\": {\n \"dims\": (\"L\",),\n \"attrs\": {\n \"long_name\": \"Lead\",\n \"standard_name\": \"forecast_period\",\n \"pointwidth\": 1.0,\n \"gridtype\": 0,\n \"units\": \"months\",\n },\n \"data\": [0, 1],\n },\n \"M\": {\n \"dims\": (\"M\",),\n \"attrs\": {\n \"standard_name\": \"realization\",\n \"long_name\": \"Ensemble Member\",\n \"pointwidth\": 1.0,\n \"gridtype\": 0,\n \"units\": \"unitless\",\n },\n \"data\": [0, 1, 2],\n },\n \"S\": {\n \"dims\": (\"S\",),\n \"attrs\": {\n \"calendar\": \"360_day\",\n \"long_name\": \"Forecast Start Time\",\n \"standard_name\": \"forecast_reference_time\",\n \"pointwidth\": 0,\n \"gridtype\": 0,\n \"units\": \"months since 1960-01-01\",\n },\n \"data\": [0, 1, 2, 3],\n },\n \"X\": {\n \"dims\": (\"X\",),\n \"attrs\": {\n \"standard_name\": \"longitude\",\n \"pointwidth\": 1.0,\n \"gridtype\": 1,\n \"units\": \"degree_east\",\n },\n \"data\": [0, 1, 2, 3, 4],\n },\n \"Y\": {\n \"dims\": (\"Y\",),\n \"attrs\": {\n \"standard_name\": \"latitude\",\n \"pointwidth\": 1.0,\n \"gridtype\": 0,\n \"units\": \"degree_north\",\n },\n \"data\": [0, 1, 2, 3, 4, 5],\n },\n },\n \"attrs\": {\"Conventions\": \"IRIDL\"},\n \"dims\": {\"L\": 2, \"M\": 3, \"S\": 4, \"X\": 
5, \"Y\": 6},\n \"data_vars\": {\n \"sst\": {\n \"dims\": (\"S\", \"L\", \"M\", \"Y\", \"X\"),\n \"attrs\": {\n \"pointwidth\": 0,\n \"PDS_TimeRange\": 3,\n \"center\": \"US Weather Service - National Met. Center\",\n \"grib_name\": \"TMP\",\n \"gribNumBits\": 21,\n \"gribcenter\": 7,\n \"gribparam\": 11,\n \"gribleveltype\": 1,\n \"GRIBgridcode\": 3,\n \"process\": 'Spectral Statistical Interpolation (SSI) analysis from \"Final\" run.',\n \"PTVersion\": 2,\n \"gribfield\": 1,\n \"units\": \"Celsius_scale\",\n \"scale_min\": -69.97389221191406,\n \"scale_max\": 43.039306640625,\n \"long_name\": \"Sea Surface Temperature\",\n \"standard_name\": \"sea_surface_temperature\",\n },\n \"data\": np.arange(np.prod((4, 2, 3, 6, 5))).reshape(\n (4, 2, 3, 6, 5)\n ),\n }\n },\n }\n )\n)\n\n\nbasin = xr.DataArray(\n [1, 2, 1, 1, 2, 2, 3, 3, 3, 3],\n dims=(\"time\",),\n attrs={\n \"flag_values\": [1, 2, 3],\n \"flag_meanings\": \"atlantic_ocean pacific_ocean indian_ocean\",\n \"standard_name\": \"region\",\n },\n name=\"basin\",\n)\n\n\nambig = xr.Dataset(\n data_vars={},\n coords={\n \"lat\": (\"lat\", np.zeros(5)),\n \"lon\": (\"lon\", np.zeros(5)),\n \"vertices_latitude\": ([\"lat\", \"bnds\"], np.zeros((5, 2))),\n \"vertices_longitude\": ([\"lon\", \"bnds\"], np.zeros((5, 2))),\n },\n)\nambig[\"lat\"].attrs = {\n \"bounds\": \"vertices_latitude\",\n \"units\": \"degrees_north\",\n \"standard_name\": \"latitude\",\n \"axis\": \"Y\",\n}\nambig[\"lon\"].attrs = {\n \"bounds\": \"vertices_longitude\",\n \"units\": \"degrees_east\",\n \"standard_name\": \"longitude\",\n \"axis\": \"X\",\n}\nambig[\"vertices_latitude\"].attrs = {\n \"units\": \"degrees_north\",\n}\nambig[\"vertices_longitude\"].attrs = {\n \"units\": \"degrees_east\",\n}\n\n\nvert = xr.Dataset.from_dict(\n {\n \"coords\": {\n \"lat\": {\n \"dims\": (\"lat\",),\n \"attrs\": {\n \"standard_name\": \"latitude\",\n \"axis\": \"Y\",\n \"bounds\": \"lat_bnds\",\n \"units\": \"degrees_north\",\n },\n \"data\": [0.0, 1.0],\n },\n \"lon\": {\n \"dims\": (\"lon\",),\n \"attrs\": {\n \"standard_name\": \"longitude\",\n \"axis\": \"X\",\n \"bounds\": \"lon_bnds\",\n \"units\": \"degrees_east\",\n },\n \"data\": [0.0, 1.0],\n },\n \"lev\": {\n \"dims\": (\"lev\",),\n \"attrs\": {\n \"standard_name\": \"atmosphere_hybrid_sigma_pressure_coordinate\",\n \"formula\": \"p = ap + b*ps\",\n \"formula_terms\": \"ap: ap b: b ps: ps\",\n \"postitive\": \"down\",\n \"axis\": \"Z\",\n \"bounds\": \"lev_bnds\",\n },\n \"data\": [0.0, 1.0],\n },\n \"time\": {\n \"dims\": (\"time\",),\n \"attrs\": {\n \"standard_name\": \"time\",\n \"axis:\": \"T\",\n \"bounds\": \"time_bnds\",\n \"units\": \"days since 1850-01-01\",\n \"calendar\": \"proleptic_gregorian\",\n },\n \"data\": [0.5],\n },\n \"lat_bnds\": {\n \"dims\": (\n \"lat\",\n \"bnds\",\n ),\n \"attrs\": {\n \"units\": \"degrees_north\",\n },\n \"data\": [[0.0, 0.5], [0.5, 1.0]],\n },\n \"lon_bnds\": {\n \"dims\": (\n \"lon\",\n \"bnds\",\n ),\n \"attrs\": {\n \"units\": \"degrees_east\",\n },\n \"data\": [[0.0, 0.5], [0.5, 1.0]],\n },\n \"lev_bnds\": {\n \"dims\": (\n \"lev\",\n \"bnds\",\n ),\n \"attrs\": {\n \"standard_name\": \"atmosphere_hybrid_sigma_pressure_coordinate\",\n \"formula\": \"p = ap + b*ps\",\n \"formula_terms\": \"ap: ap b: b ps: ps\",\n },\n \"data\": [[0.0, 0.5], [0.5, 1.0]],\n },\n \"time_bnds\": {\n \"dims\": (\"time\", \"bnds\"),\n \"attrs\": {\n \"units\": \"days since 1850-01-01\",\n \"calendar\": \"proleptic_gregorian\",\n },\n \"data\": [[0.0, 1.0]],\n },\n \"ap\": {\n \"dims\": 
(\"lev\",),\n \"data\": [0.0, 0.0],\n },\n \"b\": {\n \"dims\": (\"lev\",),\n \"data\": [1.0, 0.9],\n },\n \"ap_bnds\": {\n \"dims\": (\n \"lev\",\n \"bnds\",\n ),\n \"data\": [[0.0, 0.0], [0.0, 0.0]],\n },\n \"b_bnds\": {\n \"dims\": (\n \"lev\",\n \"bnds\",\n ),\n \"data\": [[1.0, 0.95], [0.95, 0.9]],\n },\n },\n \"dims\": {\"time\": 1, \"lev\": 2, \"lat\": 2, \"lon\": 2, \"bnds\": 2},\n \"data_vars\": {\n \"o3\": {\n \"dims\": (\"time\", \"lev\", \"lat\", \"lon\"),\n \"attrs\": {\n \"cell_methods\": \"area: time: mean\",\n \"cell_measures\": \"area: areacella\",\n \"missing_value\": 1e20,\n \"_FillValue\": 1e20,\n },\n \"data\": np.ones(8, dtype=np.float32).reshape((1, 2, 2, 2)),\n },\n \"areacella\": {\n \"dims\": (\"lat\", \"lon\"),\n \"attrs\": {\n \"standard_name\": \"cell_area\",\n \"cell_methods\": \"area: sum\",\n \"missing_value\": 1e20,\n \"_FillValue\": 1e20,\n },\n \"data\": np.ones(4, dtype=np.float32).reshape((2, 2)),\n },\n \"ps\": {\n \"dims\": (\"time\", \"lat\", \"lon\"),\n \"data\": np.ones(4, dtype=np.float32).reshape((1, 2, 2)),\n },\n },\n }\n)\n" ]
[ [ "numpy.sqrt", "numpy.ones", "numpy.zeros", "numpy.random.randn", "numpy.cos", "numpy.arange", "numpy.prod", "numpy.stack", "numpy.sin", "numpy.linspace" ] ]
m1258218761/p-score
[ "6031d0352561ba3b5baa352645c6cfdf560224f2" ]
[ "Model/data_util.py" ]
[ "# coding=utf-8\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MaxAbsScaler\n\n\nclass data(object):\n def __init__(self, workpath, label_number, features_norm=False, Standardization=False, discretization=0,\n run_model='Train', train_file='', validation_file='', test_file='', discretization_f=[]):\n self.workpath = workpath\n self.train_file = train_file\n self.validation_file = validation_file\n self.test_file = test_file\n self.label_number = label_number\n self.run_model = run_model\n self.Standardization = Standardization ##是否对特征进行标准化处理,True:是,False:否\n self.discretization = discretization ##0:等宽离散,1:等频离散,2:回归,不进行离散化\n self.discretization_f = discretization_f ##等频离散分段阈值,内置变量,自动获取,无需手动添加\n self.features_norm = features_norm ##碱性、质量、螺旋性、疏水性、等电点是否进行/Max了处理,True:进行了处理,False:未进行处理\n if self.features_norm:\n ## t/max\n self.dicB = {'N': 0.8978902953586498, 'Q': 0.9037974683544303, 'D': 0.880168776371308,\n 'Y': 0.8991561181434599, 'W': 0.9118143459915612,\n 'I': 0.889451476793249, 'L': 0.8843881856540085, 'V': 0.880590717299578,\n 'c': 0.8700421940928269, 'P': 0.9046413502109705,\n 'm': 0.9, 'MissV': 0.0, 'K': 0.9358649789029536, 'T': 0.8932489451476793,\n 'A': 0.8708860759493671, 'H': 0.9438818565400844,\n 'R': 1.0, 'C': 0.8700421940928269, 'F': 0.8949367088607595, 'G': 0.8552742616033755,\n 'E': 0.9097046413502109, 'n': 0.8978902953586498,\n 'S': 0.8759493670886076, 'M': 0.9}\n self.dicM = {'V': 0.5323988594046454, 'D': 0.6181608323113273, 'E': 0.6934816714418974,\n 'm': 0.7901759595812781, 'L': 0.6077196985352155,\n 'T': 0.5430355280815122, 'K': 0.6883890580571952, 'n': 0.6181608324456787,\n 'Q': 0.6881935231564403, 'Y': 0.8763108933017181,\n 'A': 0.38175718114350515, 'C': 0.5535767697078718, 'S': 0.4677146889509421,\n 'R': 0.8388955681494804, 'N': 0.6128726786518177,\n 'MissV': 0.0, 'I': 0.6077196985352155, 'H': 0.7365617907241521, 'F': 0.7903533801202285,\n 'M': 0.7042184479690119, 'c': 0.8600131099989604,\n 'G': 0.30643634201293507, 'W': 1.0, 'P': 0.521566650452971}\n self.dicS = {'A': 0, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, 'H': 6, 'I': 7,\n 'K': 8, 'L': 9, 'M': 10, 'N': 11, 'P': 12, 'Q': 13, 'R': 14,\n 'S': 15, 'T': 16, 'V': 17, 'W': 18, 'Y': 19,\n 'c': 20, 'm': 21, 'n': 22, 'MissV': -1} # 氨基酸编号\n self.dicHe = {'c': 0.6124031007751938, 'K': 0.6821705426356589, 'H': 0.751937984496124,\n 'P': 0.441860465116279, 'I': 1.0, 'T': 0.8449612403100776,\n 'n': 0.7286821705426356, 'm': 0.945736434108527, 'Q': 0.7441860465116279,\n 'R': 0.7364341085271318, 'Y': 0.8604651162790699,\n 'G': 0.8914728682170542, 'A': 0.9612403100775193, 'N': 0.7286821705426356,\n 'M': 0.945736434108527, 'F': 0.9767441860465116,\n 'E': 0.6589147286821705, 'V': 0.9844961240310077, 'S': 0.7751937984496123,\n 'W': 0.8294573643410853, 'L': 0.9922480620155039,\n 'MissV': 0.0, 'C': 0.6124031007751938, 'D': 0.689922480620155}\n self.dicHy = {'M': 0.646, 'A': 0.032, 'L': 0.952, 'K': -1.0, 'P': -0.984, 'C': 0.5, 'E': -0.3,\n 'S': -0.5700000000000001, 'I': 0.882,\n 'H': -0.9259999999999999, 'Y': 0.4, 'V': 0.604, 'G': -0.662, 'MissV': 0.0,\n 'T': -0.21600000000000003, 'm': 0.646, 'R': -0.554,\n 'N': -0.758, 'c': 0.5, 'W': 0.976, 'F': 1.0, 'Q': -0.5519999999999999, 'n': -0.758,\n 'D': -0.49800000000000005}\n self.dicP = {'H': 0.7053903345724907, 'c': 0.466542750929368, 'T': 0.6068773234200744,\n 'N': 0.5027881040892194, 'V': 0.5548327137546468,\n 'n': 0.5027881040892194, 'R': 1.0, 'S': 0.5278810408921933, 'Q': 0.525092936802974,\n 'K': 0.9052044609665428, 'M': 0.5343866171003717,\n 
'E': 0.2992565055762082, 'G': 0.5548327137546468, 'Y': 0.5260223048327137,\n 'P': 0.5855018587360594, 'MissV': 0.0, 'D': 0.2760223048327138,\n 'I': 0.5594795539033457, 'L': 0.5557620817843867, 'C': 0.466542750929368,\n 'W': 0.5473977695167286, 'm': 0.5343866171003717,\n 'F': 0.5092936802973979, 'A': 0.5594795539033457}\n ## t/max\n self.PROTON = 0.005413156628447999\n self.H = 0.0054161046488816296\n self.O = 0.08595751115063499\n self.H2O = self.H * 2 + self.O\n else:\n self.dicB = {'A': 206.4, 'C': 206.2, 'D': 208.6, 'E': 215.6, 'F': 212.1,\n 'G': 202.7, 'H': 223.7, 'I': 210.8, 'K': 221.8, 'L': 209.6,\n 'M': 213.3, 'N': 212.8, 'P': 214.4, 'Q': 214.2, 'R': 237.0,\n 'S': 207.6, 'T': 211.7, 'V': 208.7, 'W': 216.1, 'Y': 213.1,\n 'c': 206.2, 'm': 213.3, 'n': 212.8, 'MissV': 0} # 碱性\n self.dicM = {'A': 71.037114, 'C': 103.009185, 'D': 115.026943, 'E': 129.042593, 'F': 147.068414,\n 'G': 57.021464, 'H': 137.058912, 'I': 113.084064, 'K': 128.094963, 'L': 113.084064,\n 'M': 131.040485, 'N': 114.042927, 'P': 97.052764, 'Q': 128.058578, 'R': 156.101111,\n 'S': 87.032028, 'T': 101.047678, 'V': 99.068414, 'W': 186.079313, 'Y': 163.063329,\n 'c': 160.0306486796, 'm': 147.035399708, 'n': 115.026943025, 'MissV': 0} # 质量\n self.dicS = {'A': 0, 'C': 1, 'D': 2, 'E': 3, 'F': 4, 'G': 5, 'H': 6, 'I': 7,\n 'K': 8, 'L': 9, 'M': 10, 'N': 11, 'P': 12, 'Q': 13, 'R': 14,\n 'S': 15, 'T': 16, 'V': 17, 'W': 18, 'Y': 19,\n 'c': 20, 'm': 21, 'n': 22, 'MissV': -1} # 氨基酸编号\n self.dicHe = {'A': 1.24, 'C': 0.79, 'D': 0.89, 'E': 0.85, 'F': 1.26, 'G': 1.15, 'H': 0.97,\n 'I': 1.29, 'K': 0.88, 'L': 1.28, 'M': 1.22, 'N': 0.94, 'P': 0.57, 'Q': 0.96,\n 'R': 0.95, 'S': 1.00, 'T': 1.09, 'V': 1.27, 'W': 1.07, 'Y': 1.11,\n 'c': 0.79, 'm': 1.22, 'n': 0.94, 'MissV': 0} # 螺旋性\n self.dicHy = {'A': 0.16, 'C': 2.50, 'D': -2.49, 'E': -1.50, 'F': 5.00, 'G': -3.31, 'H': -4.63, 'I': 4.41,\n 'K': -5.00, 'L': 4.76, 'M': 3.23, 'N': -3.79, 'P': -4.92, 'Q': -2.76, 'R': -2.77, 'S': -2.85,\n 'T': -1.08, 'V': 3.02, 'W': 4.88, 'Y': 2.00,\n 'c': 2.50, 'm': 3.23, 'n': -3.79, 'MissV': 0} # 疏水性\n self.dicP = {'A': 6.02, 'C': 5.02, 'D': 2.97, 'E': 3.22, 'F': 5.48, 'G': 5.97, 'H': 7.59, 'I': 6.02,\n 'K': 9.74, 'L': 5.98, 'M': 5.75, 'N': 5.41, 'P': 6.30, 'Q': 5.65, 'R': 10.76, 'S': 5.68,\n 'T': 6.53, 'V': 5.97, 'W': 5.89, 'Y': 5.66,\n 'c': 5.02, 'm': 5.75, 'n': 5.41, 'MissV': 0} # 等电点\n\n self.PROTON = 1.007276466583\n self.H = 1.0078250322\n self.O = 15.9949146221\n self.H2O = self.H * 2 + self.O\n\n self.prev = 1\n self.next = 1\n self.aa2vector = self.AAVectorDict()\n self.AA_idx = dict(zip(\"ACDEFGHIKLMNPQRSTVWY\", range(0, len(self.aa2vector))))\n\n def AAVectorDict(self):\n aa2vector_map = {}\n s = \"ACDEFGHIKLMNPQRSTVWY\"\n v = [0] * len(s)\n v[0] = 1\n for i in range(len(s)):\n aa2vector_map[s[i]] = list(v)\n v[i], v[(i + 1) % 20] = 0, 1\n return aa2vector_map\n\n def get_features_2_norm_105(self, line, fragmentation_window_size=1):\n features_list = []\n line = line.replace('\\n', '').split('\\t')\n peptide = line[0]\n charge = int(line[1])\n b_ion = line[2].split(',')[0]\n y_ion = line[2].split(',')[1]\n peptide_list = []\n for i in range(fragmentation_window_size - 1):\n peptide_list.append('MissV')\n peptide_list.extend(list(peptide))\n for i in range(fragmentation_window_size - 1):\n peptide_list.append('MissV')\n\n # 获取碎裂点左右各fragmentation_window_siez个氨基酸\n len_b = len(b_ion)\n r_l_fragmentation = peptide_list[len_b - 1:len_b - 1 + 2 * fragmentation_window_size]\n\n # 碎裂窗口\n temp_features = [0] * 23 * 2 * fragmentation_window_size\n j = 0\n for i 
in range(len(r_l_fragmentation)):\n aa_index = self.dicS[r_l_fragmentation[i]]\n if aa_index != -1:\n temp_features[j * 23 + aa_index] = 1\n j += 1\n features_list.extend(temp_features)\n\n # C端,N端肽的身份\n temp_features = [0] * 46\n temp_features[self.dicS[peptide[0]]] = 1\n temp_features[23 + self.dicS[peptide[-1]]] = 1\n features_list.extend(temp_features)\n\n # 碎裂点是否在肽的一端\n if len(b_ion) == 1:\n features_list.extend([1])\n else:\n features_list.extend([0])\n\n # 肽和b/y离子中碱性氨基酸的数量\n features_list.extend([peptide.count('K') + peptide.count('R') + peptide.count('H')])\n features_list.extend([b_ion.count('K') + b_ion.count('R') + b_ion.count('H')])\n features_list.extend([y_ion.count('K') + y_ion.count('R') + y_ion.count('H')])\n\n # 碎裂点距离肽N端和C端的距离\n features_list.extend([len(b_ion)])\n features_list.extend([len(peptide) - len(b_ion)])\n\n # 肽序列的长度\n features_list.extend([len(peptide)])\n\n # 肽带电量\n p_charge = [0] * 5\n p_charge[charge - 1] = 1\n features_list.extend(p_charge)\n\n # 肽的电子迁移率 Charge-Arg-0.5*(His+Lys)\n Mob = charge - peptide_list.count('R') - 0.5 * (peptide_list.count('H') + peptide_list.count('K'))\n features_list.extend([Mob])\n\n # 标签值\n if self.label_number == 2:\n norm_intensity = line[6].split(',')[0:3:2]\n label = [0] * 2\n label = list(map(float, norm_intensity))\n elif self.label_number == 4:\n norm_intensity = line[6].split(',')\n label = [0] * 4\n label = list(map(float, norm_intensity))\n return features_list, label\n ## 105\n\n def label_discretization(self, label, length):\n print('[data processing]start label discretization !')\n if self.discretization == 0: ##等宽离散\n label = np.ceil(np.array(label) * 10).astype(int).tolist() ##区间宽度 0.1\n # label = np.ceil(np.array(label)*20).astype(int).tolist() ##区间宽度 0.05\n if self.discretization == 1: ##等频离散\n class_number = 9\n w = [1.0 * i / class_number for i in range(class_number)]\n p_label = pd.DataFrame(np.array(label).reshape(-1))\n if self.run_model == 'Train':\n _p_label = p_label[p_label[0].isin([0])] ##除开0,对其他数值进行等频离散\n print(_p_label.describe(w))\n w = _p_label.describe(w)[4:4 + class_number + 2]\n p_label = np.array(p_label).reshape(-1)\n w = np.array(w).reshape(-1)\n w[0] = w[0] - 1e-10\n print('标签离散化分段值(单独计算0): ' + str(w))\n self.discretization_f = w\n elif self.run_model == 'Test':\n p_label = np.array(p_label).reshape(-1)\n w = self.discretization_f\n label = pd.cut(p_label, w[:], labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], right=True)\n label = pd.DataFrame(np.array(label))\n label.fillna(0, inplace=True)\n if self.label_number == 2:\n label = np.array(label, dtype=np.int32).reshape(-1, 2)\n else:\n label = np.array(label, dtype=np.int32).reshape(-1, 4)\n label = label.tolist()\n if self.discretization == 2: ## 未进行离散化\n pass\n d_label = []\n start = 0\n for l in length:\n d_label.append(label[start:start + l[0]])\n start += l[0]\n print('[data processing]label discretization end !')\n return d_label\n\n def get_batch(self, data, label, length, batch_size):\n batch_data = []\n batch_label = []\n batch_length = []\n batch_index = []\n print('[data processing]start get batch !')\n data_size = len(data)\n if self.run_model == 'Train':\n p = np.random.permutation(data_size).tolist()\n else:\n p = np.linspace(0, data_size - 1, data_size, dtype=int).tolist()\n batch_count = 0\n end = 0\n start = 0\n while (end + batch_size - data_size) < batch_size:\n # while (end+batch_size) < data_size:\n start = batch_size * batch_count\n batch_count += 1\n end = start + batch_size\n temp_index = p[start:end]\n temp_data = []\n 
temp_label = []\n temp_length = []\n batch_index.append(temp_index)\n for i in temp_index:\n temp_data.append(data[i])\n temp_label.append(label[i])\n temp_length.extend(length[i])\n batch_data.append(temp_data)\n batch_label.append(temp_label)\n batch_length.append(temp_length)\n print('[data processing]get batch end !')\n return batch_data, batch_label, batch_length, batch_index\n\n def padding(self, batch_data, batch_label, batch_length, batch_size):\n for index in range(len(batch_data)):\n batch_max_len = max(batch_length[index])\n for l in range(len(batch_length[index])):\n if batch_length[index][l] < batch_max_len:\n for ii in range(batch_max_len - batch_length[index][l]):\n batch_data[index][l].append([0.0] * len(batch_data[index][0][0]))\n if self.label_number == 2:\n batch_label[index][l].append([-1, -1])\n else:\n batch_label[index][l].append([-1, -1, -1, -1])\n return batch_data, batch_label, batch_length\n\n def data_Standardization(self, data, length):\n print('[data processing]start data standardization !')\n # data = np.array(data)\n if self.Standardization:\n # scaler = StandardScaler()\n # trans_data = scaler.fit_transform(data)\n scaler = MaxAbsScaler()\n data = scaler.fit_transform(data)\n else:\n # data = data\n pass\n _data = []\n start = 0\n for l in length:\n _data.append(data[start:(start + l[0])])\n start += l[0]\n print('[data processing]data standardization end !')\n return _data\n\n def read_data(self, train_file):\n with open(self.workpath + '/' + train_file) as r:\n _content = []\n _label = []\n _length = []\n line = r.readline()\n while True:\n if not line.strip('\\n'):\n break\n pairs_count = len(line.split('\\t')[0]) - 2\n i = 0\n one_seq_content = []\n one_seq_label = []\n one_seq_length = [pairs_count + 1]\n while i <= pairs_count and line.strip('\\n'):\n features, labels = self.get_features_2_norm_105(line)\n one_seq_content.extend([features])\n one_seq_label.extend([labels])\n line = r.readline()\n i += 1\n _content.extend(one_seq_content)\n _label.extend(one_seq_label)\n _length.append(one_seq_length)\n print('[data processing]read data end !')\n return _content, _label, _length\n\n def GetData(self, batch_size):\n print('[data processing]data processing model: ' + self.run_model)\n if self.run_model == 'Train':\n print('[data processing]Start processing data files!')\n train_content, train_label, train_length = self.read_data(self.train_file)\n validation_content, validation_label, validation_length = self.read_data(self.validation_file)\n train_label = self.label_discretization(train_label, train_length)\n train_content = self.data_Standardization(train_content, train_length)\n validation_label = self.label_discretization(validation_label, validation_length)\n validation_content = self.data_Standardization(validation_content, validation_length)\n train_content, train_label, train_length, batch_index = self.get_batch(train_content, train_label,\n train_length, batch_size)\n validation_content, validation_label, validation_length, v_batch_index = self.get_batch(validation_content,\n validation_label,\n validation_length,\n batch_size)\n train_content, train_label, train_length = self.padding(train_content, train_label, train_length,\n batch_size)\n validation_content, validation_label, validation_length = self.padding(validation_content, validation_label,\n validation_length, batch_size)\n _l = int(len(train_content) * 0.8)\n train_content = train_content[:_l]\n train_label = train_label[:_l]\n train_length = train_length[:_l]\n elif self.run_model == 
'Test':\n train_content, train_label, train_length = self.read_data(self.test_file)\n train_label = self.label_discretization(train_label, train_length)\n train_content = self.data_Standardization(train_content, train_length)\n train_content, train_label, train_length, batch_index = self.get_batch(train_content, train_label,\n train_length, batch_size)\n train_content, train_label, train_length = self.padding(train_content, train_label, train_length,\n batch_size)\n validation_content = []\n validation_label = []\n validation_length = []\n return train_content, train_label, train_length, validation_content, validation_label, validation_length\n # format:btach_size,seq_length,features_number\n\n\nif __name__ == '__main__':\n test = data('E:/data/1/test', 2, discretization=0)\n Train_data, Train_label, Train_length, Test_data, Test_label, Test_length = test.GetData(3)\n print(len(Train_data[0][0]))\n print(Train_label[0])\n print(Train_length)\n" ]
[ [ "numpy.random.permutation", "sklearn.preprocessing.MaxAbsScaler", "pandas.cut", "numpy.array", "numpy.linspace" ] ]
maroomir/deep-rl-stock-simple
[ "45101312056c7c7d222c391a875c024b97022809" ]
[ "train.py" ]
[ "import gym\nimport numpy as np\n\nfrom stable_baselines3.ddpg.policies import MlpPolicy\nfrom stable_baselines3.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise\nfrom stable_baselines3 import DDPG\nfrom stable_baselines3.common.monitor import Monitor\n\nfrom env import Stock\n\nfrom utils.callbacks import getBestRewardCallback, logDir\n\nenv = Stock(code=\"005930\", verbose=False)\nenv = Monitor(env, logDir(), allow_early_resets=True)\n\nbestRewardCallback = getBestRewardCallback()\n\n# the noise objects for DDPG\nn_actions = env.action_space.shape[-1]\nparam_noise = None\naction_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))\n\nmodel = DDPG(MlpPolicy, env, verbose=1, action_noise=action_noise)\nmodel.learn(total_timesteps=20000, log_interval=100, callback=bestRewardCallback)\n" ]
[ [ "numpy.ones", "numpy.zeros" ] ]
labs15-baseball-pitch-predictor/flask_app
[ "86e63b94d12d309e5080926b5e75333cc09cb2f1" ]
[ "Utilities/pickles_to_csvs.py" ]
[ "## Takes Pickle Files and Exports Them to CSVs\nimport pandas as pd\nsource_path = \"Season_pickles/\"\ndestination_path = \"\"\n\nfilenames = [str(i) + '.pkl' for i in range(2010,2020)]\n\nseasons = ['df_' + str(i) for i in range(10,20)]\n\nseason_dataframes = {}\n\nfor i in list(zip(filenames, seasons)):\n path = source_path + i[0]\n df = pd.read_pickle(path, compression='zip')\n df = df.drop(columns = ['des'])\n season_dataframes[i[1]] = df\n\n\ni = 2010\nfor df in season_dataframes.values():\n path = destination_path + str(i) + \".csv\"\n df.to_csv(path)\n print(path[-16:], '...Done')\n i += 1\n" ]
[ [ "pandas.read_pickle" ] ]
vuiseng9/CalibTIP
[ "69077c92611b079234706784c344e8c9156f3283" ]
[ "models/modules/batch_norm.py" ]
[ "import torch\r\nimport torch.nn as nn\r\nfrom torch.nn import BatchNorm1d as _BatchNorm1d\r\nfrom torch.nn import BatchNorm2d as _BatchNorm2d\r\nfrom torch.nn import BatchNorm3d as _BatchNorm3d\r\n\r\n\"\"\"\r\nBatchNorm variants that can be disabled by removing all parameters and running stats\r\n\"\"\"\r\n\r\n\r\ndef has_running_stats(m):\r\n return getattr(m, 'running_mean', None) is not None\\\r\n or getattr(m, 'running_var', None) is not None\r\n\r\n\r\ndef has_parameters(m):\r\n return getattr(m, 'weight', None) is not None\\\r\n or getattr(m, 'bias', None) is not None\r\n\r\n\r\nclass BatchNorm1d(_BatchNorm1d):\r\n def forward(self, inputs):\r\n if not (has_parameters(self) or has_running_stats(self)):\r\n return inputs\r\n return super(BatchNorm1d, self).forward(inputs)\r\n\r\n\r\nclass BatchNorm2d(_BatchNorm2d):\r\n def forward(self, inputs):\r\n if not (has_parameters(self) or has_running_stats(self)):\r\n return inputs\r\n return super(BatchNorm2d, self).forward(inputs)\r\n\r\n\r\nclass BatchNorm3d(_BatchNorm3d):\r\n def forward(self, inputs):\r\n if not (has_parameters(self) or has_running_stats(self)):\r\n return inputs\r\n return super(BatchNorm3d, self).forward(inputs)\r\n\r\n\r\nclass MeanBatchNorm2d(nn.BatchNorm2d):\r\n \"\"\"BatchNorm with mean-only normalization\"\"\"\r\n\r\n def __init__(self, num_features, momentum=0.1, bias=True):\r\n nn.Module.__init__(self)\r\n self.register_buffer('running_mean', torch.zeros(num_features))\r\n self.momentum = momentum\r\n self.num_features = num_features\r\n if bias:\r\n self.bias = nn.Parameter(torch.zeros(num_features))\r\n else:\r\n self.register_parameter('bias', None)\r\n\r\n def forward(self, x):\r\n if not (has_parameters(self) or has_running_stats(self)):\r\n return x\r\n if self.training:\r\n numel = x.size(0) * x.size(2) * x.size(3)\r\n mean = x.sum((0, 2, 3)) / numel\r\n with torch.no_grad():\r\n self.running_mean.mul_(self.momentum)\\\r\n .add_(1 - self.momentum, mean)\r\n else:\r\n mean = self.running_mean\r\n if self.bias is not None:\r\n mean = mean - self.bias\r\n return x - mean.view(1, -1, 1, 1)\r\n\r\n def extra_repr(self):\r\n return '{num_features}, momentum={momentum}, bias={has_bias}'.format(\r\n has_bias=self.bias is not None, **self.__dict__)\r\n" ]
[ [ "torch.zeros", "torch.no_grad", "torch.nn.Module.__init__" ] ]
aivclab/vision
[ "6c644dd72f68bca608a2900e5d9461e90fe841eb" ]
[ "neodroidvision/classification/procedures/classification_procedures.py" ]
[ "import copy\nimport numpy\nimport string\nimport time\nimport torch\nimport tqdm\nfrom draugr.numpy_utilities import Split\nfrom draugr.python_utilities import (\n rgb_drop_alpha_batch_nhwc,\n torch_vision_normalize_batch_nchw,\n)\nfrom draugr.torch_utilities import (\n TorchEvalSession,\n TorchTrainSession,\n global_torch_device,\n to_tensor,\n uint_nhwc_to_nchw_float_batch,\n)\nfrom draugr.visualisation import confusion_matrix_plot\nfrom matplotlib import pyplot\nfrom munin.generate_report import ReportEntry, generate_html, generate_pdf\nfrom munin.utilities.html_embeddings import generate_math_html, plt_html\nfrom pathlib import Path\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\nfrom warg import NOD\n\n__all__ = [\"test_model\", \"pred_target_train_model\"]\n\n\ndef test_model(model, data_iterator, latest_model_path, num_columns: int = 2):\n model = model.eval().to(global_torch_device())\n\n inputs, labels = next(data_iterator)\n\n inputs = inputs.to(global_torch_device())\n labels = labels.to(global_torch_device())\n with torch.no_grad():\n pred = model(inputs)\n\n y_pred = pred.data.to(\"cpu\").numpy()\n y_pred_max = numpy.argmax(y_pred, axis=-1)\n accuracy_w = accuracy_score(labels, y_pred_max)\n precision_a, recall_a, fscore_a, support_a = precision_recall_fscore_support(\n labels, y_pred_max\n )\n precision_w, recall_w, fscore_w, support_w = precision_recall_fscore_support(\n labels, y_pred_max, average=\"weighted\"\n )\n\n _, predicted = torch.max(pred, 1)\n\n truth_labels = labels.data.to(\"cpu\").numpy()\n\n input_images_rgb = [\n default_torch_retransform(x) for x in inputs.to(global_torch_device())\n ]\n\n cell_width = (800 / num_columns) - 6 - 6 * 2\n\n pyplot.plot(numpy.random.random((3, 3)))\n\n alphabet = string.ascii_lowercase\n class_names = numpy.array([*alphabet])\n\n samples = len(y_pred)\n predictions = [\n [None for _ in range(num_columns)] for _ in range(samples // num_columns)\n ]\n for i, a, b, c in zip(range(samples), input_images_rgb, y_pred_max, truth_labels):\n pyplot.imshow(a)\n if b == c:\n outcome = \"tp\"\n else:\n outcome = \"fn\"\n\n gd = ReportEntry(\n name=i,\n figure=plt_html(a, format=\"jpg\", size=(cell_width, cell_width)),\n prediction=class_names[b],\n truth=class_names[c],\n outcome=outcome,\n explanation=None,\n )\n\n predictions[i // num_columns][i % num_columns] = gd\n\n cfmat = confusion_matrix_plot(y_pred_max, truth_labels, class_names)\n\n title = \"Classification Report\"\n model_name = latest_model_path\n confusion_matrix = plt_html(cfmat, format=\"png\", size=(800, 800))\n\n accuracy = generate_math_html(\"\\dfrac{tp+tn}{N}\"), None, accuracy_w\n precision = generate_math_html(\"\\dfrac{tp}{tp+fp}\"), precision_a, precision_w\n recall = generate_math_html(\"\\dfrac{tp}{tp+fn}\"), recall_a, recall_w\n f1_score = (\n generate_math_html(\"2*\\dfrac{precision*recall}{precision+recall}\"),\n fscore_a,\n fscore_w,\n )\n support = generate_math_html(\"N_{class_truth}\"), support_a, support_w\n metrics = NOD.nod_of(\n accuracy, precision, f1_score, recall, support\n ).as_flat_tuples()\n\n bundle = NOD.nod_of(title, model_name, confusion_matrix, metrics, predictions)\n\n file_name = Path(title.lower().replace(\" \", \"_\"))\n\n generate_html(file_name.with_suffix(\".html\"), **bundle)\n generate_pdf(file_name.with_suffix(\".html\"), file_name.with_suffix(\".pdf\"))\n\n # plot_utilities.plot_prediction(input_images_rgb, truth_labels, predicted, pred)\n # pyplot.show()\n\n\ndef pred_target_train_model(\n model,\n 
train_iterator,\n criterion,\n optimizer,\n scheduler,\n writer,\n interrupted_path,\n test_data_iterator=None,\n num_updates: int = 250000,\n early_stop=None,\n) -> torch.nn.Module:\n \"\"\"\n\n Args:\n model:\n train_iterator:\n criterion:\n optimizer:\n scheduler:\n writer:\n interrupted_path:\n test_data_iterator:\n num_updates:\n early_stop:\n\n Returns:\n\n \"\"\"\n best_model_wts = copy.deepcopy(model.state_dict())\n best_val_loss = 1e10\n since = time.time()\n\n try:\n sess = tqdm.tqdm(range(num_updates), leave=False, disable=False)\n val_loss = 0\n update_loss = 0\n val_acc = 0\n last_val = None\n last_out = None\n with torch.autograd.detect_anomaly():\n for update_i in sess:\n for phase in [Split.Training, Split.Validation]:\n if phase == Split.Training:\n with TorchTrainSession(model):\n\n input, true_label = zip(*next(train_iterator))\n\n rgb_imgs = torch_vision_normalize_batch_nchw(\n uint_nhwc_to_nchw_float_batch(\n rgb_drop_alpha_batch_nhwc(to_tensor(input))\n )\n )\n true_label = to_tensor(true_label, dtype=torch.long)\n optimizer.zero_grad()\n\n pred = model(rgb_imgs)\n loss = criterion(pred, true_label)\n loss.backward()\n optimizer.step()\n\n if last_out is None:\n last_out = pred\n else:\n if not torch.dist(last_out, pred) > 0:\n print(f\"Same output{last_out},{pred}\")\n last_out = pred\n\n update_loss = loss.data.cpu().numpy()\n writer.scalar(f\"loss/train\", update_loss, update_i)\n\n if scheduler:\n scheduler.step()\n elif test_data_iterator:\n with TorchEvalSession(model):\n test_rgb_imgs, test_true_label = zip(*next(train_iterator))\n test_rgb_imgs = torch_vision_normalize_batch_nchw(\n uint_nhwc_to_nchw_float_batch(\n rgb_drop_alpha_batch_nhwc(to_tensor(test_rgb_imgs))\n )\n )\n\n test_true_label = to_tensor(\n test_true_label, dtype=torch.long\n )\n\n with torch.no_grad():\n val_pred = model(test_rgb_imgs)\n val_loss = criterion(val_pred, test_true_label)\n\n _, cat = torch.max(val_pred, -1)\n val_acc = torch.sum(cat == test_true_label) / float(\n cat.size(0)\n )\n writer.scalar(f\"loss/acc\", val_acc, update_i)\n writer.scalar(f\"loss/val\", val_loss, update_i)\n\n if last_val is None:\n last_val = cat\n else:\n if all(last_val == cat):\n print(f\"Same val{last_val},{cat}\")\n last_val = cat\n\n if val_loss < best_val_loss:\n best_val_loss = val_loss\n\n best_model_wts = copy.deepcopy(model.state_dict())\n sess.write(\n f\"New best validation model at update {update_i} with test_loss {best_val_loss}\"\n )\n torch.save(model.state_dict(), interrupted_path)\n\n if early_stop is not None and val_pred < early_stop:\n break\n sess.set_description_str(\n f\"Update {update_i} - {phase} \"\n f\"update_loss:{update_loss:2f} \"\n f\"test_loss:{val_loss}\"\n f\"val_acc:{val_acc}\"\n )\n\n except KeyboardInterrupt:\n print(\"Interrupt\")\n finally:\n pass\n\n model.load_state_dict(best_model_wts) # load best model weights\n\n time_elapsed = time.time() - since\n print(f\"{time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s\")\n print(f\"Best val loss: {best_val_loss:3f}\")\n\n return model\n" ]
[ [ "torch.sum", "sklearn.metrics.precision_recall_fscore_support", "torch.autograd.detect_anomaly", "torch.no_grad", "numpy.argmax", "numpy.random.random", "sklearn.metrics.accuracy_score", "matplotlib.pyplot.imshow", "torch.dist", "torch.max", "numpy.array" ] ]
MolecularAI/reinvent-scoring
[ "f7e052ceeffd29e17e1672c33607189873c82a45" ]
[ "reinvent_scoring/scoring/score_components/synthetic_accessibility/sas_component.py" ]
[ "import pickle\nfrom typing import List\n\nimport numpy as np\nfrom rdkit.Chem import Mol\nfrom rdkit.Chem.Descriptors import ExactMolWt\n\nfrom reinvent_chemistry import Descriptors\n\nfrom reinvent_scoring.scoring.component_parameters import ComponentParameters\nfrom reinvent_scoring.scoring.score_components import BaseScoreComponent\nfrom reinvent_scoring.scoring.score_components.synthetic_accessibility.sascorer import calculateScore\nfrom reinvent_scoring.scoring.score_summary import ComponentSummary\n\n\nclass SASComponent(BaseScoreComponent):\n def __init__(self, parameters: ComponentParameters):\n super().__init__(parameters)\n self.activity_model = self._load_model(parameters)\n self._descriptors = Descriptors()\n self.fp_parameters = dict(\n radius=3,\n size=4096, # Descriptors class calls this parameter \"size\", RDKit calls it \"nBits\".\n use_features=False, # RDKit has False as default, Descriptors class has True.\n )\n\n def calculate_score(self, molecules: List[Mol], step=-1) -> ComponentSummary:\n score = self.predict_from_molecules(molecules)\n score_summary = ComponentSummary(total_score=score, parameters=self.parameters)\n return score_summary\n\n def predict_from_molecules(self, molecules: List[Mol]) -> np.ndarray:\n if len(molecules) == 0:\n return np.array([])\n\n descriptors = self._calculate_descriptors(molecules)\n\n # Normally, predict_proba takes a 2d array, one row per observation,\n # but a list of 1d arrays works too.\n sas_predictions = self.activity_model.predict_proba(descriptors)\n\n return sas_predictions[:, 1]\n\n def _load_model(self, parameters: ComponentParameters):\n try:\n # TODO: in the future should use self.component_specific_parameters.MODEL_PATH\n # model_path = self.parameters.specific_parameters.get(self.component_specific_parameters.MODEL_PATH, \"\")\n model_path = self.parameters.specific_parameters.get(\"saz_model_path\", \"\")\n activity_model = self._load_scikit_model(model_path)\n except:\n # model_path = self.parameters.specific_parameters.get(self.component_specific_parameters.MODEL_PATH, \"\")\n model_path = self.parameters.specific_parameters.get(\"saz_model_path\", \"\")\n raise Exception(f\"The loaded file `{model_path}` isn't a valid scikit-learn model\")\n return activity_model\n\n def _load_scikit_model(self, model_path: str):\n with open(model_path, \"rb\") as f:\n scikit_model = pickle.load(f)\n return scikit_model\n\n def _calculate_descriptors(self, molecules: List[Mol]) -> List[np.ndarray]:\n descriptors = [self._sas_descriptor(mol) for mol in molecules]\n return descriptors\n\n def _sas_descriptor(self, mol: Mol) -> np.ndarray:\n \"\"\"Returns SAS descriptor for a molecule, to be used as input to SAS model.\n\n SAS descriptor consists of three parts:\n 1. SA score by Ertl and Schuffenhauer (Novartis), part of RDKit, copied to this repo.\n 2. Molecular weight.\n 3. Morgan fingerprint, with counts (ECFP6).\n\n The three parts are concatenated into one 1d numpy array.\n \"\"\"\n\n sascore = calculateScore(mol)\n molwt = ExactMolWt(mol)\n fp = self._fingerprint(mol)\n\n descriptor = np.concatenate([[sascore], [molwt], fp])\n\n return descriptor\n\n def _fingerprint(self, mol: Mol) -> np.ndarray:\n fps = self._descriptors.molecules_to_count_fingerprints([mol], parameters=self.fp_parameters)\n return fps[0]\n" ]
[ [ "numpy.array", "numpy.concatenate" ] ]
cta-observatory/cta-lstchain
[ "d2d22567a6d0028737dd3b663f45c6206e43c437" ]
[ "lstchain/visualization/plot_calib.py" ]
[ "from matplotlib import pyplot as plt\nfrom ctapipe.visualization import CameraDisplay\nimport numpy as np\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom ctapipe_io_lst import load_camera_geometry\nfrom ctapipe.coordinates import EngineeringCameraFrame\n\n# read back the monitoring containers written with the tool calc_camera_calibration.py\nfrom ctapipe.containers import (\n FlatFieldContainer,\n WaveformCalibrationContainer,\n PedestalContainer,\n PixelStatusContainer,\n)\n\nfrom ctapipe.io.hdf5tableio import HDF5TableReader\n\n__all__ = [\"read_file\", \"plot_all\"]\n\nff_data = FlatFieldContainer()\nped_data = PedestalContainer()\ncalib_data = WaveformCalibrationContainer()\nstatus_data = PixelStatusContainer()\n\nchannel = [\"HG\", \"LG\"]\n\nplot_dir = \"none\"\n\n\ndef read_file(file_name, tel_id=1):\n \"\"\"\n read camera calibration quantities\n\n Parameters\n ----------\n file_name: calibration hdf5 file\n\n tel_id: telescope id\n \"\"\"\n with HDF5TableReader(file_name) as h5_table:\n assert h5_table._h5file.isopen == True\n\n table = f\"/tel_{tel_id}/flatfield\"\n next(h5_table.read(table, ff_data))\n table = f\"/tel_{tel_id}/calibration\"\n next(h5_table.read(table, calib_data))\n table = f\"/tel_{tel_id}/pedestal\"\n next(h5_table.read(table, ped_data))\n table = f\"/tel_{tel_id}/pixel_status\"\n next(h5_table.read(table, status_data))\n\n\ndef plot_all(ped_data, ff_data, calib_data, run=0, plot_file=None):\n \"\"\"\n plot camera calibration quantities\n\n Parameters\n ----------\n ped_data: pedestal container PedestalContainer()\n\n ff_data: flat-field container FlatFieldContainer()\n\n calib_data: calibration container WaveformCalibrationContainer()\n\n run: run number\n\n plot_file: name of the output PDF file. No file is produced if name is not provided\n\n \"\"\"\n # read geometry\n camera = load_camera_geometry()\n camera = camera.transform_to(EngineeringCameraFrame())\n\n # plot open pdf\n if plot_file is not None:\n with PdfPages(plot_file) as pdf:\n\n plt.rc(\"font\", size=15)\n\n # first figure\n fig = plt.figure(1, figsize=(12, 24))\n plt.tight_layout()\n fig.suptitle(f\"Run {run}\", fontsize=25)\n pad = 420\n image = ff_data.charge_median\n mask = ff_data.charge_median_outliers\n for chan in np.arange(2):\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n disp = CameraDisplay(camera)\n mymin = np.median(image[chan]) - 2 * np.std(image[chan])\n mymax = np.median(image[chan]) + 2 * np.std(image[chan])\n disp.set_limits_minmax(mymin, mymax)\n disp.highlight_pixels(mask[chan], linewidth=2)\n disp.image = image[chan]\n disp.cmap = plt.cm.coolwarm\n # disp.axes.text(lposx, 0, f'{channel[chan]} signal charge (ADC)', rotation=90)\n plt.title(f\"{channel[chan]} signal charge [ADC]\")\n disp.add_colorbar()\n\n image = ff_data.charge_std\n mask = ff_data.charge_std_outliers\n for chan in np.arange(2):\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n disp = CameraDisplay(camera)\n mymin = np.median(image[chan]) - 2 * np.std(image[chan])\n mymax = np.median(image[chan]) + 2 * np.std(image[chan])\n disp.set_limits_minmax(mymin, mymax)\n disp.highlight_pixels(mask[chan], linewidth=2)\n disp.image = image[chan]\n disp.cmap = plt.cm.coolwarm\n # disp.axes.text(lposx, 0, f'{channel[chan]} signal std [ADC]', rotation=90)\n plt.title(f\"{channel[chan]} signal std [ADC]\")\n disp.add_colorbar()\n\n image = ped_data.charge_median\n mask = ped_data.charge_median_outliers\n for chan in np.arange(2):\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n disp = 
CameraDisplay(camera)\n mymin = np.median(image[chan]) - 2 * np.std(image[chan])\n mymax = np.median(image[chan]) + 2 * np.std(image[chan])\n disp.set_limits_minmax(mymin, mymax)\n disp.highlight_pixels(mask[chan], linewidth=2)\n disp.image = image[chan]\n disp.cmap = plt.cm.coolwarm\n # disp.axes.text(lposx, 0, f'{channel[chan]} pedestal [ADC]', rotation=90)\n plt.title(f\"{channel[chan]} pedestal [ADC]\")\n disp.add_colorbar()\n\n image = ped_data.charge_std\n mask = ped_data.charge_std_outliers\n for chan in np.arange(2):\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n disp = CameraDisplay(camera)\n mymin = np.median(image[chan]) - 2 * np.std(image[chan])\n mymax = np.median(image[chan]) + 2 * np.std(image[chan])\n disp.set_limits_minmax(mymin, mymax)\n disp.highlight_pixels(mask[chan], linewidth=2)\n disp.image = image[chan]\n disp.cmap = plt.cm.coolwarm\n # disp.axes.text(lposx, 0, f'{channel[chan]} pedestal std [ADC]', rotation=90)\n plt.title(f\"{channel[chan]} pedestal std [ADC]\")\n disp.add_colorbar()\n\n plt.subplots_adjust(top=0.92)\n\n pdf.savefig()\n plt.close()\n\n # second figure\n fig = plt.figure(2, figsize=(12, 24))\n plt.tight_layout()\n fig.suptitle(f\"Run {run}\", fontsize=25)\n pad = 420\n\n # time\n image = ff_data.time_median\n mask = ff_data.time_median_outliers\n for chan in np.arange(2):\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n disp = CameraDisplay(camera)\n disp.highlight_pixels(mask[chan], linewidth=2)\n disp.image = image[chan]\n disp.cmap = plt.cm.coolwarm\n # disp.axes.text(lposx, 0, f'{channel[chan]} time', rotation=90)\n plt.title(f\"{channel[chan]} time\")\n disp.add_colorbar()\n\n image = ff_data.relative_gain_median\n mask = calib_data.unusable_pixels\n for chan in np.arange(2):\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n disp = CameraDisplay(camera)\n disp.highlight_pixels(mask[chan], linewidth=2)\n mymin = np.median(image[chan]) - 2 * np.std(image[chan])\n mymax = np.median(image[chan]) + 2 * np.std(image[chan])\n disp.set_limits_minmax(mymin, mymax)\n disp.image = image[chan]\n disp.cmap = plt.cm.coolwarm\n disp.set_limits_minmax(0.7, 1.3)\n plt.title(f\"{channel[chan]} relative signal\")\n # disp.axes.text(lposx, 0, f'{channel[chan]} relative gain', rotation=90)\n disp.add_colorbar()\n\n # pe\n image = calib_data.n_pe\n mask = calib_data.unusable_pixels\n image = np.where(np.isnan(image), 0, image)\n for chan in np.arange(2):\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n disp = CameraDisplay(camera)\n disp.highlight_pixels(mask[chan], linewidth=2)\n disp.image = image[chan]\n mymin = np.median(image[chan]) - 2 * np.std(image[chan])\n mymax = np.median(image[chan]) + 2 * np.std(image[chan])\n disp.set_limits_minmax(mymin, mymax)\n disp.cmap = plt.cm.coolwarm\n plt.title(f\"{channel[chan]} photon-electrons\")\n # disp.axes.text(lposx, 0, f'{channel[chan]} photon-electrons', rotation=90)\n disp.add_colorbar()\n\n # pe histogram\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n for chan in np.arange(2):\n n_pe = calib_data.n_pe[chan]\n # select good pixels\n select = np.logical_not(mask[chan])\n median = int(np.median(n_pe[select]))\n rms = np.std(n_pe[select])\n mymin = median - 4 * rms\n mymax = median + 4 * rms\n label = f\"{channel[chan]} Median {median:3.2f}, std {rms:5.2f}\"\n plt.hist(\n n_pe[select],\n label=label,\n histtype=\"step\",\n range=(mymin, mymax),\n bins=50,\n stacked=True,\n alpha=0.5,\n fill=True,\n )\n plt.legend()\n plt.xlabel(f\"pe\", fontsize=20)\n plt.ylabel(\"pixels\", fontsize=20)\n\n # 
pe scatter plot\n pad += 1\n plt.subplot(pad)\n plt.tight_layout()\n HG = calib_data.n_pe[0]\n LG = calib_data.n_pe[1]\n HG = np.where(np.isnan(HG), 0, HG)\n LG = np.where(np.isnan(LG), 0, LG)\n mymin = np.median(LG) - 2 * np.std(LG)\n mymax = np.median(LG) + 2 * np.std(LG)\n plt.hist2d(LG, HG, bins=[100, 100])\n plt.xlabel(\"LG\", fontsize=20)\n plt.ylabel(\"HG\", fontsize=20)\n\n x = np.arange(mymin, mymax)\n plt.plot(x, x)\n plt.ylim(mymin, mymax)\n plt.xlim(mymin, mymax)\n plt.subplots_adjust(top=0.92)\n\n pdf.savefig()\n plt.close()\n\n # figures 3 and 4: histograms\n for chan in np.arange(2):\n n_pe = calib_data.n_pe[chan]\n\n gain_median = ff_data.relative_gain_median[chan]\n # charge_median = ff_data.charge_median[chan]\n charge_mean = ff_data.charge_mean[chan]\n charge_std = ff_data.charge_std[chan]\n # median_ped = ped_data.charge_median[chan]\n mean_ped = ped_data.charge_mean[chan]\n ped_std = ped_data.charge_std[chan]\n dc_to_pe = calib_data.dc_to_pe[chan]\n time_correction = calib_data.time_correction[chan]\n\n # select good pixels\n select = np.logical_not(mask[chan])\n fig = plt.figure(chan + 10, figsize=(12, 24))\n fig.tight_layout(rect=[0, 0.0, 1, 0.95])\n\n fig.suptitle(f\"Run {run} channel: {channel[chan]}\", fontsize=25)\n\n # charge\n plt.subplot(421)\n plt.tight_layout()\n median = int(np.median(charge_mean[select]))\n rms = np.std(charge_mean[select])\n label = f\"Median {median:3.2f}, std {rms:5.0f}\"\n plt.xlabel(\"charge (ADC)\", fontsize=20)\n plt.ylabel(\"pixels\", fontsize=20)\n plt.hist(charge_mean[select], bins=50, label=label)\n plt.legend()\n\n plt.subplot(422)\n plt.tight_layout()\n plt.ylabel(\"pixels\", fontsize=20)\n plt.xlabel(\"charge std\", fontsize=20)\n median = np.median(charge_std[select])\n rms = np.std(charge_std[select])\n label = f\"Median {median:3.2f}, std {rms:3.2f}\"\n plt.hist(charge_std[select], bins=50, label=label)\n plt.legend()\n\n # pedestal charge\n plt.subplot(423)\n plt.tight_layout()\n plt.ylabel(\"pixels\", fontsize=20)\n plt.xlabel(\"pedestal\", fontsize=20)\n median = np.median(mean_ped[select])\n rms = np.std(mean_ped[select])\n label = f\"Median {median:3.2f}, std {rms:3.2f}\"\n plt.hist(mean_ped[select], bins=50, label=label)\n plt.legend()\n\n # pedestal std\n plt.subplot(424)\n plt.ylabel(\"pixels\", fontsize=20)\n plt.xlabel(\"pedestal std\", fontsize=20)\n median = np.median(ped_std[select])\n rms = np.std(ped_std[select])\n label = f\"Median {median:3.2f}, std {rms:3.2f}\"\n plt.hist(ped_std[select], bins=50, label=label)\n plt.legend()\n\n # relative gain\n plt.subplot(425)\n plt.tight_layout()\n plt.ylabel(\"pixels\", fontsize=20)\n plt.xlabel(\"relative signal\", fontsize=20)\n median = np.median(gain_median[select])\n rms = np.std(gain_median[select])\n label = f\"Relative gain {median:3.2f}, std {rms:5.2f}\"\n plt.hist(gain_median[select], bins=50, label=label)\n plt.legend()\n\n # photon electrons\n plt.subplot(426)\n plt.tight_layout()\n plt.ylabel(\"pixels\", fontsize=20)\n plt.xlabel(\"time corrections [ns]\", fontsize=20)\n median = np.median(time_correction[select])\n rms = np.std(time_correction[select])\n label = f\"Median {median:3.2f}, std {rms:3.2f}\"\n plt.hist(time_correction[select].value, bins=50, label=label)\n plt.legend()\n\n plt.subplots_adjust(top=0.92)\n # photon electrons\n plt.subplot(427)\n plt.tight_layout()\n plt.ylabel(\"pixels\", fontsize=20)\n plt.xlabel(\"pe\", fontsize=20)\n median = np.median(n_pe[select])\n rms = np.std(n_pe[select])\n label = f\"Median {median:3.2f}, std 
{rms:3.2f}\"\n plt.hist(n_pe[select], bins=50, label=label)\n plt.legend()\n plt.subplots_adjust(top=0.92)\n\n # gain\n plt.subplot(428)\n plt.tight_layout()\n plt.ylabel(\"pixels\", fontsize=20)\n plt.xlabel(\"flat-fielded gain [ADC/pe]\", fontsize=20)\n denominator = dc_to_pe[select]\n numerator = 1.\n\n gain = np.divide(numerator, denominator, out=np.zeros_like(denominator), where=denominator != 0)\n median = np.median(gain)\n rms = np.std(gain)\n label = f\"Median {median:3.2f}, std {rms:3.2f}\"\n plt.hist(gain, bins=50, label=label)\n plt.legend()\n plt.subplots_adjust(top=0.92)\n\n\n pdf.savefig(plt.gcf())\n plt.close()\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "matplotlib.pyplot.gcf", "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "numpy.logical_not", "matplotlib.pyplot.hist", "numpy.isnan", "matplotlib.pyplot.rc", "numpy.median", "numpy.arange", "matplotlib.pyplot.hist2d", "matplotlib.pyplot.close", "matplotlib.pyplot.ylim", "numpy.std", "numpy.zeros_like", "matplotlib.pyplot.legend", "matplotlib.pyplot.subplot", "matplotlib.pyplot.xlabel" ] ]
AndyLiu11037/card-detector
[ "c4917f0235b623cb439e91a5e74d4acd08d03887" ]
[ "object_webcam.py" ]
[ "import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # Suppress TensorFlow logging (1)\nimport pathlib\nimport tensorflow as tf\nimport sys\ntf.get_logger().setLevel('ERROR') # Suppress TensorFlow logging (2)\n\nimport time\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as viz_utils\nfrom object_detection.builders import model_builder\nfrom object_detection.utils import config_util\nimport numpy as np\nfrom PIL import Image\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport warnings\nimport cv2\nwarnings.filterwarnings('ignore') # Suppress Matplotlib warnings\nmatplotlib.use('TKAgg', force=True)\n\n# Enable GPU dynamic memory allocation\ngpus = tf.config.experimental.list_physical_devices('GPU')\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\nPATH_TO_SAVED_MODEL = os.path.join(os.getcwd(),\"my_model_2\")\nprint('Loading model...', end='')\n\nstart_time = time.time()\ncategory_index = label_map_util.create_category_index_from_labelmap(os.path.join(os.getcwd(),\"labelmap.pbtxt\"),\n use_display_name=True)\n# Load pipeline config and build a detection model\nconfigs = config_util.get_configs_from_pipeline_file(PATH_TO_SAVED_MODEL+\"/pipeline.config\")\nmodel_config = configs['model']\ndetection_model = model_builder.build(model_config=model_config, is_training=False)\n# Load saved model and build the detection function\nckpt = tf.compat.v2.train.Checkpoint(model=detection_model)\nckpt.restore(os.path.join(PATH_TO_SAVED_MODEL+\"/checkpoint\", 'ckpt-0')).expect_partial()\n \nend_time = time.time()\nelapsed_time = end_time - start_time\nprint('Done! Took {} seconds'.format(elapsed_time))\n\ndef detect_fn(image):\n \"\"\"Detect objects in image.\"\"\"\n\n image, shapes = detection_model.preprocess(image)\n prediction_dict = detection_model.predict(image, shapes)\n detections = detection_model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])\n\ndef load_image_into_numpy_array(path):\n \"\"\"Load an image from file into a numpy array.\n\n Puts image into numpy array to feed into tensorflow graph.\n Note that by convention we put it into a numpy array with shape\n (height, width, channels), where channels=3 for RGB.\n\n Args:\n path: the file path to the image\n\n Returns:\n uint8 numpy array with shape (img_height, img_width, 3)\n \"\"\"\n return np.array(Image.open(path))\n\nprint('Running inference for video source... 
', end='')\n\nvideo = cv2.VideoCapture(0) #depending on which webcame/videosource you want 0 is default\n\nwhile True:\n ret, image_np = video.read()\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n detections, predictions_dict, shapes = detect_fn(input_tensor)\n\n # input_tensor = np.expand_dims(image_np, 0)\n\n label_id_offset = 1\n image_np_with_detections = image_np.copy()\n viz_utils.visualize_boxes_and_labels_on_image_array(\n image_np_with_detections,\n detections['detection_boxes'][0].numpy(),\n (detections['detection_classes'][0].numpy() + label_id_offset).astype(int),\n detections['detection_scores'][0].numpy(),\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=200,\n min_score_thresh=.30,\n agnostic_mode=False)\n cv2.imshow('object detection', cv2.resize(image_np_with_detections, (800, 600)))\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\nvideo.release()\ncv2.destroyAllWindows()\n" ]
[ [ "tensorflow.config.experimental.set_memory_growth", "tensorflow.get_logger", "tensorflow.reshape", "numpy.expand_dims", "matplotlib.use", "tensorflow.config.experimental.list_physical_devices", "tensorflow.compat.v2.train.Checkpoint" ] ]
gpftc/qfunction
[ "5c3ceed0e270d343d51ee0b69d98d4fffad47b24" ]
[ ".history/qfunction/quantum/quantum_circuit_20210710225508.py" ]
[ "########################\n## Author: Reinan Br. ##\n## Date_init:01/07/21 ##\n########################\nfrom qfunction import q_sin,q_exp,q_cos,radian,limit\nimport numpy as np\nfrom numpy import sqrt,array\nfrom math import atan\n############ quantum equations #############\ndef q_rho(u,q=1,cpx=False,israd=True):\n\tu = radian(u) if(not israd) else u\n\tu = u*1j\n\tif cpx:\n\t\treturn sqrt(q_exp(u,q)*q_exp(-u,q))\n\telse:\n\t\treturn (sqrt(q_exp(u,q)*q_exp(-u,q))).real\n\n\n\ndef q_psi(theta,gamma,q=1,israd=True):\n import numpy as np\n gamma = radian(gamma) if(not israd) else gamma\n theta = radian(theta) if(not israd) else theta\n theta = 1j*theta/2\n gamma = gamma/2\n gamma = gamma if abs(gamma) <= 2*np.pi else np.nan\n theta = theta if abs(theta) <= np.pi else np.nan\n coluna0 = [q_exp(-theta,q).real*q_cos(gamma,q).real]\n coluna1 = [q_exp(theta,q).real*q_sin(gamma,q).real]\n return array([coluna0,coluna1])\n\n\ndef q_phi(u,q=1,israd=True):\n\tu = radian(u) if(not israd) else u\n\tparam_q = lambda q_: 1/(1-q_)\n\tparam_q = limit(param_q,q)\n\treturn param_q*atan((1-q)*u)\n\n\ndef q_qubit(state):\n pass\n\n\nfrom IPython.display import Math,display\nclass QuantumCircuit:\n def __init__(self,n_bits,q: float=1,israd=True)-> None:\n import numpy as np\n self.q = q\n self.theta = .5*np.pi\n self.israd = israd\n self.n_bits = n_bits\n circuit_painel = []\n for i in range(n_bits):\n circuit_painel.append(f'[q{i}]--')\n self.circuit_painel = circuit_painel\n q_qubits = []\n for i in range(n_bits):\n q_qubits.append(q_psi(gamma=0.5*np.pi,theta=1*np.pi,q=self.q))\n self.q_qubits = np.array(q_qubits)\n #print(self.q_qubits)\n self.divisor_circuits = False\n #creating a list for probabilistic\n self.probs_history = []\n \n \n def get_prob_from_qbits(self,qbits):\n #print(qbits)\n q_probs = []\n i = 0\n for qbit in qbits:\n qbit_prob = {}\n total_state = abs(qbit[0]) + abs(qbit[1])\n percent_zero_state = abs(qbit[0])/total_state\n percent_one_state = abs(qbit[1])/total_state\n \n #print(f'prob[{i}]: [{percent_zero_state},{percent_one_state}]')\n qbit_prob['alloc'] = i\n #print(f'[q{i}] {qbit}')\n qbit_prob['name_qbit'] = f'q{i}'\n qbit_prob['|0>'] = percent_zero_state\n qbit_prob['|1>'] = percent_one_state\n qbit_prob['state'] = '|0>' if qbit_prob['|0>']>=qbit_prob['|1>'] else '|1>'\n q_probs.append(qbit_prob)\n #print(q_probs[i])\n i+=1\n #print(f'qprobs: {q_probs}')\n return q_probs\n\n \n \n \n def R_x(self):\n theta = self.theta\n q = self.q\n israd = self.israd\n theta = theta if israd else radian(theta)\n theta = theta/2\n return np.array([[q_cos(u=theta,q=q,israd=israd),\n q_sin(u=theta,q=q)*-1j],\n [q_sin(theta,q=q,israd=israd)*-1j,\n q_cos(u=theta,q=q,israd=israd)]\n ])\n\n\n def R_y(self):\n theta = self.theta\n q = self.q\n israd = self.israd\n theta = theta if israd else radian(theta)\n theta = theta/2\n return np.array([[q_cos(u=theta,q=q,israd=israd),\n q_sin(u=theta,q=q,israd=israd)*-1],\n [q_sin(theta,q=q,israd=israd),\n q_cos(u=theta,q=q,israd=israd)]\n ]).real\n\n\n def R_z(self):\n theta = self.theta\n q = self.q\n israd = self.israd\n theta = theta if israd else radian(theta)\n theta = 1j*theta/2\n return np.array([[q_exp(u=-theta,q=q),0],\n [0,q_exp(u=theta,q=q)]]).real\n\n\n def H(self,*n_bits):\n str_cir = '--[H]--'\n str_nan = '-------'\n circuit_painel = self.circuit_painel\n i = 0\n new_circuit = []\n for line in circuit_painel:\n if i in n_bits:\n line = line+str_cir\n new_circuit.append(line)\n else:\n line=line+str_nan\n new_circuit.append(line)\n i += 1\n 
self.circuit_painel = new_circuit\n self.qprobs = self.get_prob_from_qbits(self.q_qubits)\n\n \n theta = self.theta\n q = self.q\n israd = self.israd\n R_x = self.R_x\n R_y = self.R_y\n \n H_matriz = (R_x()+R_y()).real/np.sqrt(2)\n \n new_q_qubits = []\n q_qubits = self.q_qubits\n i = 0\n for qbit in q_qubits:\n if i in n_bits:\n qbit = np.dot(H_matriz,qbit)\n new_q_qubits.append(qbit)\n else:\n qbit = qbit\n new_q_qubits.append(qbit)\n i+=1\n self.q_qubits = new_q_qubits\n self.qprobs = self.get_prob_from_qbits(self.q_qubits)\n\n \n \n def X(self,*n_bits):\n str_cir = '--[X]--'\n str_nan = '-------'\n circuit_painel = self.circuit_painel\n i = 0\n new_circuit = []\n for line in circuit_painel:\n if i in n_bits:\n line = line+str_cir\n new_circuit.append(line)\n else:\n line=line+str_nan\n new_circuit.append(line)\n i += 1\n self.circuit_painel = new_circuit\n \n theta = self.theta\n q = self.q\n israd = self.israd\n R_x = self.R_x\n new_q_qubits = []\n q_qubits = self.q_qubits\n i = 0\n for qbit in q_qubits:\n if i in n_bits:\n qbit = np.dot(R_x(),qbit)\n new_q_qubits.append(qbit)\n else:\n qbit = qbit\n new_q_qubits.append(qbit)\n i+=1\n self.q_qubits = new_q_qubits\n self.qprobs = self.get_prob_from_qbits(self.q_qubits)\n\n\n def Y(self,*n_bits):\n str_cir = '--[Y]--'\n str_nan = '-------'\n circuit_painel = self.circuit_painel\n i = 0\n new_circuit = []\n for line in circuit_painel:\n if i in n_bits:\n line = line+str_cir\n new_circuit.append(line)\n else:\n line=line+str_nan\n new_circuit.append(line)\n i += 1\n self.circuit_painel = new_circuit\n \n theta = self.theta\n q = self.q\n israd = self.israd\n R_y = self.R_y\n new_q_qubits = []\n q_qubits = self.q_qubits\n i = 0\n for qbit in q_qubits:\n if i in n_bits:\n qbit = np.dot(R_y(),qbit)\n new_q_qubits.append(qbit)\n else:\n qbit = qbit\n new_q_qubits.append(qbit)\n i+=1\n self.q_qubits = new_q_qubits\n self.qprobs = self.get_prob_from_qbits(self.q_qubits)\n\n \n def Z(self,*n_bits):\n str_cir = '--[Z]--'\n str_nan = '-------'\n circuit_painel = self.circuit_painel\n i = 0\n new_circuit = []\n for line in circuit_painel:\n if i in n_bits:\n line = line+str_cir\n new_circuit.append(line)\n else:\n line=line+str_nan\n new_circuit.append(line)\n i += 1\n self.circuit_painel = new_circuit\n \n theta = self.theta\n q = self.q\n israd = self.israd\n R_z = self.R_z\n new_q_qubits = []\n q_qubits = self.q_qubits\n i = 0\n for qbit in q_qubits:\n if i in n_bits:\n qbit = np.dot(R_z(),qbit)\n new_q_qubits.append(qbit)\n else:\n qbit = qbit\n new_q_qubits.append(qbit)\n i+=1\n self.q_qubits = new_q_qubits\n self.qprobs = self.get_prob_from_qbits(self.q_qubits)\n\n \n \n def states(self) -> str:\n string = []\n i = 0\n for qbit in self.q_qubits:\n string.append(f'[q{i}]|ψ〉 = {round(qbit[0][0],3)}|0〉 + {round(qbit[1][0],3)}|1〉')\n i += 1\n string = '\\n'.join(string)\n string = '\\n'+string\n #print(display(Math(r'$\\frac{df}{dx}$')))\n return string\n \n def med(self,*n_bits):\n #getting the probability of the two states of qbit\n q_probs = []\n i = 0\n new_qubits = []\n for qbit in self.q_qubits:\n if i in n_bits:\n qbit_prob = {}\n total_state = qbit[0]+qbit[1]\n percent_zero_state = qbit[0]/total_state\n percent_one_state = qbit[1]/total_state\n qbit_prob['alloc'] = i\n qbit_prob['name_qbit'] = f'q{i}'\n qbit_prob['|0>'] = percent_zero_state\n qbit_prob['|1>'] = percent_one_state\n qbit_prob['state'] = '|0>' if qbit_prob['|0>']>=qbit_prob['|1>'] else '|1>'\n q_probs.append(qbit_prob)\n if qbit_prob['state'] == '|1>':\n qbit = [0,1]\n 
else:\n qbit = [1,0]\n #print(f'[q{i}] {qbit}')\n else:\n pass\n new_qubits.append(qbit)\n i+=1\n #print(i)\n str_cir = '-[med]-'\n str_nan = '-----------'\n circuit_painel = self.circuit_painel\n i = 0\n c=0\n new_circuit = []\n for line in circuit_painel:\n if i in n_bits:\n #print(q_probs)\n n_simb = q_probs[c]['state']\n line = line+str_cir+f'{n_simb}-'\n new_circuit.append(line)\n c+=1\n else:\n line=line+str_nan\n new_circuit.append(line)\n i += 1\n self.circuit_painel = new_circuit\n self.q_probs = q_probs\n self.q_qubits = new_qubits\n self.qprobs = self.get_prob_from_qbits(self.q_qubits)\n\n return q_probs\n\n def med_all(self):\n n_bits = [nq for nq in range(len(self.q_qubits))]\n\n #getting the probability of the two states of qbit\n q_probs = []\n i = 0\n new_qubits = []\n qbits = [0,0]\n for qbit in self.q_qubits:\n #print('after',qbit)\n if i in n_bits:\n qbit_prob = {}\n total_state = qbit[0]+qbit[1]\n percent_zero_state = qbit[0]/total_state\n percent_one_state = qbit[1]/total_state\n #print('percent_one',percent_one_state)\n #print('percent_zero',percent_zero_state)\n qbit_prob['alloc'] = i\n qbit_prob['name_qbit'] = f'q{i}'\n qbit_prob['|0>'] = percent_zero_state\n qbit_prob['|1>'] = percent_one_state\n qbit_prob['state'] = '|0>' if qbit_prob['|0>']>=qbit_prob['|1>'] else '|1>'\n q_probs.append(qbit_prob)\n #print(qbit_prob['state'])\n #print(1 if qbit_prob['state']=='|1>' else 0)\n if qbit_prob['state'] == '|1>':\n qbit = [0,1]\n else:\n qbit = [1,0]\n #print(f'[q{i}] {qbit}')\n else:\n pass\n new_qubits.append(qbit)\n #print(new_qubits)\n i+=1\n #print(i)\n str_cir = '-[med]-'\n str_nan = '-----------'\n circuit_painel = self.circuit_painel\n i = 0\n c=0\n new_circuit = []\n for line in circuit_painel:\n if i in n_bits:\n #print(q_probs)\n n_simb = q_probs[c]['state']\n line = line+str_cir+f'{n_simb}-'\n new_circuit.append(line)\n c+=1\n else:\n line=line+str_nan\n new_circuit.append(line)\n i += 1\n self.circuit_painel = new_circuit\n self.q_probs = q_probs\n self.q_qubits = new_qubits\n #print(self.q_qubits)\n self.qprobs = self.get_prob_from_qbits(self.q_qubits)\n\n return q_probs\n \n def __str__(self):\n print_circuit = self.circuit_painel\n if self.divisor_circuits:\n print_circuit.append(self.divisor_circuits)\n print_circuit.append('\\n'.join(self.classic_circuit_painel))\n #print(print_circuit)\n #self.circuit_painel = []\n return '\\n'.join(print_circuit)\n \n \n \nclass q_qubitq:\n def __init__(self,*bit_values: int) -> None:\n self.values = list(bit_values)\n# \\na^2 + b^2 = {qbit[0][0]**2+qbit[1][0]**2}')\n\n" ]
[ [ "numpy.array", "numpy.dot", "numpy.sqrt" ] ]
davidbrochart/rioxarray
[ "6587726a019f46ec68ca94d6cea866abea956a2a" ]
[ "test/integration/test_integration__io.py" ]
[ "import contextlib\nimport itertools\nimport os\nimport pickle\nimport shutil\nimport sys\nimport tempfile\nimport warnings\n\nimport dask.array as da\nimport mock\nimport numpy as np\nimport pytest\nimport rasterio\nimport xarray as xr\nfrom affine import Affine\nfrom numpy.testing import assert_almost_equal\nfrom rasterio.errors import NotGeoreferencedWarning\nfrom rasterio.transform import from_origin\nfrom rasterio.warp import calculate_default_transform\nfrom xarray import DataArray\nfrom xarray.coding.variables import SerializationWarning\nfrom xarray.testing import assert_allclose, assert_equal, assert_identical\n\nimport rioxarray\nfrom rioxarray._io import build_subdataset_filter\nfrom test.conftest import (\n TEST_COMPARE_DATA_DIR,\n TEST_INPUT_DATA_DIR,\n _assert_xarrays_equal,\n)\n\n\[email protected](\n \"subdataset, variable, group, match\",\n [\n (\n \"netcdf:../../test/test_data/input/PLANET_SCOPE_3D.nc:blue\",\n \"green\",\n None,\n False,\n ),\n (\n \"netcdf:../../test/test_data/input/PLANET_SCOPE_3D.nc:blue\",\n \"blue\",\n None,\n True,\n ),\n (\n \"netcdf:../../test/test_data/input/PLANET_SCOPE_3D.nc:blue1\",\n \"blue\",\n None,\n False,\n ),\n (\n \"netcdf:../../test/test_data/input/PLANET_SCOPE_3D.nc:1blue\",\n \"blue\",\n None,\n False,\n ),\n (\n \"netcdf:../../test/test_data/input/PLANET_SCOPE_3D.nc:blue\",\n \"blue\",\n \"gr\",\n False,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \":MODIS_Grid_2D:sur_refl_b01_1\",\n [\"sur_refl_b01_1\"],\n None,\n True,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \":MODIS_Grid_2D:sur_refl_b01_1\",\n None,\n [\"MODIS_Grid_2D\"],\n True,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \":MODIS_Grid_2D:sur_refl_b01_1\",\n (\"sur_refl_b01_1\",),\n (\"MODIS_Grid_2D\",),\n True,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \":MODIS_Grid_2D:sur_refl_b01_1\",\n \"blue\",\n \"gr\",\n False,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \":MODIS_Grid_2D:sur_refl_b01_1\",\n \"sur_refl_b01_1\",\n \"gr\",\n False,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \":MODIS_Grid_2D:sur_refl_b01_1\",\n None,\n \"gr\",\n False,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \"://MODIS_Grid_2D://sur_refl_b01_1\",\n \"sur_refl_b01_1\",\n None,\n True,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \"://MODIS_Grid_2D://sur_refl_b01_1\",\n None,\n \"MODIS_Grid_2D\",\n True,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \"://MODIS_Grid_2D://sur_refl_b01_1\",\n \"sur_refl_b01_1\",\n \"MODIS_Grid_2D\",\n True,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \"://MODIS_Grid_2D://sur_refl_b01_1\",\n \"blue\",\n \"gr\",\n False,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \"://MODIS_Grid_2D://sur_refl_b01_1\",\n \"sur_refl_b01_1\",\n \"gr\",\n False,\n ),\n (\n 'HDF4_EOS:EOS_GRID:\"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf\"'\n \"://MODIS_Grid_2D://sur_refl_b01_1\",\n None,\n \"gr\",\n False,\n ),\n (\n \"netcdf:S5P_NRTI_L2__NO2____20190513T181819_20190513T182319_08191_\"\n \"01_010301_20190513T185033.nc:/PRODUCT/tm5_constant_a\",\n None,\n \"PRODUCT\",\n True,\n ),\n (\n \"netcdf:S5P_NRTI_L2__NO2____20190513T181819_20190513T182319_08191_\"\n 
\"01_010301_20190513T185033.nc:/PRODUCT/tm5_constant_a\",\n \"tm5_constant_a\",\n \"PRODUCT\",\n True,\n ),\n (\n \"netcdf:S5P_NRTI_L2__NO2____20190513T181819_20190513T182319_08191_\"\n \"01_010301_20190513T185033.nc:/PRODUCT/tm5_constant_a\",\n \"tm5_constant_a\",\n \"/PRODUCT\",\n True,\n ),\n ],\n)\ndef test_build_subdataset_filter(subdataset, variable, group, match):\n assert (\n build_subdataset_filter(group, variable).search(subdataset) is not None\n ) == match\n\n\ndef test_open_variable_filter():\n with rioxarray.open_rasterio(\n os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\"), variable=[\"blue\"]\n ) as rds:\n assert list(rds.data_vars) == [\"blue\"]\n\n\ndef test_open_group_filter__missing():\n with rioxarray.open_rasterio(\n os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\"),\n variable=\"blue\",\n group=[\"non-existent\"],\n ) as rds:\n assert list(rds.data_vars) == []\n\n\ndef test_open_multiple_resolution():\n rds_list = rioxarray.open_rasterio(\n os.path.join(\n TEST_INPUT_DATA_DIR, \"MOD09GA.A2008296.h14v17.006.2015181011753.hdf\"\n )\n )\n assert isinstance(rds_list, list)\n assert len(rds_list) == 2\n for rds in rds_list:\n assert rds.attrs[\"SHORTNAME\"] == \"MOD09GA\"\n assert rds_list[0].dims == {\"y\": 1200, \"x\": 1200, \"band\": 1}\n assert rds_list[1].dims == {\"y\": 2400, \"x\": 2400, \"band\": 1}\n\n\ndef test_open_group_filter():\n with rioxarray.open_rasterio(\n os.path.join(\n TEST_INPUT_DATA_DIR, \"MOD09GA.A2008296.h14v17.006.2015181011753.hdf\"\n ),\n group=\"MODIS_Grid_500m_2D\",\n ) as rds:\n assert sorted(rds.data_vars) == [\n \"QC_500m_1\",\n \"iobs_res_1\",\n \"num_observations_500m\",\n \"obscov_500m_1\",\n \"sur_refl_b01_1\",\n \"sur_refl_b02_1\",\n \"sur_refl_b03_1\",\n \"sur_refl_b04_1\",\n \"sur_refl_b05_1\",\n \"sur_refl_b06_1\",\n \"sur_refl_b07_1\",\n ]\n\n\ndef test_open_group_load_attrs():\n with rioxarray.open_rasterio(\n os.path.join(\n TEST_INPUT_DATA_DIR, \"MOD09GA.A2008296.h14v17.006.2015181011753.hdf\"\n ),\n variable=\"sur_refl_b05_1\",\n ) as rds:\n attrs = rds[\"sur_refl_b05_1\"].attrs\n assert sorted(attrs) == [\n \"_FillValue\",\n \"add_offset\",\n \"grid_mapping\",\n \"long_name\",\n \"scale_factor\",\n \"transform\",\n \"units\",\n ]\n assert attrs[\"long_name\"] == \"500m Surface Reflectance Band 5 - first layer\"\n assert attrs[\"units\"] == \"reflectance\"\n assert attrs[\"_FillValue\"] == -28672.0\n assert attrs[\"grid_mapping\"] == \"spatial_ref\"\n\n\ndef test_open_rasterio_mask_chunk_clip():\n with rioxarray.open_rasterio(\n os.path.join(TEST_COMPARE_DATA_DIR, \"small_dem_3m_merged.tif\"),\n masked=True,\n chunks=True,\n default_name=\"dem\",\n ) as xdi:\n assert xdi.name == \"dem\"\n assert str(xdi.dtype) == \"float64\"\n assert str(xdi.data.dtype) == \"float64\"\n assert str(type(xdi.data)) == \"<class 'dask.array.core.Array'>\"\n assert xdi.chunks == ((1,), (245,), (574,))\n assert np.isnan(xdi.values).sum() == 52119\n assert xdi.encoding == {\"_FillValue\": 0.0}\n attrs = dict(xdi.attrs)\n assert_almost_equal(\n attrs.pop(\"transform\"),\n (3.0, 0.0, 425047.68381405267, 0.0, -3.0, 4615780.040546387),\n )\n assert attrs == {\n \"grid_mapping\": \"spatial_ref\",\n \"add_offset\": 0.0,\n \"scale_factor\": 1.0,\n }\n\n # get subset for testing\n subset = xdi.isel(x=slice(150, 160), y=slice(100, 150))\n comp_subset = subset.isel(x=slice(1, None), y=slice(1, None))\n # add transform for test\n comp_subset.attrs[\"transform\"] = tuple(comp_subset.rio.transform(recalc=True))\n\n geometries = [\n {\n \"type\": 
\"Polygon\",\n \"coordinates\": [\n [\n [subset.x.values[0], subset.y.values[-1]],\n [subset.x.values[0], subset.y.values[0]],\n [subset.x.values[-1], subset.y.values[0]],\n [subset.x.values[-1], subset.y.values[-1]],\n [subset.x.values[0], subset.y.values[-1]],\n ]\n ],\n }\n ]\n\n # test data array\n clipped = xdi.rio.clip(geometries, comp_subset.rio.crs)\n _assert_xarrays_equal(clipped, comp_subset)\n assert clipped.encoding == {\"_FillValue\": 0.0}\n\n # test dataset\n clipped_ds = xdi.to_dataset(name=\"test_data\").rio.clip(\n geometries, subset.rio.crs\n )\n comp_subset_ds = comp_subset.to_dataset(name=\"test_data\")\n _assert_xarrays_equal(clipped_ds, comp_subset_ds)\n assert clipped_ds.test_data.encoding == {\"_FillValue\": 0.0}\n\n\n##############################################################################\n# From xarray tests\n##############################################################################\nON_WINDOWS = sys.platform == \"win32\"\n_counter = itertools.count()\n\n\[email protected]\ndef create_tmp_file(suffix=\".nc\", allow_cleanup_failure=False):\n temp_dir = tempfile.mkdtemp()\n path = os.path.join(temp_dir, \"temp-%s%s\" % (next(_counter), suffix))\n try:\n yield path\n finally:\n try:\n shutil.rmtree(temp_dir)\n except OSError:\n if not allow_cleanup_failure:\n raise\n\n\[email protected]\ndef create_tmp_geotiff(\n nx=4,\n ny=3,\n nz=3,\n transform=None,\n transform_args=[5000, 80000, 1000, 2000.0],\n crs={\"units\": \"m\", \"no_defs\": True, \"ellps\": \"WGS84\", \"proj\": \"utm\", \"zone\": 18},\n open_kwargs=None,\n additional_attrs=None,\n):\n # yields a temporary geotiff file and a corresponding expected DataArray\n if open_kwargs is None:\n open_kwargs = {}\n\n with create_tmp_file(suffix=\".tif\", allow_cleanup_failure=ON_WINDOWS) as tmp_file:\n # allow 2d or 3d shapes\n if nz == 1:\n data_shape = ny, nx\n write_kwargs = {\"indexes\": 1}\n else:\n data_shape = nz, ny, nx\n write_kwargs = {}\n data = np.arange(nz * ny * nx, dtype=rasterio.float32).reshape(*data_shape)\n if transform is None and transform_args is not None:\n transform = from_origin(*transform_args)\n if additional_attrs is None:\n additional_attrs = {\n \"descriptions\": tuple(\"d{}\".format(n + 1) for n in range(nz)),\n \"units\": tuple(\"u{}\".format(n + 1) for n in range(nz)),\n }\n with rasterio.open(\n tmp_file,\n \"w\",\n driver=\"GTiff\",\n height=ny,\n width=nx,\n count=nz,\n crs=crs,\n transform=transform,\n dtype=rasterio.float32,\n **open_kwargs,\n ) as s:\n for attr, val in additional_attrs.items():\n setattr(s, attr, val)\n s.write(data, **write_kwargs)\n dx, dy = s.res[0], -s.res[1]\n tt = s.transform\n\n if not transform_args:\n a, b, c, d = tt.c, tt.f, -tt.e, tt.a\n else:\n a, b, c, d = transform_args\n data = data[np.newaxis, ...] 
if nz == 1 else data\n expected = DataArray(\n data,\n dims=(\"band\", \"y\", \"x\"),\n coords={\n \"band\": np.arange(nz) + 1,\n \"y\": -np.arange(ny) * d + b + dy / 2,\n \"x\": np.arange(nx) * c + a + dx / 2,\n },\n )\n yield tmp_file, expected\n\n\nclass TestRasterio:\n def test_serialization(self):\n with create_tmp_geotiff(additional_attrs={}) as (tmp_file, expected):\n # Write it to a netcdf and read again (roundtrip)\n with xr.open_rasterio(tmp_file) as rioda:\n with create_tmp_file(suffix=\".nc\") as tmp_nc_file:\n rioda.to_netcdf(tmp_nc_file)\n with xr.open_dataarray(tmp_nc_file) as ncds:\n assert_identical(rioda, ncds)\n\n def test_utm(self):\n with create_tmp_geotiff() as (tmp_file, expected):\n with xr.open_rasterio(tmp_file) as rioda:\n assert_allclose(rioda, expected)\n assert rioda.attrs[\"scales\"] == (1.0, 1.0, 1.0)\n assert rioda.attrs[\"offsets\"] == (0.0, 0.0, 0.0)\n assert rioda.attrs[\"descriptions\"] == (\"d1\", \"d2\", \"d3\")\n assert rioda.attrs[\"units\"] == (\"u1\", \"u2\", \"u3\")\n assert isinstance(rioda.attrs[\"crs\"], str)\n assert isinstance(rioda.attrs[\"res\"], tuple)\n assert isinstance(rioda.attrs[\"is_tiled\"], np.uint8)\n assert isinstance(rioda.attrs[\"transform\"], tuple)\n assert len(rioda.attrs[\"transform\"]) == 6\n np.testing.assert_array_equal(\n rioda.attrs[\"nodatavals\"], [np.NaN, np.NaN, np.NaN]\n )\n\n # Check no parse coords\n with xr.open_rasterio(tmp_file, parse_coordinates=False) as rioda:\n assert \"x\" not in rioda.coords\n assert \"y\" not in rioda.coords\n\n def test_non_rectilinear(self):\n # Create a geotiff file with 2d coordinates\n with create_tmp_geotiff(\n transform=from_origin(0, 3, 1, 1).rotation(45), crs=None\n ) as (tmp_file, _):\n # Default is to not parse coords\n with xr.open_rasterio(tmp_file) as rioda:\n assert \"x\" not in rioda.coords\n assert \"y\" not in rioda.coords\n assert \"crs\" not in rioda.attrs\n assert rioda.attrs[\"scales\"] == (1.0, 1.0, 1.0)\n assert rioda.attrs[\"offsets\"] == (0.0, 0.0, 0.0)\n assert rioda.attrs[\"descriptions\"] == (\"d1\", \"d2\", \"d3\")\n assert rioda.attrs[\"units\"] == (\"u1\", \"u2\", \"u3\")\n assert isinstance(rioda.attrs[\"res\"], tuple)\n assert isinstance(rioda.attrs[\"is_tiled\"], np.uint8)\n assert isinstance(rioda.attrs[\"transform\"], tuple)\n assert len(rioda.attrs[\"transform\"]) == 6\n\n # See if a warning is raised if we force it\n with pytest.warns(Warning, match=\"transformation isn't rectilinear\"):\n with xr.open_rasterio(tmp_file, parse_coordinates=True) as rioda:\n assert \"x\" not in rioda.coords\n assert \"y\" not in rioda.coords\n\n def test_platecarree(self):\n with create_tmp_geotiff(\n 8,\n 10,\n 1,\n transform_args=[1, 2, 0.5, 2.0],\n crs=\"+proj=latlong\",\n open_kwargs={\"nodata\": -9765},\n ) as (tmp_file, expected):\n with xr.open_rasterio(tmp_file) as rioda:\n assert_allclose(rioda, expected)\n assert rioda.attrs[\"scales\"] == (1.0,)\n assert rioda.attrs[\"offsets\"] == (0.0,)\n assert isinstance(rioda.attrs[\"descriptions\"], tuple)\n assert isinstance(rioda.attrs[\"units\"], tuple)\n assert isinstance(rioda.attrs[\"crs\"], str)\n assert isinstance(rioda.attrs[\"res\"], tuple)\n assert isinstance(rioda.attrs[\"is_tiled\"], np.uint8)\n assert isinstance(rioda.attrs[\"transform\"], tuple)\n assert len(rioda.attrs[\"transform\"]) == 6\n np.testing.assert_array_equal(rioda.attrs[\"nodatavals\"], [-9765.0])\n\n def test_notransform(self):\n # regression test for https://github.com/pydata/xarray/issues/1686\n # Create a geotiff file\n with 
warnings.catch_warnings():\n # rasterio throws a NotGeoreferencedWarning here, which is\n # expected since we test rasterio's defaults in this case.\n warnings.filterwarnings(\n \"ignore\",\n category=UserWarning,\n message=\"Dataset has no geotransform set\",\n )\n with create_tmp_file(suffix=\".tif\") as tmp_file:\n # data\n nx, ny, nz = 4, 3, 3\n data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(\n nz, ny, nx\n )\n with rasterio.open(\n tmp_file,\n \"w\",\n driver=\"GTiff\",\n height=ny,\n width=nx,\n count=nz,\n dtype=rasterio.float32,\n ) as s:\n s.descriptions = (\"nx\", \"ny\", \"nz\")\n s.units = (\"cm\", \"m\", \"km\")\n s.write(data)\n\n # Tests\n expected = DataArray(\n data,\n dims=(\"band\", \"y\", \"x\"),\n coords={\n \"band\": [1, 2, 3],\n \"y\": [0.5, 1.5, 2.5],\n \"x\": [0.5, 1.5, 2.5, 3.5],\n },\n )\n with xr.open_rasterio(tmp_file) as rioda:\n assert_allclose(rioda, expected)\n assert rioda.attrs[\"scales\"] == (1.0, 1.0, 1.0)\n assert rioda.attrs[\"offsets\"] == (0.0, 0.0, 0.0)\n assert rioda.attrs[\"descriptions\"] == (\"nx\", \"ny\", \"nz\")\n assert rioda.attrs[\"units\"] == (\"cm\", \"m\", \"km\")\n assert isinstance(rioda.attrs[\"res\"], tuple)\n assert isinstance(rioda.attrs[\"is_tiled\"], np.uint8)\n assert isinstance(rioda.attrs[\"transform\"], tuple)\n assert len(rioda.attrs[\"transform\"]) == 6\n\n def test_indexing(self):\n with create_tmp_geotiff(\n 8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs=\"+proj=latlong\"\n ) as (tmp_file, expected):\n with xr.open_rasterio(tmp_file, cache=False) as actual:\n\n # tests\n # assert_allclose checks all data + coordinates\n assert_allclose(actual, expected)\n assert not actual.variable._in_memory\n\n # Basic indexer\n ind = {\"x\": slice(2, 5), \"y\": slice(5, 7)}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n ind = {\"band\": slice(1, 2), \"x\": slice(2, 5), \"y\": slice(5, 7)}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n ind = {\"band\": slice(1, 2), \"x\": slice(2, 5), \"y\": 0}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n # orthogonal indexer\n ind = {\n \"band\": np.array([2, 1, 0]),\n \"x\": np.array([1, 0]),\n \"y\": np.array([0, 2]),\n }\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n ind = {\"band\": np.array([2, 1, 0]), \"x\": np.array([1, 0]), \"y\": 0}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n ind = {\"band\": 0, \"x\": np.array([0, 0]), \"y\": np.array([1, 1, 1])}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n # minus-stepped slice\n ind = {\"band\": np.array([2, 1, 0]), \"x\": slice(-1, None, -1), \"y\": 0}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n ind = {\"band\": np.array([2, 1, 0]), \"x\": 1, \"y\": slice(-1, 1, -2)}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n # empty selection\n ind = {\"band\": np.array([2, 1, 0]), \"x\": 1, \"y\": slice(2, 2, 1)}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n ind = {\"band\": slice(0, 0), \"x\": 1, \"y\": 2}\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n # vectorized indexer\n ind 
= {\n \"band\": DataArray([2, 1, 0], dims=\"a\"),\n \"x\": DataArray([1, 0, 0], dims=\"a\"),\n \"y\": np.array([0, 2]),\n }\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n ind = {\n \"band\": DataArray([[2, 1, 0], [1, 0, 2]], dims=[\"a\", \"b\"]),\n \"x\": DataArray([[1, 0, 0], [0, 1, 0]], dims=[\"a\", \"b\"]),\n \"y\": 0,\n }\n assert_allclose(expected.isel(**ind), actual.isel(**ind))\n assert not actual.variable._in_memory\n\n # Selecting lists of bands is fine\n ex = expected.isel(band=[1, 2])\n ac = actual.isel(band=[1, 2])\n assert_allclose(ac, ex)\n ex = expected.isel(band=[0, 2])\n ac = actual.isel(band=[0, 2])\n assert_allclose(ac, ex)\n\n # Integer indexing\n ex = expected.isel(band=1)\n ac = actual.isel(band=1)\n assert_allclose(ac, ex)\n\n ex = expected.isel(x=1, y=2)\n ac = actual.isel(x=1, y=2)\n assert_allclose(ac, ex)\n\n ex = expected.isel(band=0, x=1, y=2)\n ac = actual.isel(band=0, x=1, y=2)\n assert_allclose(ac, ex)\n\n # Mixed\n ex = actual.isel(x=slice(2), y=slice(2))\n ac = actual.isel(x=[0, 1], y=[0, 1])\n assert_allclose(ac, ex)\n\n ex = expected.isel(band=0, x=1, y=slice(5, 7))\n ac = actual.isel(band=0, x=1, y=slice(5, 7))\n assert_allclose(ac, ex)\n\n ex = expected.isel(band=0, x=slice(2, 5), y=2)\n ac = actual.isel(band=0, x=slice(2, 5), y=2)\n assert_allclose(ac, ex)\n\n # One-element lists\n ex = expected.isel(band=[0], x=slice(2, 5), y=[2])\n ac = actual.isel(band=[0], x=slice(2, 5), y=[2])\n assert_allclose(ac, ex)\n\n def test_caching(self):\n with create_tmp_geotiff(\n 8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs=\"+proj=latlong\"\n ) as (tmp_file, expected):\n # Cache is the default\n with xr.open_rasterio(tmp_file) as actual:\n\n # This should cache everything\n assert_allclose(actual, expected)\n\n # once cached, non-windowed indexing should become possible\n ac = actual.isel(x=[2, 4])\n ex = expected.isel(x=[2, 4])\n assert_allclose(ac, ex)\n\n def test_chunks(self):\n with create_tmp_geotiff(\n 8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs=\"+proj=latlong\"\n ) as (tmp_file, expected):\n # Chunk at open time\n with xr.open_rasterio(tmp_file, chunks=(1, 2, 2)) as actual:\n assert isinstance(actual.data, da.Array)\n assert \"open_rasterio\" in actual.data.name\n\n # do some arithmetic\n ac = actual.mean()\n ex = expected.mean()\n assert_allclose(ac, ex)\n\n ac = actual.sel(band=1).mean(dim=\"x\")\n ex = expected.sel(band=1).mean(dim=\"x\")\n assert_allclose(ac, ex)\n\n def test_pickle_rasterio(self):\n # regression test for https://github.com/pydata/xarray/issues/2121\n with create_tmp_geotiff() as (tmp_file, expected):\n with xr.open_rasterio(tmp_file) as rioda:\n temp = pickle.dumps(rioda)\n with pickle.loads(temp) as actual:\n assert_equal(actual, rioda)\n\n def test_ENVI_tags(self):\n # Create an ENVI file with some tags in the ENVI namespace\n # this test uses a custom driver, so we can't use create_tmp_geotiff\n with create_tmp_file(suffix=\".dat\") as tmp_file:\n # data\n nx, ny, nz = 4, 3, 3\n data = np.arange(nx * ny * nz, dtype=rasterio.float32).reshape(nz, ny, nx)\n transform = from_origin(5000, 80000, 1000, 2000.0)\n with rasterio.open(\n tmp_file,\n \"w\",\n driver=\"ENVI\",\n height=ny,\n width=nx,\n count=nz,\n crs={\n \"units\": \"m\",\n \"no_defs\": True,\n \"ellps\": \"WGS84\",\n \"proj\": \"utm\",\n \"zone\": 18,\n },\n transform=transform,\n dtype=rasterio.float32,\n ) as s:\n s.update_tags(\n ns=\"ENVI\",\n description=\"{Tagged file}\",\n wavelength=\"{123.000000, 
234.234000, 345.345678}\",\n fwhm=\"{1.000000, 0.234000, 0.000345}\",\n )\n s.write(data)\n dx, dy = s.res[0], -s.res[1]\n\n # Tests\n coords = {\n \"band\": [1, 2, 3],\n \"y\": -np.arange(ny) * 2000 + 80000 + dy / 2,\n \"x\": np.arange(nx) * 1000 + 5000 + dx / 2,\n \"wavelength\": (\"band\", np.array([123, 234.234, 345.345678])),\n \"fwhm\": (\"band\", np.array([1, 0.234, 0.000345])),\n }\n expected = DataArray(data, dims=(\"band\", \"y\", \"x\"), coords=coords)\n\n with xr.open_rasterio(tmp_file) as rioda:\n assert_allclose(rioda, expected)\n assert isinstance(rioda.attrs[\"crs\"], str)\n assert isinstance(rioda.attrs[\"res\"], tuple)\n assert isinstance(rioda.attrs[\"is_tiled\"], np.uint8)\n assert isinstance(rioda.attrs[\"transform\"], tuple)\n assert len(rioda.attrs[\"transform\"]) == 6\n # from ENVI tags\n assert isinstance(rioda.attrs[\"description\"], str)\n assert isinstance(rioda.attrs[\"map_info\"], str)\n assert isinstance(rioda.attrs[\"samples\"], str)\n\n def test_no_mftime(self):\n # rasterio can accept \"filename\" urguments that are actually urls,\n # including paths to remote files.\n # In issue #1816, we found that these caused dask to break, because\n # the modification time was used to determine the dask token. This\n # tests ensure we can still chunk such files when reading with\n # rasterio.\n with create_tmp_geotiff(\n 8, 10, 3, transform_args=[1, 2, 0.5, 2.0], crs=\"+proj=latlong\"\n ) as (tmp_file, expected):\n with mock.patch(\"os.path.getmtime\", side_effect=OSError):\n with xr.open_rasterio(tmp_file, chunks=(1, 2, 2)) as actual:\n assert isinstance(actual.data, da.Array)\n assert_allclose(actual, expected)\n\n @pytest.mark.xfail(reason=\"Network could be problematic\")\n def test_http_url(self):\n # more examples urls here\n # http://download.osgeo.org/geotiff/samples/\n url = \"http://download.osgeo.org/geotiff/samples/made_up/ntf_nord.tif\"\n with xr.open_rasterio(url) as actual:\n assert actual.shape == (1, 512, 512)\n # make sure chunking works\n with xr.open_rasterio(url, chunks=(1, 256, 256)) as actual:\n assert isinstance(actual.data, da.Array)\n\n def test_rasterio_environment(self):\n with create_tmp_geotiff() as (tmp_file, expected):\n # Should fail with error since suffix not allowed\n with pytest.raises(Exception):\n with rasterio.Env(GDAL_SKIP=\"GTiff\"):\n with xr.open_rasterio(tmp_file) as actual:\n assert_allclose(actual, expected)\n\n @pytest.mark.xfail(\n rasterio.__version__ == \"1.1.1\",\n reason=\"https://github.com/mapbox/rasterio/issues/1833\",\n )\n def test_rasterio_vrt(self):\n # tmp_file default crs is UTM: CRS({'init': 'epsg:32618'}\n with create_tmp_geotiff() as (tmp_file, expected):\n with rasterio.open(tmp_file) as src:\n with rasterio.vrt.WarpedVRT(src, crs=\"epsg:4326\") as vrt:\n expected_shape = (vrt.width, vrt.height)\n expected_crs = vrt.crs\n expected_res = vrt.res\n # Value of single pixel in center of image\n lon, lat = vrt.xy(vrt.width // 2, vrt.height // 2)\n expected_val = next(vrt.sample([(lon, lat)]))\n with xr.open_rasterio(vrt) as da:\n actual_shape = (da.sizes[\"x\"], da.sizes[\"y\"])\n actual_crs = da.crs\n actual_res = da.res\n actual_val = da.sel(dict(x=lon, y=lat), method=\"nearest\").data\n\n assert actual_crs == expected_crs\n assert actual_res == expected_res\n assert actual_shape == expected_shape\n assert expected_val.all() == actual_val.all()\n\n def test_rasterio_vrt_with_transform_and_size(self):\n # Test open_rasterio() support of WarpedVRT with transform, width and\n # height (issue #2864)\n with 
create_tmp_geotiff() as (tmp_file, expected):\n with rasterio.open(tmp_file) as src:\n # Estimate the transform, width and height\n # for a change of resolution\n # tmp_file initial res is (1000,2000) (default values)\n trans, w, h = calculate_default_transform(\n src.crs, src.crs, src.width, src.height, resolution=500, *src.bounds\n )\n with rasterio.vrt.WarpedVRT(\n src, transform=trans, width=w, height=h\n ) as vrt:\n expected_shape = (vrt.width, vrt.height)\n expected_res = vrt.res\n expected_transform = vrt.transform\n with xr.open_rasterio(vrt) as da:\n actual_shape = (da.sizes[\"x\"], da.sizes[\"y\"])\n actual_res = da.res\n actual_transform = Affine(*da.transform)\n assert actual_res == expected_res\n assert actual_shape == expected_shape\n assert actual_transform == expected_transform\n\n @pytest.mark.xfail(reason=\"Network could be problematic\")\n def test_rasterio_vrt_network(self):\n url = \"https://storage.googleapis.com/\\\n gcp-public-data-landsat/LC08/01/047/027/\\\n LC08_L1TP_047027_20130421_20170310_01_T1/\\\n LC08_L1TP_047027_20130421_20170310_01_T1_B4.TIF\"\n env = rasterio.Env(\n GDAL_DISABLE_READDIR_ON_OPEN=\"EMPTY_DIR\",\n CPL_VSIL_CURL_USE_HEAD=False,\n CPL_VSIL_CURL_ALLOWED_EXTENSIONS=\"TIF\",\n )\n with env:\n with rasterio.open(url) as src:\n with rasterio.vrt.WarpedVRT(src, crs=\"epsg:4326\") as vrt:\n expected_shape = (vrt.width, vrt.height)\n expected_crs = vrt.crs\n expected_res = vrt.res\n # Value of single pixel in center of image\n lon, lat = vrt.xy(vrt.width // 2, vrt.height // 2)\n expected_val = next(vrt.sample([(lon, lat)]))\n with xr.open_rasterio(vrt) as da:\n actual_shape = (da.sizes[\"x\"], da.sizes[\"y\"])\n actual_crs = da.crs\n actual_res = da.res\n actual_val = da.sel(dict(x=lon, y=lat), method=\"nearest\").data\n\n assert_equal(actual_shape, expected_shape)\n assert_equal(actual_crs, expected_crs)\n assert_equal(actual_res, expected_res)\n assert_equal(expected_val, actual_val)\n\n\ndef test_open_cog():\n cog_file = os.path.join(TEST_INPUT_DATA_DIR, \"cog.tif\")\n rdsm = rioxarray.open_rasterio(cog_file)\n assert rdsm.shape == (1, 500, 500)\n rdso = rioxarray.open_rasterio(cog_file, overview_level=0)\n assert rdso.shape == (1, 250, 250)\n\n\ndef test_mask_and_scale():\n with pytest.warns(SerializationWarning):\n with rioxarray.open_rasterio(\n os.path.join(TEST_INPUT_DATA_DIR, \"tmmx_20190121.nc\"), mask_and_scale=True\n ) as rds:\n assert np.nanmin(rds.air_temperature.values) == 248.7\n assert np.nanmax(rds.air_temperature.values) == 302.1\n assert rds.air_temperature.encoding == {\n \"_Unsigned\": \"true\",\n \"add_offset\": 220.0,\n \"scale_factor\": 0.1,\n \"_FillValue\": 32767.0,\n \"missing_value\": 32767,\n }\n attrs = rds.air_temperature.attrs\n attrs.pop(\"transform\")\n assert attrs == {\n \"coordinates\": \"day\",\n \"coordinate_system\": \"WGS84,EPSG:4326\",\n \"description\": \"Daily Maximum Temperature\",\n \"dimensions\": \"lon lat time\",\n \"grid_mapping\": \"spatial_ref\",\n \"long_name\": \"tmmx\",\n \"standard_name\": \"tmmx\",\n \"units\": \"K\",\n }\n\n\ndef test_no_mask_and_scale():\n with rioxarray.open_rasterio(\n os.path.join(TEST_INPUT_DATA_DIR, \"tmmx_20190121.nc\"), masked=True\n ) as rds:\n assert np.nanmin(rds.air_temperature.values) == 287\n assert np.nanmax(rds.air_temperature.values) == 821\n assert rds.air_temperature.encoding == {\n \"_FillValue\": 32767.0,\n \"missing_value\": 32767,\n }\n attrs = rds.air_temperature.attrs\n attrs.pop(\"transform\")\n assert attrs == {\n \"_Unsigned\": \"true\",\n 
\"add_offset\": 220.0,\n \"coordinates\": \"day\",\n \"coordinate_system\": \"WGS84,EPSG:4326\",\n \"description\": \"Daily Maximum Temperature\",\n \"dimensions\": \"lon lat time\",\n \"grid_mapping\": \"spatial_ref\",\n \"long_name\": \"tmmx\",\n \"scale_factor\": 0.1,\n \"standard_name\": \"tmmx\",\n \"units\": \"K\",\n }\n\n\ndef test_notgeoreferenced_warning():\n with create_tmp_geotiff(transform_args=None) as (tmp_file, expected):\n with pytest.warns(NotGeoreferencedWarning):\n rioxarray.open_rasterio(tmp_file)\n\n\ndef test_nc_attr_loading():\n with rioxarray.open_rasterio(\n os.path.join(TEST_INPUT_DATA_DIR, \"PLANET_SCOPE_3D.nc\")\n ) as rds:\n assert rds.dims == {\"y\": 10, \"x\": 10, \"time\": 2}\n assert rds.attrs == {\"coordinates\": \"spatial_ref\"}\n assert rds.y.attrs[\"units\"] == \"metre\"\n assert rds.x.attrs[\"units\"] == \"metre\"\n assert rds.time.encoding == {\n \"units\": \"seconds since 2016-12-19T10:27:29.687763\",\n \"calendar\": \"proleptic_gregorian\",\n }\n assert str(rds.time.values[0]) == \"2016-12-19 10:27:29\"\n assert str(rds.time.values[1]) == \"2016-12-29 12:52:41.659696\"\n" ]
[ [ "numpy.nanmax", "numpy.testing.assert_array_equal", "numpy.nanmin", "numpy.arange", "numpy.isnan", "numpy.array" ] ]
jackd/graph-tfds
[ "c843e485b931cc25370a151ae2326e1eec009792" ]
[ "graph_tfds/graphs/graph_saint/graph_saint.py" ]
[ "\"\"\"graph_saint dataset.\"\"\"\n\nimport json\nimport os\nfrom typing import Dict\n\nimport numpy as np\nimport scipy.sparse as sp\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\n\nimport gdown\n\n_DESCRIPTION = \"\"\"\\\nDatasets used in/provided by [GraphSAINT](https://github.com/GraphSAINT/GraphSAINT).\"\"\"\n\n_CITATION = \"\"\"\\\n@inproceedings{graphsaint-iclr20,\ntitle={{GraphSAINT}: Graph Sampling Based Inductive Learning Method},\nauthor={Hanqing Zeng and Hongkuan Zhou and Ajitesh Srivastava and Rajgopal Kannan and Viktor Prasanna},\nbooktitle={International Conference on Learning Representations},\nyear={2020},\nurl={https://openreview.net/forum?id=BJe8pkHFwS}\n}\n\"\"\"\n\n_DRIVE_URL = \"https://drive.google.com/uc?export=download&id={}\"\n\n\nclass GraphSaintConfig(tfds.core.BuilderConfig):\n def __init__(\n self,\n *,\n num_classes: int,\n num_features: int,\n drive_ids: Dict[str, str],\n **kwargs,\n ):\n self.drive_ids = drive_ids\n self.num_classes = num_classes\n self.num_features = num_features\n super().__init__(\n version=tfds.core.Version(\"1.0.0\"),\n release_notes={\"1.0.0\": \"Initial release.\"},\n description=_DESCRIPTION,\n **kwargs,\n )\n\n\nYELP = GraphSaintConfig(\n name=\"yelp\",\n num_classes=100,\n num_features=300,\n drive_ids={\n \"adj_full.npz\": \"1Juwx8HtDwSzmVIJ31ooVa1WljI4U5JnA\",\n # \"adj_train.npz\": \"1nnkeyMcaro-2_j20CLZ0P6nH4SdivEgJ\",\n \"feats.npy\": \"1Zy6BZH_zLEjKlEFSduKE5tV9qqA_8VtM\",\n \"role.json\": \"1NI5pa5Chpd-52eSmLW60OnB3WS5ikxq_\",\n \"class_map.json\": \"1VUcBGr0T0-klqerjAjxRmAqFuld_SMWU\",\n },\n)\n\n\nclass GraphSaint(tfds.core.GeneratorBasedBuilder):\n \"\"\"DatasetBuilder for graph_saint dataset.\"\"\"\n\n BUILDER_CONFIGS = [YELP]\n\n def _info(self) -> tfds.core.DatasetInfo:\n \"\"\"Returns the dataset metadata.\"\"\"\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict(\n {\n \"graph\": {\n \"links\": tfds.core.features.Tensor(\n shape=(None, 2), dtype=tf.int64\n ),\n \"node_features\": tfds.core.features.Tensor(\n shape=(None, self.builder_config.num_features),\n dtype=tf.float32,\n ),\n },\n \"node_labels\": tfds.core.features.Tensor(\n shape=(None, self.builder_config.num_classes), dtype=tf.int64,\n ),\n \"train_ids\": tfds.core.features.Tensor(\n shape=(None,), dtype=tf.int64\n ),\n \"validation_ids\": tfds.core.features.Tensor(\n shape=(None,), dtype=tf.int64\n ),\n \"test_ids\": tfds.core.features.Tensor(\n shape=(None,), dtype=tf.int64\n ),\n }\n ),\n supervised_keys=(\"graph\", \"node_labels\"),\n homepage=\"http://snap.stanford.edu/graphsage/\",\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager: tfds.download.DownloadManager):\n \"\"\"Returns SplitGenerators.\"\"\"\n name = self.builder_config.name\n ids = self.builder_config.drive_ids\n dl_dir = dl_manager._download_dir # pylint: disable=protected-access\n paths = {k: os.path.join(dl_dir, f\"{name}-{k}\") for k in ids}\n missing_paths = {k: v for k, v in paths.items() if not tf.io.gfile.exists(v)}\n\n for k, path in missing_paths.items():\n url = _DRIVE_URL.format(ids[k])\n gdown.download(url, output=path)\n\n return {\n \"train\": self._generate_examples(paths),\n }\n\n def _generate_examples(self, paths):\n \"\"\"Yields examples.\"\"\"\n with tf.io.gfile.GFile(paths[\"class_map.json\"], \"rb\") as fp:\n class_map = json.load(fp)\n labels = np.empty(\n (len(class_map), self.builder_config.num_classes), dtype=np.int64\n )\n for k, v in class_map.items():\n 
labels[int(k)] = v\n del class_map\n\n with tf.io.gfile.GFile(paths[\"adj_full.npz\"], \"rb\") as fp:\n adj = sp.load_npz(fp).tocoo()\n links = np.stack((adj.row, adj.col), axis=-1).astype(np.int64)\n del adj\n\n with tf.io.gfile.GFile(paths[\"feats.npy\"], \"rb\") as fp:\n feats = np.load(fp).astype(np.float32)\n\n with tf.io.gfile.GFile(paths[\"role.json\"], \"rb\") as fp:\n roles = json.load(fp)\n\n train_ids, validation_ids, test_ids = (\n np.array(roles[k], dtype=np.int64) for k in (\"tr\", \"va\", \"te\")\n )\n del roles\n\n data = dict(\n graph=dict(links=links, node_features=feats),\n node_labels=labels,\n train_ids=train_ids,\n validation_ids=validation_ids,\n test_ids=test_ids,\n )\n yield 0, data\n\n\nif __name__ == \"__main__\":\n\n config = tfds.core.download.DownloadConfig(register_checksums=True)\n for builder_config in GraphSaint.BUILDER_CONFIGS:\n builder = GraphSaint(config=builder_config)\n builder.download_and_prepare(download_config=config)\n" ]
[ [ "tensorflow.io.gfile.exists", "numpy.load", "tensorflow.io.gfile.GFile", "numpy.stack", "scipy.sparse.load_npz", "numpy.array" ] ]
micimize/deepchem
[ "510b9bf1805bc5a472c1a519700e6b128e06c651" ]
[ "deepchem/trans/tests/test_normalization.py" ]
[ "import os\nimport deepchem as dc\nimport numpy as np\nimport pytest\n\n\ndef load_unlabelled_data():\n current_dir = os.path.dirname(os.path.abspath(__file__))\n featurizer = dc.feat.CircularFingerprint(size=1024)\n tasks = []\n input_file = os.path.join(current_dir, \"../../data/tests/no_labels.csv\")\n loader = dc.data.CSVLoader(\n tasks=tasks, smiles_field=\"smiles\", featurizer=featurizer)\n return loader.featurize(input_file)\n\n\ndef load_solubility_data():\n \"\"\"Loads solubility dataset\"\"\"\n current_dir = os.path.dirname(os.path.abspath(__file__))\n featurizer = dc.feat.CircularFingerprint(size=1024)\n tasks = [\"log-solubility\"]\n task_type = \"regression\"\n input_file = os.path.join(current_dir, \"../../models/tests/example.csv\")\n loader = dc.data.CSVLoader(\n tasks=tasks, smiles_field=\"smiles\", featurizer=featurizer)\n\n return loader.create_dataset(input_file)\n\n\ndef test_transform_unlabelled():\n ul_dataset = load_unlabelled_data()\n # transforming y should raise an exception\n with pytest.raises(ValueError):\n dc.trans.transformers.Transformer(transform_y=True).transform(ul_dataset)\n\n # transforming w should raise an exception\n with pytest.raises(ValueError):\n dc.trans.transformers.Transformer(transform_w=True).transform(ul_dataset)\n\n # transforming X should be okay\n dc.trans.NormalizationTransformer(\n transform_X=True, dataset=ul_dataset).transform(ul_dataset)\n\n\ndef test_y_normalization_transformer():\n \"\"\"Tests normalization transformer.\"\"\"\n solubility_dataset = load_solubility_data()\n normalization_transformer = dc.trans.NormalizationTransformer(\n transform_y=True, dataset=solubility_dataset)\n X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,\n solubility_dataset.w, solubility_dataset.ids)\n solubility_dataset = normalization_transformer.transform(solubility_dataset)\n X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,\n solubility_dataset.w, solubility_dataset.ids)\n # Check ids are unchanged.\n for id_elt, id_t_elt in zip(ids, ids_t):\n assert id_elt == id_t_elt\n # Check X is unchanged since this is a y transformer\n np.testing.assert_allclose(X, X_t)\n # Check w is unchanged since this is a y transformer\n np.testing.assert_allclose(w, w_t)\n # Check that y_t has zero mean, unit std.\n assert np.isclose(y_t.mean(), 0.)\n assert np.isclose(y_t.std(), 1.)\n\n # Check that untransform does the right thing.\n np.testing.assert_allclose(normalization_transformer.untransform(y_t), y)\n\n\ndef test_X_normalization_transformer():\n \"\"\"Tests normalization transformer.\"\"\"\n solubility_dataset = load_solubility_data()\n normalization_transformer = dc.trans.NormalizationTransformer(\n transform_X=True, dataset=solubility_dataset)\n X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,\n solubility_dataset.w, solubility_dataset.ids)\n solubility_dataset = normalization_transformer.transform(solubility_dataset)\n X_t, y_t, w_t, ids_t = (solubility_dataset.X, solubility_dataset.y,\n solubility_dataset.w, solubility_dataset.ids)\n # Check ids are unchanged.\n for id_elt, id_t_elt in zip(ids, ids_t):\n assert id_elt == id_t_elt\n # Check y is unchanged since this is a X transformer\n np.testing.assert_allclose(y, y_t)\n # Check w is unchanged since this is a y transformer\n np.testing.assert_allclose(w, w_t)\n # Check that X_t has zero mean, unit std.\n # np.set_printoptions(threshold='nan')\n mean = X_t.mean(axis=0)\n assert np.amax(np.abs(mean - np.zeros_like(mean))) < 1e-7\n orig_std_array = X.std(axis=0)\n 
std_array = X_t.std(axis=0)\n # Entries with zero std are not normalized\n for orig_std, std in zip(orig_std_array, std_array):\n if not np.isclose(orig_std, 0):\n assert np.isclose(std, 1)\n\n # Check that untransform does the right thing.\n np.testing.assert_allclose(\n normalization_transformer.untransform(X_t), X, atol=1e-7)\n" ]
[ [ "numpy.zeros_like", "numpy.isclose", "numpy.testing.assert_allclose" ] ]
NightmareNyx/semi-supervised-pytorch
[ "43bb86bc6757345bd7a4eb37d6948ee62a268f7e" ]
[ "semi-supervised/read_functions.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 6 15:40:14 2017\n\nFunctions needed to read the data from different databases\n\n@author: anazabal, olmosUC3M, ivaleraM\n\"\"\"\n\nimport csv\nimport os\nimport torch\nimport numpy as np\nfrom sklearn.metrics import mean_squared_error\n\n\ndef read_data(data_file, types_file, miss_file, true_miss_file):\n # Read types of data from data file\n with open(types_file) as f:\n types_dict = [{k: v for k, v in row.items()}\n for row in csv.DictReader(f, skipinitialspace=True)]\n for t in types_dict:\n t['dim'] = int(t['dim'])\n\n # Read data from input file\n with open(data_file, 'r') as f:\n data = [[float(x) for x in rec] for rec in csv.reader(f, delimiter=',')]\n data = np.array(data)\n\n # Substitute NaN values by something (we assume we have the real missing value mask)\n if true_miss_file:\n with open(true_miss_file, 'r') as f:\n missing_positions = [[int(x) for x in rec] for rec in csv.reader(f, delimiter=',')]\n missing_positions = np.array(missing_positions)\n\n true_miss_mask = np.ones([np.shape(data)[0], len(types_dict)])\n true_miss_mask[missing_positions[:, 0] - 1, missing_positions[:, 1] - 1] = 0 # Indexes in the csv start at 1\n data_masked = np.ma.masked_where(np.isnan(data), data)\n # We need to fill the data depending on the given data...\n data_filler = []\n for i in range(len(types_dict)):\n if types_dict[i]['type'] == 'cat' or types_dict[i]['type'] == 'ordinal':\n aux = np.unique(data[:, i])\n data_filler.append(aux[0]) # Fill with the first element of the cat (0, 1, or whatever)\n else:\n data_filler.append(0.0)\n\n data = data_masked.filled(data_filler)\n else:\n true_miss_mask = np.ones([np.shape(data)[0], len(types_dict)]) # It doesn't affect our data\n\n # Construct the data matrices\n data_complete = []\n for i in range(np.shape(data)[1]):\n\n if types_dict[i]['type'] == 'cat':\n # Get categories\n cat_data = [int(x) for x in data[:, i]]\n categories, indexes = np.unique(cat_data, return_inverse=True)\n # Transform categories to a vector of 0:n_categories\n new_categories = np.arange(int(types_dict[i]['dim']))\n cat_data = new_categories[indexes]\n # Create one hot encoding for the categories\n aux = np.zeros([np.shape(data)[0], len(new_categories)])\n aux[np.arange(np.shape(data)[0]), cat_data] = 1\n data_complete.append(aux)\n\n elif types_dict[i]['type'] == 'ordinal':\n # Get categories\n cat_data = [int(x) for x in data[:, i]]\n categories, indexes = np.unique(cat_data, return_inverse=True)\n # Transform categories to a vector of 0:n_categories\n new_categories = np.arange(int(types_dict[i]['dim']))\n cat_data = new_categories[indexes]\n # Create thermometer encoding for the categories\n aux = np.zeros([np.shape(data)[0], 1 + len(new_categories)])\n aux[:, 0] = 1\n aux[np.arange(np.shape(data)[0]), 1 + cat_data] = -1\n aux = np.cumsum(aux, 1)\n data_complete.append(aux[:, :-1])\n\n elif types_dict[i]['type'] == 'count':\n if np.min(data[:, i]) == 0:\n aux = data[:, i] + 1\n data_complete.append(np.transpose([aux]))\n else:\n data_complete.append(np.transpose([data[:, i]]))\n else:\n data_complete.append(np.transpose([data[:, i]]))\n\n data = np.concatenate(data_complete, 1)\n\n # Read Missing mask from csv (contains positions of missing values)\n n_samples = np.shape(data)[0]\n n_variables = len(types_dict)\n miss_mask = np.ones([np.shape(data)[0], n_variables])\n # If there is no mask, assume all data is observed\n if os.path.isfile(miss_file):\n with open(miss_file, 'r') as f:\n 
missing_positions = [[int(x) for x in rec] for rec in csv.reader(f, delimiter=',')]\n missing_positions = np.array(missing_positions)\n miss_mask[missing_positions[:, 0] - 1, missing_positions[:, 1] - 1] = 0 # Indexes in the csv start at 1\n\n return data, types_dict, miss_mask, true_miss_mask, n_samples\n\n\ndef next_batch(data, labels, types_dict, miss_mask, batch_size, index_batch):\n # Create minibath\n batch_xs = data[index_batch * batch_size:(index_batch + 1) * batch_size, :]\n\n # Slipt variables of the batches\n data_list = []\n initial_index = 0\n for d in types_dict:\n dim = int(d['dim'])\n data_list.append(batch_xs[:, initial_index:initial_index + dim])\n initial_index += dim\n\n data_tensor = torch.cat(data_list, dim=1)\n\n # Missing data\n miss_list = miss_mask[index_batch * batch_size:(index_batch + 1) * batch_size, :]\n\n # Labels\n labels_batch = None\n if labels is not None:\n labels_batch = labels[index_batch * batch_size:(index_batch + 1) * batch_size]\n\n return data_tensor, labels_batch, miss_list\n\n\ndef samples_concatenation(samples):\n for i, batch in enumerate(samples):\n if i == 0:\n samples_x = np.concatenate(batch['x'], 1)\n samples_y = batch['y']\n samples_z = batch['z']\n samples_s = batch['s']\n else:\n samples_x = np.concatenate([samples_x, np.concatenate(batch['x'], 1)], 0)\n samples_y = np.concatenate([samples_y, batch['y']], 0)\n samples_z = np.concatenate([samples_z, batch['z']], 0)\n samples_s = np.concatenate([samples_s, batch['s']], 0)\n\n return samples_s, samples_z, samples_y, samples_x\n\n\ndef discrete_variables_transformation(data, types_dict):\n ind_ini = 0\n output = []\n for d in range(len(types_dict)):\n ind_end = ind_ini + int(types_dict[d]['dim'])\n if types_dict[d]['type'] == 'cat':\n output.append(np.reshape(np.argmax(data[:, ind_ini:ind_end], 1), [-1, 1]))\n elif types_dict[d]['type'] == 'ordinal':\n output.append(np.reshape(np.sum(data[:, ind_ini:ind_end], 1) - 1, [-1, 1]))\n else:\n output.append(data[:, ind_ini:ind_end])\n ind_ini = ind_end\n\n return np.concatenate(output, 1)\n\n\n# Several baselines\ndef mean_imputation(train_data, miss_mask, types_dict):\n ind_ini = 0\n est_data = []\n for dd in range(len(types_dict)):\n # Imputation for cat and ordinal is done using the mode of the data\n if types_dict[dd]['type'] == 'cat' or types_dict[dd]['type'] == 'ordinal':\n ind_end = ind_ini + 1\n # The imputation is based on whatever is observed\n miss_pattern = (miss_mask[:, dd] == 1)\n values, counts = np.unique(train_data[miss_pattern, ind_ini:ind_end], return_counts=True)\n data_mode = np.argmax(counts)\n data_imputed = train_data[:, ind_ini:ind_end] * miss_mask[:, ind_ini:ind_end] + data_mode * (\n 1.0 - miss_mask[:, ind_ini:ind_end])\n\n # Imputation for the rest of the variables is done with the mean of the data\n else:\n ind_end = ind_ini + int(types_dict[dd]['dim'])\n miss_pattern = (miss_mask[:, dd] == 1)\n # The imputation is based on whatever is observed\n data_mean = np.mean(train_data[miss_pattern, ind_ini:ind_end], 0)\n data_imputed = train_data[:, ind_ini:ind_end] * miss_mask[:, ind_ini:ind_end] + data_mean * (\n 1.0 - miss_mask[:, ind_ini:ind_end])\n\n est_data.append(data_imputed)\n ind_ini = ind_end\n\n return np.concatenate(est_data, 1)\n\n\ndef p_distribution_params_concatenation(params, types_dict, z_dim, s_dim):\n keys = params[0].keys()\n out_dict = {key: [] for key in keys}\n\n for i, batch in enumerate(params):\n\n for d, k in enumerate(keys):\n\n if k == 'z' or k == 'y':\n if i == 0:\n out_dict[k] = batch[k]\n 
else:\n out_dict[k] = np.concatenate([out_dict[k], batch[k]], 1)\n\n elif k == 'x':\n if i == 0:\n out_dict[k] = batch[k]\n else:\n for v in range(len(types_dict)):\n if types_dict[v]['type'] == 'pos' or types_dict[v]['type'] == 'real':\n out_dict[k][v] = np.concatenate([out_dict[k][v], batch[k][v]], 1)\n else:\n out_dict[k][v] = np.concatenate([out_dict[k][v], batch[k][v]], 0)\n\n return out_dict\n\n\ndef q_distribution_params_concatenation(params, z_dim, s_dim):\n keys = params[0].keys()\n out_dict = {key: [] for key in keys}\n\n for i, batch in enumerate(params):\n for d, k in enumerate(keys):\n out_dict[k].append(batch[k])\n\n out_dict['z'] = np.concatenate(out_dict['z'], 1)\n if 's' in out_dict:\n out_dict['s'] = np.concatenate(out_dict['s'], 0)\n\n return out_dict\n\n\ndef statistics(loglik_params, types_dict):\n loglik_mean = []\n loglik_mode = []\n\n for d, attrib in enumerate(loglik_params):\n if types_dict[d]['type'] == 'real':\n # Normal distribution (mean, sigma)\n loglik_mean.append(attrib[0])\n loglik_mode.append(attrib[0])\n # Only for log-normal\n elif types_dict[d]['type'] == 'pos':\n # Log-normal distribution (mean, sigma)\n loglik_mean.append(np.maximum(np.exp(attrib[0] + 0.5 * attrib[1]) - 1.0, 0.0))\n loglik_mode.append(np.maximum(np.exp(attrib[0] - attrib[1]) - 1.0, 0.0))\n elif types_dict[d]['type'] == 'count':\n # Poisson distribution (lambda)\n loglik_mean.append(attrib)\n loglik_mode.append(np.floor(attrib))\n\n else:\n # Categorical and ordinal (mode imputation for both)\n loglik_mean.append(np.reshape(np.argmax(attrib, 1), [-1, 1]))\n loglik_mode.append(np.reshape(np.argmax(attrib, 1), [-1, 1]))\n\n return np.transpose(np.squeeze(loglik_mean)), np.transpose(np.squeeze(loglik_mode))\n\n\ndef error_computation(x_train, x_hat, types_dict, miss_mask):\n error_observed = []\n error_missing = []\n ind_ini = 0\n for dd in range(len(types_dict)):\n # Mean classification error\n if types_dict[dd]['type'] == 'cat':\n ind_end = ind_ini + 1\n error_observed.append(np.mean(\n x_train[miss_mask[:, dd] == 1, ind_ini:ind_end] != x_hat[miss_mask[:, dd] == 1, ind_ini:ind_end]))\n if np.sum(miss_mask[:, dd] == 0, 0) == 0:\n error_missing.append(0)\n else:\n error_missing.append(np.mean(\n x_train[miss_mask[:, dd] == 0, ind_ini:ind_end] != x_hat[miss_mask[:, dd] == 0, ind_ini:ind_end]))\n # Mean \"shift\" error\n elif types_dict[dd]['type'] == 'ordinal':\n ind_end = ind_ini + 1\n error_observed.append(np.mean(np.abs(\n x_train[miss_mask[:, dd] == 1, ind_ini:ind_end] - x_hat[miss_mask[:, dd] == 1, ind_ini:ind_end])) / int(\n types_dict[dd]['dim']))\n if np.sum(miss_mask[:, dd] == 0, 0) == 0:\n error_missing.append(0)\n else:\n error_missing.append(np.mean(np.abs(\n x_train[miss_mask[:, dd] == 0, ind_ini:ind_end] - x_hat[miss_mask[:, dd] == 0,\n ind_ini:ind_end])) / int(types_dict[dd]['dim']))\n # Normalized root mean square error\n else:\n ind_end = ind_ini + int(types_dict[dd]['dim'])\n norm_term = np.max(x_train[:, dd]) - np.min(x_train[:, dd])\n error_observed.append(np.sqrt(mean_squared_error(x_train[miss_mask[:, dd] == 1, ind_ini:ind_end],\n x_hat[miss_mask[:, dd] == 1,\n ind_ini:ind_end])) / norm_term)\n\n if np.sum(miss_mask[:, dd] == 0, 0) == 0:\n error_missing.append(0)\n else:\n error_missing.append(np.sqrt(mean_squared_error(x_train[miss_mask[:, dd] == 0, ind_ini:ind_end],\n x_hat[miss_mask[:, dd] == 0,\n ind_ini:ind_end])) / norm_term)\n\n ind_ini = ind_end\n\n return error_observed, error_missing\n" ]
[ [ "numpy.sum", "numpy.cumsum", "numpy.transpose", "numpy.squeeze", "sklearn.metrics.mean_squared_error", "numpy.floor", "numpy.abs", "numpy.argmax", "numpy.exp", "numpy.max", "numpy.shape", "numpy.min", "numpy.isnan", "numpy.array", "numpy.concatenate", "torch.cat", "numpy.unique", "numpy.mean" ] ]
IAmSuyogJadhav/tensorlayer
[ "b9115e027f8a47f5b8c3c92ade30603560c5e987" ]
[ "example/tutorial_tfrecord.py" ]
[ "#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"You will learn.\n\n1. How to save data into TFRecord format file.\n2. How to read data from TFRecord format file by using Queue and Thread.\n\nReference:\n-----------\nEnglish : https://indico.io/blog/tensorflow-data-inputs-part1-placeholders-protobufs-queues/\n https://www.tensorflow.org/versions/master/how_tos/reading_data/index.html\n https://www.tensorflow.org/versions/master/api_docs/python/io_ops.html#readers\nChinese : http://blog.csdn.net/u012759136/article/details/52232266\n https://github.com/ycszen/tf_lab/blob/master/reading_data/TensorFlow高效加载数据的方法.md\n\nMore\n------\n1. tutorial_tfrecord2.py\n2. tutorial_cifar10_tfrecord.py\n\n\"\"\"\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\nimport tensorlayer as tl\n\n## Save data ==================================================================\nclasses = ['/data/cat', '/data/dog'] # cat is 0, dog is 1\ncwd = os.getcwd()\nwriter = tf.python_io.TFRecordWriter(\"train.tfrecords\")\nfor index, name in enumerate(classes):\n class_path = cwd + name + \"/\"\n for img_name in os.listdir(class_path):\n img_path = class_path + img_name\n img = Image.open(img_path)\n img = img.resize((224, 224))\n ## Visualize the image as follow:\n # tl.visualize.frame(I=img, second=5, saveable=False, name='frame', fig_idx=12836)\n ## Converts a image to bytes\n img_raw = img.tobytes()\n ## Convert the bytes back to image as follow:\n # image = Image.frombytes('RGB', (224,224), img_raw)\n # tl.visualize.frame(I=image, second=1, saveable=False, name='frame', fig_idx=1236)\n ## Write the data into TF format\n # image : Feature + BytesList\n # label : Feature + Int64List or FloatList\n # sentence : FeatureList + Int64List , see Google's im2txt example\n example = tf.train.Example(features=tf.train.Features(feature={ # SequenceExample for seuqnce example\n \"label\": tf.train.Feature(int64_list=tf.train.Int64List(value=[index])),\n 'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),\n }))\n writer.write(example.SerializeToString()) # Serialize To String\nwriter.close()\n\n## Load Data Method 1: Simple read ============================================\n# read data one by one in order\nfor serialized_example in tf.python_io.tf_record_iterator(\"train.tfrecords\"):\n example = tf.train.Example() # SequenceExample for seuqnce example\n example.ParseFromString(serialized_example)\n img_raw = example.features.feature['img_raw'].bytes_list.value\n label = example.features.feature['label'].int64_list.value\n ## converts a image from bytes\n image = Image.frombytes('RGB', (224, 224), img_raw[0])\n tl.visualize.frame(np.asarray(image), second=0.5, saveable=False, name='frame', fig_idx=1283)\n print(label)\n\n\n## Read Data Method 2: Queue and Thread =======================================\n# use sess.run to get a batch of data\ndef read_and_decode(filename):\n # generate a queue with a given file name\n filename_queue = tf.train.string_input_producer([filename])\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue) # return the file and the name of file\n features = tf.parse_single_example(\n serialized_example, # see parse_single_sequence_example for sequence example\n features={\n 'label': tf.FixedLenFeature([], tf.int64),\n 'img_raw': tf.FixedLenFeature([], tf.string),\n })\n # You can do more image distortion here for training data\n img = tf.decode_raw(features['img_raw'], tf.uint8)\n img = tf.reshape(img, [224, 224, 3])\n 
# img = tf.cast(img, tf.float32) * (1. / 255) - 0.5\n label = tf.cast(features['label'], tf.int32)\n return img, label\n\n\nimg, label = read_and_decode(\"train.tfrecords\")\n\n## Use shuffle_batch or batch\n# see https://www.tensorflow.org/versions/master/api_docs/python/io_ops.html#shuffle_batch\nimg_batch, label_batch = tf.train.shuffle_batch([img, label], batch_size=4, capacity=2000, min_after_dequeue=1000, num_threads=16)\nprint(\"img_batch : %s\" % img_batch._shape)\nprint(\"label_batch : %s\" % label_batch._shape)\n# init = tf.initialize_all_variables()\nwith tf.Session() as sess:\n # sess.run(init)\n tl.layers.initialize_global_variables(sess)\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n for i in range(3): # number of mini-batch (step)\n print(\"Step %d\" % i)\n val, l = sess.run([img_batch, label_batch])\n print(val.shape, l)\n tl.visualize.images2d(val, second=1, saveable=False, name='batch', dtype=None, fig_idx=2020121)\n\n coord.request_stop()\n coord.join(threads)\n sess.close()\n\n#\n" ]
[ [ "tensorflow.TFRecordReader", "tensorflow.reshape", "tensorflow.decode_raw", "tensorflow.python_io.tf_record_iterator", "tensorflow.train.string_input_producer", "numpy.asarray", "tensorflow.train.Int64List", "tensorflow.cast", "tensorflow.train.start_queue_runners", "tensorflow.FixedLenFeature", "tensorflow.Session", "tensorflow.train.shuffle_batch", "tensorflow.python_io.TFRecordWriter", "tensorflow.train.BytesList", "tensorflow.train.Example", "tensorflow.train.Coordinator" ] ]
sadielbartholomew/astropy
[ "ec864232350c5282a56af52a35945e9e43861556" ]
[ "astropy/io/votable/tree.py" ]
[ "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n# TODO: Test FITS parsing\n\n# STDLIB\nimport io\nimport re\nimport gzip\nimport base64\nimport codecs\nimport urllib.request\n\n# THIRD-PARTY\nimport numpy as np\nfrom numpy import ma\n\n# LOCAL\nfrom astropy.io import fits\nfrom astropy import __version__ as astropy_version\nfrom astropy.utils.collections import HomogeneousList\nfrom astropy.utils.xml.writer import XMLWriter\n\nfrom . import converters\nfrom .exceptions import (warn_or_raise, vo_warn, vo_raise, vo_reraise,\n warn_unknown_attrs, W06, W07, W08, W09, W10, W11, W12,\n W13, W15, W17, W18, W19, W20, W21, W22, W26, W27, W28,\n W29, W32, W33, W35, W36, W37, W38, W40, W41, W42, W43,\n W44, W45, W50, W52, W53, E06, E08, E09, E10, E11, E12,\n E13, E15, E16, E17, E18, E19, E20, E21)\nfrom . import ucd as ucd_mod\nfrom . import util\nfrom . import xmlutil\n\ntry:\n from . import tablewriter\n _has_c_tabledata_writer = True\nexcept ImportError:\n _has_c_tabledata_writer = False\n\n\n__all__ = [\n 'Link', 'Info', 'Values', 'Field', 'Param', 'CooSys',\n 'FieldRef', 'ParamRef', 'Group', 'Table', 'Resource',\n 'VOTableFile'\n ]\n\n\n# The default number of rows to read in each chunk before converting\n# to an array.\nDEFAULT_CHUNK_SIZE = 256\nRESIZE_AMOUNT = 1.5\n\n######################################################################\n# FACTORY FUNCTIONS\n\n\ndef _resize(masked, new_size):\n \"\"\"\n Masked arrays can not be resized inplace, and `np.resize` and\n `ma.resize` are both incompatible with structured arrays.\n Therefore, we do all this.\n \"\"\"\n new_array = ma.zeros((new_size,), dtype=masked.dtype)\n length = min(len(masked), new_size)\n new_array[:length] = masked[:length]\n\n return new_array\n\n\ndef _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):\n \"\"\"\n Creates a function useful for looking up an element by a given\n attribute.\n\n Parameters\n ----------\n attr : str\n The attribute name\n\n unique : bool\n Should be `True` if the attribute is unique and therefore this\n should return only one value. Otherwise, returns a list of\n values.\n\n iterator : generator\n A generator that iterates over some arbitrary set of elements\n\n element_name : str\n The XML element name of the elements being iterated over (used\n for error messages only).\n\n doc : str\n A docstring to apply to the generated function.\n\n Returns\n -------\n factory : function\n A function that looks up an element by the given attribute.\n \"\"\"\n\n def lookup_by_attr(self, ref, before=None):\n \"\"\"\n Given a string *ref*, finds the first element in the iterator\n where the given attribute == *ref*. If *before* is provided,\n will stop searching at the object *before*. 
This is\n important, since \"forward references\" are not allowed in the\n VOTABLE format.\n \"\"\"\n for element in getattr(self, iterator)():\n if element is before:\n if getattr(element, attr, None) == ref:\n vo_raise(\n f\"{element_name} references itself\",\n element._config, element._pos, KeyError)\n break\n if getattr(element, attr, None) == ref:\n yield element\n\n def lookup_by_attr_unique(self, ref, before=None):\n for element in lookup_by_attr(self, ref, before=before):\n return element\n raise KeyError(\n \"No {} with {} '{}' found before the referencing {}\".format(\n element_name, attr, ref, element_name))\n\n if unique:\n lookup_by_attr_unique.__doc__ = doc\n return lookup_by_attr_unique\n else:\n lookup_by_attr.__doc__ = doc\n return lookup_by_attr\n\n\ndef _lookup_by_id_or_name_factory(iterator, element_name, doc):\n \"\"\"\n Like `_lookup_by_attr_factory`, but looks in both the \"ID\" and\n \"name\" attributes.\n \"\"\"\n\n def lookup_by_id_or_name(self, ref, before=None):\n \"\"\"\n Given an key *ref*, finds the first element in the iterator\n with the attribute ID == *ref* or name == *ref*. If *before*\n is provided, will stop searching at the object *before*. This\n is important, since \"forward references\" are not allowed in\n the VOTABLE format.\n \"\"\"\n for element in getattr(self, iterator)():\n if element is before:\n if ref in (element.ID, element.name):\n vo_raise(\n f\"{element_name} references itself\",\n element._config, element._pos, KeyError)\n break\n if ref in (element.ID, element.name):\n return element\n raise KeyError(\n \"No {} with ID or name '{}' found before the referencing {}\".format(\n element_name, ref, element_name))\n\n lookup_by_id_or_name.__doc__ = doc\n return lookup_by_id_or_name\n\n\ndef _get_default_unit_format(config):\n \"\"\"\n Get the default unit format as specified in the VOTable spec.\n \"\"\"\n # In the future, this should take into account the VOTable\n # version.\n return 'cds'\n\n\ndef _get_unit_format(config):\n \"\"\"\n Get the unit format based on the configuration.\n \"\"\"\n if config.get('unit_format') is None:\n format = _get_default_unit_format(config)\n else:\n format = config['unit_format']\n return format\n\n\n######################################################################\n# ATTRIBUTE CHECKERS\ndef check_astroyear(year, field, config=None, pos=None):\n \"\"\"\n Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if\n *year* is not a valid astronomical year as defined by the VOTABLE\n standard.\n\n Parameters\n ----------\n year : str\n An astronomical year string\n\n field : str\n The name of the field this year was found in (used for error\n message)\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if (year is not None and\n re.match(r\"^[JB]?[0-9]+([.][0-9]*)?$\", year) is None):\n warn_or_raise(W07, W07, (field, year), config, pos)\n return False\n return True\n\n\ndef check_string(string, attr_name, config=None, pos=None):\n \"\"\"\n Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if\n *string* is not a string or Unicode string.\n\n Parameters\n ----------\n string : str\n An astronomical year string\n\n attr_name : str\n The name of the field this year was found in (used for error\n message)\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if string is not None and not isinstance(string, str):\n warn_or_raise(W08, W08, attr_name, config, pos)\n return False\n return True\n\n\ndef resolve_id(ID, id, config=None, 
pos=None):\n if ID is None and id is not None:\n warn_or_raise(W09, W09, (), config, pos)\n return id\n return ID\n\n\ndef check_ucd(ucd, config=None, pos=None):\n \"\"\"\n Warns or raises a\n `~astropy.io.votable.exceptions.VOTableSpecError` if *ucd* is not\n a valid `unified content descriptor`_ string as defined by the\n VOTABLE standard.\n\n Parameters\n ----------\n ucd : str\n A UCD string.\n\n config, pos : optional\n Information about the source of the value\n \"\"\"\n if config is None:\n config = {}\n if config.get('version_1_1_or_later'):\n try:\n ucd_mod.parse_ucd(\n ucd,\n check_controlled_vocabulary=config.get(\n 'version_1_2_or_later', False),\n has_colon=config.get('version_1_2_or_later', False))\n except ValueError as e:\n # This weird construction is for Python 3 compatibility\n if config.get('verify', 'ignore') == 'exception':\n vo_raise(W06, (ucd, str(e)), config, pos)\n elif config.get('verify', 'ignore') == 'warn':\n vo_warn(W06, (ucd, str(e)), config, pos)\n return False\n else:\n return False\n return True\n\n\n######################################################################\n# PROPERTY MIXINS\nclass _IDProperty:\n @property\n def ID(self):\n \"\"\"\n The XML ID_ of the element. May be `None` or a string\n conforming to XML ID_ syntax.\n \"\"\"\n return self._ID\n\n @ID.setter\n def ID(self, ID):\n xmlutil.check_id(ID, 'ID', self._config, self._pos)\n self._ID = ID\n\n @ID.deleter\n def ID(self):\n self._ID = None\n\n\nclass _NameProperty:\n @property\n def name(self):\n \"\"\"An optional name for the element.\"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n xmlutil.check_token(name, 'name', self._config, self._pos)\n self._name = name\n\n @name.deleter\n def name(self):\n self._name = None\n\n\nclass _XtypeProperty:\n @property\n def xtype(self):\n \"\"\"Extended data type information.\"\"\"\n return self._xtype\n\n @xtype.setter\n def xtype(self, xtype):\n if xtype is not None and not self._config.get('version_1_2_or_later'):\n warn_or_raise(\n W28, W28, ('xtype', self._element_name, '1.2'),\n self._config, self._pos)\n check_string(xtype, 'xtype', self._config, self._pos)\n self._xtype = xtype\n\n @xtype.deleter\n def xtype(self):\n self._xtype = None\n\n\nclass _UtypeProperty:\n _utype_in_v1_2 = False\n\n @property\n def utype(self):\n \"\"\"The usage-specific or `unique type`_ of the element.\"\"\"\n return self._utype\n\n @utype.setter\n def utype(self, utype):\n if (self._utype_in_v1_2 and\n utype is not None and\n not self._config.get('version_1_2_or_later')):\n warn_or_raise(\n W28, W28, ('utype', self._element_name, '1.2'),\n self._config, self._pos)\n check_string(utype, 'utype', self._config, self._pos)\n self._utype = utype\n\n @utype.deleter\n def utype(self):\n self._utype = None\n\n\nclass _UcdProperty:\n _ucd_in_v1_2 = False\n\n @property\n def ucd(self):\n \"\"\"The `unified content descriptor`_ for the element.\"\"\"\n return self._ucd\n\n @ucd.setter\n def ucd(self, ucd):\n if ucd is not None and ucd.strip() == '':\n ucd = None\n if ucd is not None:\n if (self._ucd_in_v1_2 and\n not self._config.get('version_1_2_or_later')):\n warn_or_raise(\n W28, W28, ('ucd', self._element_name, '1.2'),\n self._config, self._pos)\n check_ucd(ucd, self._config, self._pos)\n self._ucd = ucd\n\n @ucd.deleter\n def ucd(self):\n self._ucd = None\n\n\nclass _DescriptionProperty:\n @property\n def description(self):\n \"\"\"\n An optional string describing the element. 
Corresponds to the\n DESCRIPTION_ element.\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n self._description = description\n\n @description.deleter\n def description(self):\n self._description = None\n\n\n######################################################################\n# ELEMENT CLASSES\nclass Element:\n \"\"\"\n A base class for all classes that represent XML elements in the\n VOTABLE file.\n \"\"\"\n _element_name = ''\n _attr_list = []\n\n def _add_unknown_tag(self, iterator, tag, data, config, pos):\n warn_or_raise(W10, W10, tag, config, pos)\n\n def _ignore_add(self, iterator, tag, data, config, pos):\n warn_unknown_attrs(tag, data.keys(), config, pos)\n\n def _add_definitions(self, iterator, tag, data, config, pos):\n if config.get('version_1_1_or_later'):\n warn_or_raise(W22, W22, (), config, pos)\n warn_unknown_attrs(tag, data.keys(), config, pos)\n\n def parse(self, iterator, config):\n \"\"\"\n For internal use. Parse the XML content of the children of the\n element.\n\n Parameters\n ----------\n iterator : xml iterator\n An iterator over XML elements as returned by\n `~astropy.utils.xml.iterparser.get_xml_iterator`.\n\n config : dict\n The configuration dictionary that affects how certain\n elements are read.\n\n Returns\n -------\n self : Element\n Returns self as a convenience.\n \"\"\"\n raise NotImplementedError()\n\n def to_xml(self, w, **kwargs):\n \"\"\"\n For internal use. Output the element to XML.\n\n Parameters\n ----------\n w : astropy.utils.xml.writer.XMLWriter object\n An XML writer to write to.\n\n kwargs : dict\n Any configuration parameters to control the output.\n \"\"\"\n raise NotImplementedError()\n\n\nclass SimpleElement(Element):\n \"\"\"\n A base class for simple elements, such as FIELD, PARAM and INFO\n that don't require any special parsing or outputting machinery.\n \"\"\"\n\n def __init__(self):\n Element.__init__(self)\n\n def __repr__(self):\n buff = io.StringIO()\n SimpleElement.to_xml(self, XMLWriter(buff))\n return buff.getvalue().strip()\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start and tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n elif tag == self._element_name:\n break\n\n return self\n\n def to_xml(self, w, **kwargs):\n w.element(self._element_name,\n attrib=w.object_attrs(self, self._attr_list))\n\n\nclass SimpleElementWithContent(SimpleElement):\n \"\"\"\n A base class for simple elements, such as FIELD, PARAM and INFO\n that don't require any special parsing or outputting machinery.\n \"\"\"\n\n def __init__(self):\n SimpleElement.__init__(self)\n\n self._content = None\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start and tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n elif tag == self._element_name:\n if data:\n self.content = data\n break\n\n return self\n\n def to_xml(self, w, **kwargs):\n w.element(self._element_name, self._content,\n attrib=w.object_attrs(self, self._attr_list))\n\n @property\n def content(self):\n \"\"\"The content of the element.\"\"\"\n return self._content\n\n @content.setter\n def content(self, content):\n check_string(content, 'content', self._config, self._pos)\n self._content = content\n\n @content.deleter\n def content(self):\n self._content = None\n\n\nclass Link(SimpleElement, _IDProperty):\n \"\"\"\n LINK_ elements: used to reference external documents and servers through a URI.\n\n 
The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _attr_list = ['ID', 'content_role', 'content_type', 'title', 'value',\n 'href', 'action']\n _element_name = 'LINK'\n\n def __init__(self, ID=None, title=None, value=None, href=None, action=None,\n id=None, config=None, pos=None, **kwargs):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n\n content_role = kwargs.get('content-role') or kwargs.get('content_role')\n content_type = kwargs.get('content-type') or kwargs.get('content_type')\n\n if 'gref' in kwargs:\n warn_or_raise(W11, W11, (), config, pos)\n\n self.ID = resolve_id(ID, id, config, pos)\n self.content_role = content_role\n self.content_type = content_type\n self.title = title\n self.value = value\n self.href = href\n self.action = action\n\n warn_unknown_attrs(\n 'LINK', kwargs.keys(), config, pos,\n ['content-role', 'content_role', 'content-type', 'content_type',\n 'gref'])\n\n @property\n def content_role(self):\n \"\"\"\n Defines the MIME role of the referenced object. Must be one of:\n\n None, 'query', 'hints', 'doc', 'location' or 'type'\n \"\"\"\n return self._content_role\n\n @content_role.setter\n def content_role(self, content_role):\n if ((content_role == 'type' and\n not self._config['version_1_3_or_later']) or\n content_role not in\n (None, 'query', 'hints', 'doc', 'location')):\n vo_warn(W45, (content_role,), self._config, self._pos)\n self._content_role = content_role\n\n @content_role.deleter\n def content_role(self):\n self._content_role = None\n\n @property\n def content_type(self):\n \"\"\"Defines the MIME content type of the referenced object.\"\"\"\n return self._content_type\n\n @content_type.setter\n def content_type(self, content_type):\n xmlutil.check_mime_content_type(content_type, self._config, self._pos)\n self._content_type = content_type\n\n @content_type.deleter\n def content_type(self):\n self._content_type = None\n\n @property\n def href(self):\n \"\"\"\n A URI to an arbitrary protocol. 
The vo package only supports\n http and anonymous ftp.\n \"\"\"\n return self._href\n\n @href.setter\n def href(self, href):\n xmlutil.check_anyuri(href, self._config, self._pos)\n self._href = href\n\n @href.deleter\n def href(self):\n self._href = None\n\n def to_table_column(self, column):\n meta = {}\n for key in self._attr_list:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n\n column.meta.setdefault('links', [])\n column.meta['links'].append(meta)\n\n @classmethod\n def from_table_column(cls, d):\n return cls(**d)\n\n\nclass Info(SimpleElementWithContent, _IDProperty, _XtypeProperty,\n _UtypeProperty):\n \"\"\"\n INFO_ elements: arbitrary key-value pairs for extensions to the standard.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _element_name = 'INFO'\n _attr_list_11 = ['ID', 'name', 'value']\n _attr_list_12 = _attr_list_11 + ['xtype', 'ref', 'unit', 'ucd', 'utype']\n _utype_in_v1_2 = True\n\n def __init__(self, ID=None, name=None, value=None, id=None, xtype=None,\n ref=None, unit=None, ucd=None, utype=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElementWithContent.__init__(self)\n\n self.ID = (resolve_id(ID, id, config, pos) or\n xmlutil.fix_id(name, config, pos))\n self.name = name\n self.value = value\n self.xtype = xtype\n self.ref = ref\n self.unit = unit\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if xtype is not None:\n warn_unknown_attrs('INFO', ['xtype'], config, pos)\n if ref is not None:\n warn_unknown_attrs('INFO', ['ref'], config, pos)\n if unit is not None:\n warn_unknown_attrs('INFO', ['unit'], config, pos)\n if ucd is not None:\n warn_unknown_attrs('INFO', ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs('INFO', ['utype'], config, pos)\n\n warn_unknown_attrs('INFO', extra.keys(), config, pos)\n\n @property\n def name(self):\n \"\"\"[*required*] The key of the key-value pair.\"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n if name is None:\n warn_or_raise(W35, W35, ('name'), self._config, self._pos)\n xmlutil.check_token(name, 'name', self._config, self._pos)\n self._name = name\n\n @property\n def value(self):\n \"\"\"\n [*required*] The value of the key-value pair. 
(Always stored\n as a string or unicode string).\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n if value is None:\n warn_or_raise(W35, W35, ('value'), self._config, self._pos)\n check_string(value, 'value', self._config, self._pos)\n self._value = value\n\n @property\n def content(self):\n \"\"\"The content inside the INFO element.\"\"\"\n return self._content\n\n @content.setter\n def content(self, content):\n check_string(content, 'content', self._config, self._pos)\n self._content = content\n\n @content.deleter\n def content(self):\n self._content = None\n\n @property\n def ref(self):\n \"\"\"\n Refer to another INFO_ element by ID_, defined previously in\n the document.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n if ref is not None and not self._config.get('version_1_2_or_later'):\n warn_or_raise(W28, W28, ('ref', 'INFO', '1.2'),\n self._config, self._pos)\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n # TODO: actually apply the reference\n # if ref is not None:\n # try:\n # other = self._votable.get_values_by_id(ref, before=self)\n # except KeyError:\n # vo_raise(\n # \"VALUES ref='%s', which has not already been defined.\" %\n # self.ref, self._config, self._pos, KeyError)\n # self.null = other.null\n # self.type = other.type\n # self.min = other.min\n # self.min_inclusive = other.min_inclusive\n # self.max = other.max\n # self.max_inclusive = other.max_inclusive\n # self._options[:] = other.options\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def unit(self):\n \"\"\"A string specifying the units_ for the INFO_.\"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit):\n if unit is None:\n self._unit = None\n return\n\n from astropy import units as u\n\n if not self._config.get('version_1_2_or_later'):\n warn_or_raise(W28, W28, ('unit', 'INFO', '1.2'),\n self._config, self._pos)\n\n # First, parse the unit in the default way, so that we can\n # still emit a warning if the unit is not to spec.\n default_format = _get_default_unit_format(self._config)\n unit_obj = u.Unit(\n unit, format=default_format, parse_strict='silent')\n if isinstance(unit_obj, u.UnrecognizedUnit):\n warn_or_raise(W50, W50, (unit,),\n self._config, self._pos)\n\n format = _get_unit_format(self._config)\n if format != default_format:\n unit_obj = u.Unit(\n unit, format=format, parse_strict='silent')\n\n self._unit = unit_obj\n\n @unit.deleter\n def unit(self):\n self._unit = None\n\n def to_xml(self, w, **kwargs):\n attrib = w.object_attrs(self, self._attr_list)\n if 'unit' in attrib:\n attrib['unit'] = self.unit.to_string('cds')\n w.element(self._element_name, self._content,\n attrib=attrib)\n\n\nclass Values(Element, _IDProperty):\n \"\"\"\n VALUES_ element: used within FIELD_ and PARAM_ elements to define the domain of values.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, votable, field, ID=None, null=None, ref=None,\n type=\"legal\", id=None, config=None, pos=None, **extras):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n\n self._votable = votable\n self._field = field\n self.ID = resolve_id(ID, id, config, pos)\n self.null = null\n self._ref = ref\n self.type = type\n\n self.min = None\n self.max = None\n self.min_inclusive = True\n self.max_inclusive = True\n self._options = []\n\n warn_unknown_attrs('VALUES', extras.keys(), config, pos)\n\n def 
__repr__(self):\n buff = io.StringIO()\n self.to_xml(XMLWriter(buff))\n return buff.getvalue().strip()\n\n @property\n def null(self):\n \"\"\"\n For integral datatypes, *null* is used to define the value\n used for missing values.\n \"\"\"\n return self._null\n\n @null.setter\n def null(self, null):\n if null is not None and isinstance(null, str):\n try:\n null_val = self._field.converter.parse_scalar(\n null, self._config, self._pos)[0]\n except Exception:\n warn_or_raise(W36, W36, null, self._config, self._pos)\n null_val = self._field.converter.parse_scalar(\n '0', self._config, self._pos)[0]\n else:\n null_val = null\n self._null = null_val\n\n @null.deleter\n def null(self):\n self._null = None\n\n @property\n def type(self):\n \"\"\"\n [*required*] Defines the applicability of the domain defined\n by this VALUES_ element. Must be one of the following\n strings:\n\n - 'legal': The domain of this column applies in general to\n this datatype. (default)\n\n - 'actual': The domain of this column applies only to the\n data enclosed in the parent table.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n if type not in ('legal', 'actual'):\n vo_raise(E08, type, self._config, self._pos)\n self._type = type\n\n @property\n def ref(self):\n \"\"\"\n Refer to another VALUES_ element by ID_, defined previously in\n the document, for MIN/MAX/OPTION information.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n if ref is not None:\n try:\n other = self._votable.get_values_by_id(ref, before=self)\n except KeyError:\n warn_or_raise(W43, W43, ('VALUES', self.ref), self._config,\n self._pos)\n ref = None\n else:\n self.null = other.null\n self.type = other.type\n self.min = other.min\n self.min_inclusive = other.min_inclusive\n self.max = other.max\n self.max_inclusive = other.max_inclusive\n self._options[:] = other.options\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def min(self):\n \"\"\"\n The minimum value of the domain. See :attr:`min_inclusive`.\n \"\"\"\n return self._min\n\n @min.setter\n def min(self, min):\n if hasattr(self._field, 'converter') and min is not None:\n self._min = self._field.converter.parse(min)[0]\n else:\n self._min = min\n\n @min.deleter\n def min(self):\n self._min = None\n\n @property\n def min_inclusive(self):\n \"\"\"When `True`, the domain includes the minimum value.\"\"\"\n return self._min_inclusive\n\n @min_inclusive.setter\n def min_inclusive(self, inclusive):\n if inclusive == 'yes':\n self._min_inclusive = True\n elif inclusive == 'no':\n self._min_inclusive = False\n else:\n self._min_inclusive = bool(inclusive)\n\n @min_inclusive.deleter\n def min_inclusive(self):\n self._min_inclusive = True\n\n @property\n def max(self):\n \"\"\"\n The maximum value of the domain. 
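# --- Illustrative sketch (not part of the original source): declaring the
# domain of a column through the Values instance that every Field owns.  The
# field name and limits are hypothetical; string inputs are parsed with the
# field's converter, as the setters above do.
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field

votable = VOTableFile()
resource = Resource()
votable.resources.append(resource)
table = Table(votable)
resource.tables.append(table)

field = Field(votable, name="nobs", datatype="int")
table.fields.append(field)

field.values.null = "-1"     # sentinel for missing values in an integer column
field.values.min = "0"       # lower bound of the legal domain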
See :attr:`max_inclusive`.\n \"\"\"\n return self._max\n\n @max.setter\n def max(self, max):\n if hasattr(self._field, 'converter') and max is not None:\n self._max = self._field.converter.parse(max)[0]\n else:\n self._max = max\n\n @max.deleter\n def max(self):\n self._max = None\n\n @property\n def max_inclusive(self):\n \"\"\"When `True`, the domain includes the maximum value.\"\"\"\n return self._max_inclusive\n\n @max_inclusive.setter\n def max_inclusive(self, inclusive):\n if inclusive == 'yes':\n self._max_inclusive = True\n elif inclusive == 'no':\n self._max_inclusive = False\n else:\n self._max_inclusive = bool(inclusive)\n\n @max_inclusive.deleter\n def max_inclusive(self):\n self._max_inclusive = True\n\n @property\n def options(self):\n \"\"\"\n A list of string key-value tuples defining other OPTION\n elements for the domain. All options are ignored -- they are\n stored for round-tripping purposes only.\n \"\"\"\n return self._options\n\n def parse(self, iterator, config):\n if self.ref is not None:\n for start, tag, data, pos in iterator:\n if start:\n warn_or_raise(W44, W44, tag, config, pos)\n else:\n if tag != 'VALUES':\n warn_or_raise(W44, W44, tag, config, pos)\n break\n else:\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'MIN':\n if 'value' not in data:\n vo_raise(E09, 'MIN', config, pos)\n self.min = data['value']\n self.min_inclusive = data.get('inclusive', 'yes')\n warn_unknown_attrs(\n 'MIN', data.keys(), config, pos,\n ['value', 'inclusive'])\n elif tag == 'MAX':\n if 'value' not in data:\n vo_raise(E09, 'MAX', config, pos)\n self.max = data['value']\n self.max_inclusive = data.get('inclusive', 'yes')\n warn_unknown_attrs(\n 'MAX', data.keys(), config, pos,\n ['value', 'inclusive'])\n elif tag == 'OPTION':\n if 'value' not in data:\n vo_raise(E09, 'OPTION', config, pos)\n xmlutil.check_token(\n data.get('name'), 'name', config, pos)\n self.options.append(\n (data.get('name'), data.get('value')))\n warn_unknown_attrs(\n 'OPTION', data.keys(), config, pos,\n ['value', 'name'])\n elif tag == 'VALUES':\n break\n\n return self\n\n def is_defaults(self):\n \"\"\"\n Are the settings on this ``VALUE`` element all the same as the\n XML defaults?\n \"\"\"\n # If there's nothing meaningful or non-default to write,\n # don't write anything.\n return (self.ref is None and self.null is None and self.ID is None and\n self.max is None and self.min is None and self.options == [])\n\n def to_xml(self, w, **kwargs):\n def yes_no(value):\n if value:\n return 'yes'\n return 'no'\n\n if self.is_defaults():\n return\n\n if self.ref is not None:\n w.element('VALUES', attrib=w.object_attrs(self, ['ref']))\n else:\n with w.tag('VALUES',\n attrib=w.object_attrs(\n self, ['ID', 'null', 'ref'])):\n if self.min is not None:\n w.element(\n 'MIN',\n value=self._field.converter.output(self.min, False),\n inclusive=yes_no(self.min_inclusive))\n if self.max is not None:\n w.element(\n 'MAX',\n value=self._field.converter.output(self.max, False),\n inclusive=yes_no(self.max_inclusive))\n for name, value in self.options:\n w.element(\n 'OPTION',\n name=name,\n value=value)\n\n def to_table_column(self, column):\n # Have the ref filled in here\n meta = {}\n for key in ['ID', 'null']:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n if self.min is not None:\n meta['min'] = {\n 'value': self.min,\n 'inclusive': self.min_inclusive}\n if self.max is not None:\n meta['max'] = {\n 'value': self.max,\n 'inclusive': self.max_inclusive}\n if len(self.options):\n 
meta['options'] = dict(self.options)\n\n column.meta['values'] = meta\n\n def from_table_column(self, column):\n if column.info.meta is None or 'values' not in column.info.meta:\n return\n\n meta = column.info.meta['values']\n for key in ['ID', 'null']:\n val = meta.get(key, None)\n if val is not None:\n setattr(self, key, val)\n if 'min' in meta:\n self.min = meta['min']['value']\n self.min_inclusive = meta['min']['inclusive']\n if 'max' in meta:\n self.max = meta['max']['value']\n self.max_inclusive = meta['max']['inclusive']\n if 'options' in meta:\n self._options = list(meta['options'].items())\n\n\nclass Field(SimpleElement, _IDProperty, _NameProperty, _XtypeProperty,\n _UtypeProperty, _UcdProperty):\n \"\"\"\n FIELD_ element: describes the datatype of a particular column of data.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n\n If *ID* is provided, it is used for the column name in the\n resulting recarray of the table. If no *ID* is provided, *name*\n is used instead. If neither is provided, an exception will be\n raised.\n \"\"\"\n _attr_list_11 = ['ID', 'name', 'datatype', 'arraysize', 'ucd',\n 'unit', 'width', 'precision', 'utype', 'ref']\n _attr_list_12 = _attr_list_11 + ['xtype']\n _element_name = 'FIELD'\n\n def __init__(self, votable, ID=None, name=None, datatype=None,\n arraysize=None, ucd=None, unit=None, width=None,\n precision=None, utype=None, ref=None, type=None, id=None,\n xtype=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if xtype is not None:\n warn_unknown_attrs(self._element_name, ['xtype'], config, pos)\n\n # TODO: REMOVE ME ----------------------------------------\n # This is a terrible hack to support Simple Image Access\n # Protocol results from archive.noao.edu. It creates a field\n # for the coordinate projection type of type \"double\", which\n # actually contains character data. We have to hack the field\n # to store character data, or we can't read it in. 
A warning\n # will be raised when this happens.\n if (config.get('verify', 'ignore') != 'exception' and name == 'cprojection' and\n ID == 'cprojection' and ucd == 'VOX:WCS_CoordProjection' and\n datatype == 'double'):\n datatype = 'char'\n arraysize = '3'\n vo_warn(W40, (), config, pos)\n # ----------------------------------------\n\n self.description = None\n self._votable = votable\n\n self.ID = (resolve_id(ID, id, config, pos) or\n xmlutil.fix_id(name, config, pos))\n self.name = name\n if name is None:\n if (self._element_name == 'PARAM' and\n not config.get('version_1_1_or_later')):\n pass\n else:\n warn_or_raise(W15, W15, self._element_name, config, pos)\n self.name = self.ID\n\n if self._ID is None and name is None:\n vo_raise(W12, self._element_name, config, pos)\n\n datatype_mapping = {\n 'string': 'char',\n 'unicodeString': 'unicodeChar',\n 'int16': 'short',\n 'int32': 'int',\n 'int64': 'long',\n 'float32': 'float',\n 'float64': 'double',\n # The following appear in some Vizier tables\n 'unsignedInt': 'long',\n 'unsignedShort': 'int'\n }\n\n datatype_mapping.update(config.get('datatype_mapping', {}))\n\n if datatype in datatype_mapping:\n warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]),\n config, pos)\n datatype = datatype_mapping[datatype]\n\n self.ref = ref\n self.datatype = datatype\n self.arraysize = arraysize\n self.ucd = ucd\n self.unit = unit\n self.width = width\n self.precision = precision\n self.utype = utype\n self.type = type\n self._links = HomogeneousList(Link)\n self.title = self.name\n self.values = Values(self._votable, self)\n self.xtype = xtype\n\n self._setup(config, pos)\n\n warn_unknown_attrs(self._element_name, extra.keys(), config, pos)\n\n @classmethod\n def uniqify_names(cls, fields):\n \"\"\"\n Make sure that all names and titles in a list of fields are\n unique, by appending numbers if necessary.\n \"\"\"\n unique = {}\n for field in fields:\n i = 2\n new_id = field.ID\n while new_id in unique:\n new_id = field.ID + f\"_{i:d}\"\n i += 1\n if new_id != field.ID:\n vo_warn(W32, (field.ID, new_id), field._config, field._pos)\n field.ID = new_id\n unique[new_id] = field.ID\n\n for field in fields:\n i = 2\n if field.name is None:\n new_name = field.ID\n implicit = True\n else:\n new_name = field.name\n implicit = False\n if new_name != field.ID:\n while new_name in unique:\n new_name = field.name + f\" {i:d}\"\n i += 1\n\n if (not implicit and\n new_name != field.name):\n vo_warn(W33, (field.name, new_name), field._config, field._pos)\n field._unique_name = new_name\n unique[new_name] = field.name\n\n def _setup(self, config, pos):\n if self.values._ref is not None:\n self.values.ref = self.values._ref\n self.converter = converters.get_converter(self, config, pos)\n\n @property\n def datatype(self):\n \"\"\"\n [*required*] The datatype of the column. 
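# --- Illustrative sketch (not part of the original source): how datatype,
# arraysize and unit combine when declaring columns.  The field names and
# units below are made-up examples.
from astropy.io.votable.tree import VOTableFile, Field

votable = VOTableFile()
fields = [
    Field(votable, name="obs_id", datatype="char", arraysize="*"),   # variable-length string
    Field(votable, name="flux", datatype="double", unit="Jy"),       # scalar double with a unit
    Field(votable, name="coeff", datatype="float", arraysize="3"),   # fixed-length float array
]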
Valid values (as\n defined by the spec) are:\n\n 'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',\n 'char', 'unicodeChar', 'float', 'double', 'floatComplex', or\n 'doubleComplex'\n\n Many VOTABLE files in the wild use 'string' instead of 'char',\n so that is also a valid option, though 'string' will always be\n converted to 'char' when writing the file back out.\n \"\"\"\n return self._datatype\n\n @datatype.setter\n def datatype(self, datatype):\n if datatype is None:\n if self._config.get('version_1_1_or_later'):\n warn_or_raise(E10, E10, self._element_name, self._config,\n self._pos)\n datatype = 'char'\n if datatype not in converters.converter_mapping:\n vo_raise(E06, (datatype, self.ID), self._config, self._pos)\n self._datatype = datatype\n\n @property\n def precision(self):\n \"\"\"\n Along with :attr:`width`, defines the `numerical accuracy`_\n associated with the data. These values are used to limit the\n precision when writing floating point values back to the XML\n file. Otherwise, it is purely informational -- the Numpy\n recarray containing the data itself does not use this\n information.\n \"\"\"\n return self._precision\n\n @precision.setter\n def precision(self, precision):\n if precision is not None and not re.match(r\"^[FE]?[0-9]+$\", precision):\n vo_raise(E11, precision, self._config, self._pos)\n self._precision = precision\n\n @precision.deleter\n def precision(self):\n self._precision = None\n\n @property\n def width(self):\n \"\"\"\n Along with :attr:`precision`, defines the `numerical\n accuracy`_ associated with the data. These values are used to\n limit the precision when writing floating point values back to\n the XML file. Otherwise, it is purely informational -- the\n Numpy recarray containing the data itself does not use this\n information.\n \"\"\"\n return self._width\n\n @width.setter\n def width(self, width):\n if width is not None:\n width = int(width)\n if width <= 0:\n vo_raise(E12, width, self._config, self._pos)\n self._width = width\n\n @width.deleter\n def width(self):\n self._width = None\n\n # ref on FIELD and PARAM behave differently than elsewhere -- here\n # they're just informational, such as to refer to a coordinate\n # system.\n @property\n def ref(self):\n \"\"\"\n On FIELD_ elements, ref is used only for informational\n purposes, for example to refer to a COOSYS_ element.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def unit(self):\n \"\"\"A string specifying the units_ for the FIELD_.\"\"\"\n return self._unit\n\n @unit.setter\n def unit(self, unit):\n if unit is None:\n self._unit = None\n return\n\n from astropy import units as u\n\n # First, parse the unit in the default way, so that we can\n # still emit a warning if the unit is not to spec.\n default_format = _get_default_unit_format(self._config)\n unit_obj = u.Unit(\n unit, format=default_format, parse_strict='silent')\n if isinstance(unit_obj, u.UnrecognizedUnit):\n warn_or_raise(W50, W50, (unit,),\n self._config, self._pos)\n\n format = _get_unit_format(self._config)\n if format != default_format:\n unit_obj = u.Unit(\n unit, format=format, parse_strict='silent')\n\n self._unit = unit_obj\n\n @unit.deleter\n def unit(self):\n self._unit = None\n\n @property\n def arraysize(self):\n \"\"\"\n Specifies the size of the multidimensional array if this\n FIELD_ contains more than a single value.\n\n See 
`multidimensional arrays`_.\n \"\"\"\n return self._arraysize\n\n @arraysize.setter\n def arraysize(self, arraysize):\n if (arraysize is not None and\n not re.match(r\"^([0-9]+x)*[0-9]*[*]?(s\\W)?$\", arraysize)):\n vo_raise(E13, arraysize, self._config, self._pos)\n self._arraysize = arraysize\n\n @arraysize.deleter\n def arraysize(self):\n self._arraysize = None\n\n @property\n def type(self):\n \"\"\"\n The type attribute on FIELD_ elements is reserved for future\n extensions.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n self._type = type\n\n @type.deleter\n def type(self):\n self._type = None\n\n @property\n def values(self):\n \"\"\"\n A :class:`Values` instance (or `None`) defining the domain\n of the column.\n \"\"\"\n return self._values\n\n @values.setter\n def values(self, values):\n assert values is None or isinstance(values, Values)\n self._values = values\n\n @values.deleter\n def values(self):\n self._values = None\n\n @property\n def links(self):\n \"\"\"\n A list of :class:`Link` instances used to reference more\n details about the meaning of the FIELD_. This is purely\n informational and is not used by the `astropy.io.votable`\n package.\n \"\"\"\n return self._links\n\n def parse(self, iterator, config):\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'VALUES':\n self.values.__init__(\n self._votable, self, config=config, pos=pos, **data)\n self.values.parse(iterator, config)\n elif tag == 'LINK':\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n elif tag == 'DESCRIPTION':\n warn_unknown_attrs(\n 'DESCRIPTION', data.keys(), config, pos)\n elif tag != self._element_name:\n self._add_unknown_tag(iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(\n W17, W17, self._element_name, config, pos)\n self.description = data or None\n elif tag == self._element_name:\n break\n\n if self.description is not None:\n self.title = \" \".join(x.strip() for x in\n self.description.splitlines())\n else:\n self.title = self.name\n\n self._setup(config, pos)\n\n return self\n\n def to_xml(self, w, **kwargs):\n attrib = w.object_attrs(self, self._attr_list)\n if 'unit' in attrib:\n attrib['unit'] = self.unit.to_string('cds')\n with w.tag(self._element_name, attrib=attrib):\n if self.description is not None:\n w.element('DESCRIPTION', self.description, wrap=True)\n if not self.values.is_defaults():\n self.values.to_xml(w, **kwargs)\n for link in self.links:\n link.to_xml(w, **kwargs)\n\n def to_table_column(self, column):\n \"\"\"\n Sets the attributes of a given `astropy.table.Column` instance\n to match the information in this `Field`.\n \"\"\"\n for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:\n val = getattr(self, key, None)\n if val is not None:\n column.meta[key] = val\n if not self.values.is_defaults():\n self.values.to_table_column(column)\n for link in self.links:\n link.to_table_column(column)\n if self.description is not None:\n column.description = self.description\n if self.unit is not None:\n # TODO: Use units framework when it's available\n column.unit = self.unit\n if (isinstance(self.converter, converters.FloatingPoint) and\n self.converter.output_format != '{!r:>}'):\n column.format = self.converter.output_format\n\n @classmethod\n def from_table_column(cls, votable, column):\n \"\"\"\n Restores a `Field` instance from a given\n `astropy.table.Column` instance.\n \"\"\"\n kwargs = {}\n meta = 
column.info.meta\n if meta:\n for key in ['ucd', 'width', 'precision', 'utype', 'xtype']:\n val = meta.get(key, None)\n if val is not None:\n kwargs[key] = val\n # TODO: Use the unit framework when available\n if column.info.unit is not None:\n kwargs['unit'] = column.info.unit\n kwargs['name'] = column.info.name\n result = converters.table_column_to_votable_datatype(column)\n kwargs.update(result)\n\n field = cls(votable, **kwargs)\n\n if column.info.description is not None:\n field.description = column.info.description\n field.values.from_table_column(column)\n if meta and 'links' in meta:\n for link in meta['links']:\n field.links.append(Link.from_table_column(link))\n\n # TODO: Parse format into precision and width\n return field\n\n\nclass Param(Field):\n \"\"\"\n PARAM_ element: constant-valued columns in the data.\n\n :class:`Param` objects are a subclass of :class:`Field`, and have\n all of its methods and members. Additionally, it defines :attr:`value`.\n \"\"\"\n _attr_list_11 = Field._attr_list_11 + ['value']\n _attr_list_12 = Field._attr_list_12 + ['value']\n _element_name = 'PARAM'\n\n def __init__(self, votable, ID=None, name=None, value=None, datatype=None,\n arraysize=None, ucd=None, unit=None, width=None,\n precision=None, utype=None, type=None, id=None, config=None,\n pos=None, **extra):\n self._value = value\n Field.__init__(self, votable, ID=ID, name=name, datatype=datatype,\n arraysize=arraysize, ucd=ucd, unit=unit,\n precision=precision, utype=utype, type=type,\n id=id, config=config, pos=pos, **extra)\n\n @property\n def value(self):\n \"\"\"\n [*required*] The constant value of the parameter. Its type is\n determined by the :attr:`~Field.datatype` member.\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n if value is None:\n value = \"\"\n if isinstance(value, str):\n self._value = self.converter.parse(\n value, self._config, self._pos)[0]\n else:\n self._value = value\n\n def _setup(self, config, pos):\n Field._setup(self, config, pos)\n self.value = self._value\n\n def to_xml(self, w, **kwargs):\n tmp_value = self._value\n self._value = self.converter.output(tmp_value, False)\n # We must always have a value\n if self._value is None:\n self._value = \"\"\n Field.to_xml(self, w, **kwargs)\n self._value = tmp_value\n\n\nclass CooSys(SimpleElement):\n \"\"\"\n COOSYS_ element: defines a coordinate system.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n _attr_list = ['ID', 'equinox', 'epoch', 'system']\n _element_name = 'COOSYS'\n\n def __init__(self, ID=None, equinox=None, epoch=None, system=None, id=None,\n config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n # COOSYS was deprecated in 1.2 but then re-instated in 1.3\n if (config.get('version_1_2_or_later') and\n not config.get('version_1_3_or_later')):\n warn_or_raise(W27, W27, (), config, pos)\n\n SimpleElement.__init__(self)\n\n self.ID = resolve_id(ID, id, config, pos)\n self.equinox = equinox\n self.epoch = epoch\n self.system = system\n\n warn_unknown_attrs('COOSYS', extra.keys(), config, pos)\n\n @property\n def ID(self):\n \"\"\"\n [*required*] The XML ID of the COOSYS_ element, used for\n cross-referencing. 
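# --- Illustrative sketch (not part of the original source): declaring a
# coordinate system for a resource.  The ID and equinox values are examples;
# COOSYS is accepted again in VOTable 1.3, the default version here.
from astropy.io.votable.tree import VOTableFile, Resource, CooSys

votable = VOTableFile()
resource = Resource()
votable.resources.append(resource)

coosys = CooSys(ID="J2000", equinox="J2000", system="eq_FK5")
resource.coordinate_systems.append(coosys)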
May be `None` or a string conforming to\n XML ID_ syntax.\n \"\"\"\n return self._ID\n\n @ID.setter\n def ID(self, ID):\n if self._config.get('version_1_1_or_later'):\n if ID is None:\n vo_raise(E15, (), self._config, self._pos)\n xmlutil.check_id(ID, 'ID', self._config, self._pos)\n self._ID = ID\n\n @property\n def system(self):\n \"\"\"\n Specifies the type of coordinate system. Valid choices are:\n\n 'eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5', 'galactic',\n 'supergalactic', 'xy', 'barycentric', or 'geo_app'\n \"\"\"\n return self._system\n\n @system.setter\n def system(self, system):\n if system not in ('eq_FK4', 'eq_FK5', 'ICRS', 'ecl_FK4', 'ecl_FK5',\n 'galactic', 'supergalactic', 'xy', 'barycentric',\n 'geo_app'):\n warn_or_raise(E16, E16, system, self._config, self._pos)\n self._system = system\n\n @system.deleter\n def system(self):\n self._system = None\n\n @property\n def equinox(self):\n \"\"\"\n A parameter required to fix the equatorial or ecliptic systems\n (as e.g. \"J2000\" as the default \"eq_FK5\" or \"B1950\" as the\n default \"eq_FK4\").\n \"\"\"\n return self._equinox\n\n @equinox.setter\n def equinox(self, equinox):\n check_astroyear(equinox, 'equinox', self._config, self._pos)\n self._equinox = equinox\n\n @equinox.deleter\n def equinox(self):\n self._equinox = None\n\n @property\n def epoch(self):\n \"\"\"\n Specifies the epoch of the positions. It must be a string\n specifying an astronomical year.\n \"\"\"\n return self._epoch\n\n @epoch.setter\n def epoch(self, epoch):\n check_astroyear(epoch, 'epoch', self._config, self._pos)\n self._epoch = epoch\n\n @epoch.deleter\n def epoch(self):\n self._epoch = None\n\n\nclass FieldRef(SimpleElement, _UtypeProperty, _UcdProperty):\n \"\"\"\n FIELDref_ element: used inside of GROUP_ elements to refer to remote FIELD_ elements.\n \"\"\"\n _attr_list_11 = ['ref']\n _attr_list_12 = _attr_list_11 + ['ucd', 'utype']\n _element_name = \"FIELDref\"\n _utype_in_v1_2 = True\n _ucd_in_v1_2 = True\n\n def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None,\n **extra):\n \"\"\"\n *table* is the :class:`Table` object that this :class:`FieldRef`\n is a member of.\n\n *ref* is the ID to reference a :class:`Field` object defined\n elsewhere.\n \"\"\"\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n SimpleElement.__init__(self)\n self._table = table\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if ucd is not None:\n warn_unknown_attrs(self._element_name, ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs(self._element_name, ['utype'], config, pos)\n\n @property\n def ref(self):\n \"\"\"The ID_ of the FIELD_ that this FIELDref_ references.\"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n def get_ref(self):\n \"\"\"\n Lookup the :class:`Field` instance that this :class:`FieldRef`\n references.\n \"\"\"\n for field in self._table._votable.iter_fields_and_params():\n if isinstance(field, Field) and field.ID == self.ref:\n return field\n vo_raise(\n f\"No field named '{self.ref}'\",\n self._config, self._pos, KeyError)\n\n\nclass ParamRef(SimpleElement, _UtypeProperty, _UcdProperty):\n \"\"\"\n PARAMref_ element: used inside of GROUP_ elements to refer to remote PARAM_ elements.\n\n The 
keyword arguments correspond to setting members of the same\n name, documented below.\n\n It contains the following publicly-accessible members:\n\n *ref*: An XML ID referring to a <PARAM> element.\n \"\"\"\n _attr_list_11 = ['ref']\n _attr_list_12 = _attr_list_11 + ['ucd', 'utype']\n _element_name = \"PARAMref\"\n _utype_in_v1_2 = True\n _ucd_in_v1_2 = True\n\n def __init__(self, table, ref, ucd=None, utype=None, config=None, pos=None):\n if config is None:\n config = {}\n\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self._table = table\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n\n if config.get('version_1_2_or_later'):\n self._attr_list = self._attr_list_12\n else:\n self._attr_list = self._attr_list_11\n if ucd is not None:\n warn_unknown_attrs(self._element_name, ['ucd'], config, pos)\n if utype is not None:\n warn_unknown_attrs(self._element_name, ['utype'], config, pos)\n\n @property\n def ref(self):\n \"\"\"The ID_ of the PARAM_ that this PARAMref_ references.\"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n def get_ref(self):\n \"\"\"\n Lookup the :class:`Param` instance that this :class:``PARAMref``\n references.\n \"\"\"\n for param in self._table._votable.iter_fields_and_params():\n if isinstance(param, Param) and param.ID == self.ref:\n return param\n vo_raise(\n f\"No params named '{self.ref}'\",\n self._config, self._pos, KeyError)\n\n\nclass Group(Element, _IDProperty, _NameProperty, _UtypeProperty,\n _UcdProperty, _DescriptionProperty):\n \"\"\"\n GROUP_ element: groups FIELD_ and PARAM_ elements.\n\n This information is currently ignored by the vo package---that is\n the columns in the recarray are always flat---but the grouping\n information is stored so that it can be written out again to the\n XML file.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, table, ID=None, name=None, ref=None, ucd=None,\n utype=None, id=None, config=None, pos=None, **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self._table = table\n\n self.ID = (resolve_id(ID, id, config, pos)\n or xmlutil.fix_id(name, config, pos))\n self.name = name\n self.ref = ref\n self.ucd = ucd\n self.utype = utype\n self.description = None\n\n self._entries = HomogeneousList(\n (FieldRef, ParamRef, Group, Param))\n\n warn_unknown_attrs('GROUP', extra.keys(), config, pos)\n\n def __repr__(self):\n return '<GROUP>... {} entries ...</GROUP>'.format(len(self._entries))\n\n @property\n def ref(self):\n \"\"\"\n Currently ignored, as it's not clear from the spec how this is\n meant to work.\n \"\"\"\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def entries(self):\n \"\"\"\n [read-only] A list of members of the GROUP_. 
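# --- Illustrative sketch (not part of the original source): grouping existing
# columns with FIELDref entries.  The table and field names are hypothetical.
from astropy.io.votable.tree import (VOTableFile, Resource, Table, Field,
                                     Group, FieldRef)

votable = VOTableFile()
resource = Resource()
votable.resources.append(resource)
table = Table(votable)
resource.tables.append(table)
table.fields.extend([
    Field(votable, name="mag", datatype="float"),
    Field(votable, name="mag_err", datatype="float"),
])

# A GROUP only records structure; the data columns themselves stay flat.
group = Group(table, name="photometry")
table.groups.append(group)
group.entries.extend([FieldRef(table, "mag"), FieldRef(table, "mag_err")])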
This list may\n only contain objects of type :class:`Param`, :class:`Group`,\n :class:`ParamRef` and :class:`FieldRef`.\n \"\"\"\n return self._entries\n\n def _add_fieldref(self, iterator, tag, data, config, pos):\n fieldref = FieldRef(self._table, config=config, pos=pos, **data)\n self.entries.append(fieldref)\n\n def _add_paramref(self, iterator, tag, data, config, pos):\n paramref = ParamRef(self._table, config=config, pos=pos, **data)\n self.entries.append(paramref)\n\n def _add_param(self, iterator, tag, data, config, pos):\n if isinstance(self._table, VOTableFile):\n votable = self._table\n else:\n votable = self._table._votable\n param = Param(votable, config=config, pos=pos, **data)\n self.entries.append(param)\n param.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self._table, config=config, pos=pos, **data)\n self.entries.append(group)\n group.parse(iterator, config)\n\n def parse(self, iterator, config):\n tag_mapping = {\n 'FIELDref': self._add_fieldref,\n 'PARAMref': self._add_paramref,\n 'PARAM': self._add_param,\n 'GROUP': self._add_group,\n 'DESCRIPTION': self._ignore_add}\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'GROUP', config, pos)\n self.description = data or None\n elif tag == 'GROUP':\n break\n return self\n\n def to_xml(self, w, **kwargs):\n with w.tag(\n 'GROUP',\n attrib=w.object_attrs(\n self, ['ID', 'name', 'ref', 'ucd', 'utype'])):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n for entry in self.entries:\n entry.to_xml(w, **kwargs)\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all :class:`Param` elements in this\n :class:`Group`.\n \"\"\"\n for entry in self.entries:\n if isinstance(entry, Param):\n yield entry\n elif isinstance(entry, Group):\n for field in entry.iter_fields_and_params():\n yield field\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all sub-:class:`Group` instances in\n this :class:`Group`.\n \"\"\"\n for entry in self.entries:\n if isinstance(entry, Group):\n yield entry\n for group in entry.iter_groups():\n yield group\n\n\nclass Table(Element, _IDProperty, _NameProperty, _UcdProperty,\n _DescriptionProperty):\n \"\"\"\n TABLE_ element: optionally contains data.\n\n It contains the following publicly-accessible and mutable\n attribute:\n\n *array*: A Numpy masked array of the data itself, where each\n row is a row of votable data, and columns are named and typed\n based on the <FIELD> elements of the table. 
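# --- Illustrative sketch (not part of the original source): building a TABLE
# from scratch and filling its masked array, closely following the astropy
# documentation example.  File and field names are placeholders.
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field

votable = VOTableFile()
resource = Resource()
votable.resources.append(resource)
table = Table(votable)
resource.tables.append(table)

table.fields.extend([
    Field(votable, name="filename", datatype="char", arraysize="*"),
    Field(votable, name="matrix", datatype="double", arraysize="2x2"),
])

# Allocate the masked record array, then fill it row by row.
table.create_arrays(2)
table.array[0] = ("test1.xml", [[1, 0], [0, 1]])
table.array[1] = ("test2.xml", [[0.5, 0.3], [0.2, 0.1]])

votable.to_xml("new_votable.xml")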
The mask is\n parallel to the data array, except for variable-length fields.\n For those fields, the numpy array's column type is \"object\"\n (``\"O\"``), and another masked array is stored there.\n\n If the Table contains no data, (for example, its enclosing\n :class:`Resource` has :attr:`~Resource.type` == 'meta') *array*\n will have zero-length.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, votable, ID=None, name=None, ref=None, ucd=None,\n utype=None, nrows=None, id=None, config=None, pos=None,\n **extra):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n self._empty = False\n\n Element.__init__(self)\n self._votable = votable\n\n self.ID = (resolve_id(ID, id, config, pos)\n or xmlutil.fix_id(name, config, pos))\n self.name = name\n xmlutil.check_id(ref, 'ref', config, pos)\n self._ref = ref\n self.ucd = ucd\n self.utype = utype\n if nrows is not None:\n nrows = int(nrows)\n if nrows < 0:\n raise ValueError(\"'nrows' cannot be negative.\")\n self._nrows = nrows\n self.description = None\n self.format = 'tabledata'\n\n self._fields = HomogeneousList(Field)\n self._params = HomogeneousList(Param)\n self._groups = HomogeneousList(Group)\n self._links = HomogeneousList(Link)\n self._infos = HomogeneousList(Info)\n\n self.array = ma.array([])\n\n warn_unknown_attrs('TABLE', extra.keys(), config, pos)\n\n def __repr__(self):\n return repr(self.to_table())\n\n def __bytes__(self):\n return bytes(self.to_table())\n\n def __str__(self):\n return str(self.to_table())\n\n @property\n def ref(self):\n return self._ref\n\n @ref.setter\n def ref(self, ref):\n \"\"\"\n Refer to another TABLE, previously defined, by the *ref* ID_\n for all metadata (FIELD_, PARAM_ etc.) information.\n \"\"\"\n # When the ref changes, we want to verify that it will work\n # by actually going and looking for the referenced table.\n # If found, set a bunch of properties in this table based\n # on the other one.\n xmlutil.check_id(ref, 'ref', self._config, self._pos)\n if ref is not None:\n try:\n table = self._votable.get_table_by_id(ref, before=self)\n except KeyError:\n warn_or_raise(\n W43, W43, ('TABLE', self.ref), self._config, self._pos)\n ref = None\n else:\n self._fields = table.fields\n self._params = table.params\n self._groups = table.groups\n self._links = table.links\n else:\n del self._fields[:]\n del self._params[:]\n del self._groups[:]\n del self._links[:]\n self._ref = ref\n\n @ref.deleter\n def ref(self):\n self._ref = None\n\n @property\n def format(self):\n \"\"\"\n [*required*] The serialization format of the table. Must be\n one of:\n\n 'tabledata' (TABLEDATA_), 'binary' (BINARY_), 'binary2' (BINARY2_)\n 'fits' (FITS_).\n\n Note that the 'fits' format, since it requires an external\n file, can not be written out. 
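# --- Illustrative sketch (not part of the original source): switching the
# serialization format before writing a file back out.  "existing.xml" is a
# placeholder input file.
from astropy.io.votable import parse

votable = parse("existing.xml")
for table in votable.iter_tables():
    table.format = "binary"          # instead of the default "tabledata"
# (to_xml also accepts a tabledata_format argument to override this per write)
votable.to_xml("existing_binary.xml")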
Any file read in with 'fits'\n format will be read out, by default, in 'tabledata' format.\n\n See :ref:`votable-serialization`.\n \"\"\"\n return self._format\n\n @format.setter\n def format(self, format):\n format = format.lower()\n if format == 'fits':\n vo_raise(\"fits format can not be written out, only read.\",\n self._config, self._pos, NotImplementedError)\n if format == 'binary2':\n if not self._config['version_1_3_or_later']:\n vo_raise(\n \"binary2 only supported in votable 1.3 or later\",\n self._config, self._pos)\n elif format not in ('tabledata', 'binary'):\n vo_raise(f\"Invalid format '{format}'\",\n self._config, self._pos)\n self._format = format\n\n @property\n def nrows(self):\n \"\"\"\n [*immutable*] The number of rows in the table, as specified in\n the XML file.\n \"\"\"\n return self._nrows\n\n @property\n def fields(self):\n \"\"\"\n A list of :class:`Field` objects describing the types of each\n of the data columns.\n \"\"\"\n return self._fields\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) for the\n table. Must contain only :class:`Param` objects.\n \"\"\"\n return self._params\n\n @property\n def groups(self):\n \"\"\"\n A list of :class:`Group` objects describing how the columns\n and parameters are grouped. Currently this information is\n only kept around for round-tripping and informational\n purposes.\n \"\"\"\n return self._groups\n\n @property\n def links(self):\n \"\"\"\n A list of :class:`Link` objects (pointers to other documents\n or servers through a URI) for the table.\n \"\"\"\n return self._links\n\n @property\n def infos(self):\n \"\"\"\n A list of :class:`Info` objects for the table. Allows for\n post-operational diagnostics.\n \"\"\"\n return self._infos\n\n def is_empty(self):\n \"\"\"\n Returns True if this table doesn't contain any real data\n because it was skipped over by the parser (through use of the\n ``table_number`` kwarg).\n \"\"\"\n return self._empty\n\n def create_arrays(self, nrows=0, config=None):\n \"\"\"\n Create a new array to hold the data based on the current set\n of fields, and store them in the *array* and member variable.\n Any data in the existing array will be lost.\n\n *nrows*, if provided, is the number of rows to allocate.\n \"\"\"\n if nrows is None:\n nrows = 0\n\n fields = self.fields\n\n if len(fields) == 0:\n array = np.recarray((nrows,), dtype='O')\n mask = np.zeros((nrows,), dtype='b')\n else:\n # for field in fields: field._setup(config)\n Field.uniqify_names(fields)\n\n dtype = []\n for x in fields:\n if x._unique_name == x.ID:\n id = x.ID\n else:\n id = (x._unique_name, x.ID)\n dtype.append((id, x.converter.format))\n\n array = np.recarray((nrows,), dtype=np.dtype(dtype))\n descr_mask = []\n for d in array.dtype.descr:\n new_type = (d[1][1] == 'O' and 'O') or 'bool'\n if len(d) == 2:\n descr_mask.append((d[0], new_type))\n elif len(d) == 3:\n descr_mask.append((d[0], new_type, d[2]))\n mask = np.zeros((nrows,), dtype=descr_mask)\n\n self.array = ma.array(array, mask=mask)\n\n def _resize_strategy(self, size):\n \"\"\"\n Return a new (larger) size based on size, used for\n reallocating an array when it fills up. 
This is in its own\n function so the resizing strategy can be easily replaced.\n \"\"\"\n # Once we go beyond 0, make a big step -- after that use a\n # factor of 1.5 to help keep memory usage compact\n if size == 0:\n return 512\n return int(np.ceil(size * RESIZE_AMOUNT))\n\n def _add_field(self, iterator, tag, data, config, pos):\n field = Field(self._votable, config=config, pos=pos, **data)\n self.fields.append(field)\n field.parse(iterator, config)\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self._votable, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def _add_link(self, iterator, tag, data, config, pos):\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n if not config.get('version_1_2_or_later'):\n warn_or_raise(W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def parse(self, iterator, config):\n columns = config.get('columns')\n\n # If we've requested to read in only a specific table, skip\n # all others\n table_number = config.get('table_number')\n current_table_number = config.get('_current_table_number')\n skip_table = False\n if current_table_number is not None:\n config['_current_table_number'] += 1\n if (table_number is not None and\n table_number != current_table_number):\n skip_table = True\n self._empty = True\n\n table_id = config.get('table_id')\n if table_id is not None:\n if table_id != self.ID:\n skip_table = True\n self._empty = True\n\n if self.ref is not None:\n # This table doesn't have its own datatype descriptors, it\n # just references those from another table.\n\n # This is to call the property setter to go and get the\n # referenced information\n self.ref = self.ref\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'DATA':\n warn_unknown_attrs(\n 'DATA', data.keys(), config, pos)\n break\n else:\n if tag == 'TABLE':\n return self\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n else:\n tag_mapping = {\n 'FIELD': self._add_field,\n 'PARAM': self._add_param,\n 'GROUP': self._add_group,\n 'LINK': self._add_link,\n 'INFO': self._add_info,\n 'DESCRIPTION': self._ignore_add}\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'DATA':\n warn_unknown_attrs(\n 'DATA', data.keys(), config, pos)\n break\n\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n else:\n if tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n elif tag == 'TABLE':\n # For error checking purposes\n Field.uniqify_names(self.fields)\n # We still need to create arrays, even if the file\n # contains no DATA section\n self.create_arrays(nrows=0, config=config)\n return self\n\n self.create_arrays(nrows=self._nrows, config=config)\n fields = self.fields\n names = [x.ID for x in fields]\n # Deal with a subset of the columns, if requested.\n if not columns:\n colnumbers = list(range(len(fields)))\n else:\n if isinstance(columns, str):\n columns = [columns]\n columns = 
np.asarray(columns)\n if issubclass(columns.dtype.type, np.integer):\n if np.any(columns < 0) or np.any(columns > len(fields)):\n raise ValueError(\n \"Some specified column numbers out of range\")\n colnumbers = columns\n elif issubclass(columns.dtype.type, np.character):\n try:\n colnumbers = [names.index(x) for x in columns]\n except ValueError:\n raise ValueError(\n f\"Columns '{columns}' not found in fields list\")\n else:\n raise TypeError(\"Invalid columns list\")\n\n if not skip_table:\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'TABLEDATA':\n warn_unknown_attrs(\n 'TABLEDATA', data.keys(), config, pos)\n self.array = self._parse_tabledata(\n iterator, colnumbers, fields, config)\n break\n elif tag == 'BINARY':\n warn_unknown_attrs(\n 'BINARY', data.keys(), config, pos)\n self.array = self._parse_binary(\n 1, iterator, colnumbers, fields, config, pos)\n break\n elif tag == 'BINARY2':\n if not config['version_1_3_or_later']:\n warn_or_raise(\n W52, W52, config['version'], config, pos)\n self.array = self._parse_binary(\n 2, iterator, colnumbers, fields, config, pos)\n break\n elif tag == 'FITS':\n warn_unknown_attrs(\n 'FITS', data.keys(), config, pos, ['extnum'])\n try:\n extnum = int(data.get('extnum', 0))\n if extnum < 0:\n raise ValueError(\"'extnum' cannot be negative.\")\n except ValueError:\n vo_raise(E17, (), config, pos)\n self.array = self._parse_fits(\n iterator, extnum, config)\n break\n else:\n warn_or_raise(W37, W37, tag, config, pos)\n break\n\n for start, tag, data, pos in iterator:\n if not start and tag == 'DATA':\n break\n\n for start, tag, data, pos in iterator:\n if start and tag == 'INFO':\n if not config.get('version_1_2_or_later'):\n warn_or_raise(\n W26, W26, ('INFO', 'TABLE', '1.2'), config, pos)\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n elif not start and tag == 'TABLE':\n break\n\n return self\n\n def _parse_tabledata(self, iterator, colnumbers, fields, config):\n # Since we don't know the number of rows up front, we'll\n # reallocate the record array to make room as we go. This\n # prevents the need to scan through the XML twice. 
The\n # allocation is by factors of 1.5.\n invalid = config.get('invalid', 'exception')\n\n # Need to have only one reference so that we can resize the\n # array\n array = self.array\n del self.array\n\n parsers = [field.converter.parse for field in fields]\n binparsers = [field.converter.binparse for field in fields]\n\n numrows = 0\n alloc_rows = len(array)\n colnumbers_bits = [i in colnumbers for i in range(len(fields))]\n row_default = [x.converter.default for x in fields]\n mask_default = [True] * len(fields)\n array_chunk = []\n mask_chunk = []\n chunk_size = config.get('chunk_size', DEFAULT_CHUNK_SIZE)\n for start, tag, data, pos in iterator:\n if tag == 'TR':\n # Now parse one row\n row = row_default[:]\n row_mask = mask_default[:]\n i = 0\n for start, tag, data, pos in iterator:\n if start:\n binary = (data.get('encoding', None) == 'base64')\n warn_unknown_attrs(\n tag, data.keys(), config, pos, ['encoding'])\n else:\n if tag == 'TD':\n if i >= len(fields):\n vo_raise(E20, len(fields), config, pos)\n\n if colnumbers_bits[i]:\n try:\n if binary:\n rawdata = base64.b64decode(\n data.encode('ascii'))\n buf = io.BytesIO(rawdata)\n buf.seek(0)\n try:\n value, mask_value = binparsers[i](\n buf.read)\n except Exception as e:\n vo_reraise(\n e, config, pos,\n \"(in row {:d}, col '{}')\".format(\n len(array_chunk),\n fields[i].ID))\n else:\n try:\n value, mask_value = parsers[i](\n data, config, pos)\n except Exception as e:\n vo_reraise(\n e, config, pos,\n \"(in row {:d}, col '{}')\".format(\n len(array_chunk),\n fields[i].ID))\n except Exception as e:\n if invalid == 'exception':\n vo_reraise(e, config, pos)\n else:\n row[i] = value\n row_mask[i] = mask_value\n elif tag == 'TR':\n break\n else:\n self._add_unknown_tag(\n iterator, tag, data, config, pos)\n i += 1\n\n if i < len(fields):\n vo_raise(E21, (i, len(fields)), config, pos)\n\n array_chunk.append(tuple(row))\n mask_chunk.append(tuple(row_mask))\n\n if len(array_chunk) == chunk_size:\n while numrows + chunk_size > alloc_rows:\n alloc_rows = self._resize_strategy(alloc_rows)\n if alloc_rows != len(array):\n array = _resize(array, alloc_rows)\n array[numrows:numrows + chunk_size] = array_chunk\n array.mask[numrows:numrows + chunk_size] = mask_chunk\n numrows += chunk_size\n array_chunk = []\n mask_chunk = []\n\n elif not start and tag == 'TABLEDATA':\n break\n\n # Now, resize the array to the exact number of rows we need and\n # put the last chunk values in there.\n alloc_rows = numrows + len(array_chunk)\n\n array = _resize(array, alloc_rows)\n array[numrows:] = array_chunk\n if alloc_rows != 0:\n array.mask[numrows:] = mask_chunk\n numrows += len(array_chunk)\n\n if (self.nrows is not None and\n self.nrows >= 0 and\n self.nrows != numrows):\n warn_or_raise(W18, W18, (self.nrows, numrows), config, pos)\n self._nrows = numrows\n\n return array\n\n def _get_binary_data_stream(self, iterator, config):\n have_local_stream = False\n for start, tag, data, pos in iterator:\n if tag == 'STREAM':\n if start:\n warn_unknown_attrs(\n 'STREAM', data.keys(), config, pos,\n ['type', 'href', 'actuate', 'encoding', 'expires',\n 'rights'])\n if 'href' not in data:\n have_local_stream = True\n if data.get('encoding', None) != 'base64':\n warn_or_raise(\n W38, W38, data.get('encoding', None),\n config, pos)\n else:\n href = data['href']\n xmlutil.check_anyuri(href, config, pos)\n encoding = data.get('encoding', None)\n else:\n buffer = data\n break\n\n if have_local_stream:\n buffer = base64.b64decode(buffer.encode('ascii'))\n string_io = 
io.BytesIO(buffer)\n string_io.seek(0)\n read = string_io.read\n else:\n if not href.startswith(('http', 'ftp', 'file')):\n vo_raise(\n \"The vo package only supports remote data through http, \" +\n \"ftp or file\",\n self._config, self._pos, NotImplementedError)\n fd = urllib.request.urlopen(href)\n if encoding is not None:\n if encoding == 'gzip':\n fd = gzip.GzipFile(href, 'rb', fileobj=fd)\n elif encoding == 'base64':\n fd = codecs.EncodedFile(fd, 'base64')\n else:\n vo_raise(\n f\"Unknown encoding type '{encoding}'\",\n self._config, self._pos, NotImplementedError)\n read = fd.read\n\n def careful_read(length):\n result = read(length)\n if len(result) != length:\n raise EOFError\n return result\n\n return careful_read\n\n def _parse_binary(self, mode, iterator, colnumbers, fields, config, pos):\n fields = self.fields\n\n careful_read = self._get_binary_data_stream(iterator, config)\n\n # Need to have only one reference so that we can resize the\n # array\n array = self.array\n del self.array\n\n binparsers = [field.converter.binparse for field in fields]\n\n numrows = 0\n alloc_rows = len(array)\n while True:\n # Resize result arrays if necessary\n if numrows >= alloc_rows:\n alloc_rows = self._resize_strategy(alloc_rows)\n array = _resize(array, alloc_rows)\n\n row_data = []\n row_mask_data = []\n\n try:\n if mode == 2:\n mask_bits = careful_read(int((len(fields) + 7) / 8))\n row_mask_data = list(converters.bitarray_to_bool(\n mask_bits, len(fields)))\n\n # Ignore the mask for string columns (see issue 8995)\n for i, f in enumerate(fields):\n if row_mask_data[i] and (f.datatype == 'char' or f.datatype == 'unicodeChar'):\n row_mask_data[i] = False\n\n for i, binparse in enumerate(binparsers):\n try:\n value, value_mask = binparse(careful_read)\n except EOFError:\n raise\n except Exception as e:\n vo_reraise(\n e, config, pos, \"(in row {:d}, col '{}')\".format(\n numrows, fields[i].ID))\n row_data.append(value)\n if mode == 1:\n row_mask_data.append(value_mask)\n else:\n row_mask_data[i] = row_mask_data[i] or value_mask\n except EOFError:\n break\n\n row = [x.converter.default for x in fields]\n row_mask = [False] * len(fields)\n for i in colnumbers:\n row[i] = row_data[i]\n row_mask[i] = row_mask_data[i]\n\n array[numrows] = tuple(row)\n array.mask[numrows] = tuple(row_mask)\n numrows += 1\n\n array = _resize(array, numrows)\n\n return array\n\n def _parse_fits(self, iterator, extnum, config):\n for start, tag, data, pos in iterator:\n if tag == 'STREAM':\n if start:\n warn_unknown_attrs(\n 'STREAM', data.keys(), config, pos,\n ['type', 'href', 'actuate', 'encoding', 'expires',\n 'rights'])\n href = data['href']\n encoding = data.get('encoding', None)\n else:\n break\n\n if not href.startswith(('http', 'ftp', 'file')):\n vo_raise(\n \"The vo package only supports remote data through http, \"\n \"ftp or file\",\n self._config, self._pos, NotImplementedError)\n\n fd = urllib.request.urlopen(href)\n if encoding is not None:\n if encoding == 'gzip':\n fd = gzip.GzipFile(href, 'r', fileobj=fd)\n elif encoding == 'base64':\n fd = codecs.EncodedFile(fd, 'base64')\n else:\n vo_raise(\n f\"Unknown encoding type '{encoding}'\",\n self._config, self._pos, NotImplementedError)\n\n hdulist = fits.open(fd)\n\n array = hdulist[int(extnum)].data\n if array.dtype != self.array.dtype:\n warn_or_raise(W19, W19, (), self._config, self._pos)\n\n return array\n\n def to_xml(self, w, **kwargs):\n specified_format = kwargs.get('tabledata_format')\n if specified_format is not None:\n format = 
specified_format\n else:\n format = self.format\n if format == 'fits':\n format = 'tabledata'\n\n with w.tag(\n 'TABLE',\n attrib=w.object_attrs(\n self,\n ('ID', 'name', 'ref', 'ucd', 'utype', 'nrows'))):\n\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n\n for element_set in (self.fields, self.params):\n for element in element_set:\n element._setup({}, None)\n\n if self.ref is None:\n for element_set in (self.fields, self.params, self.groups,\n self.links):\n for element in element_set:\n element.to_xml(w, **kwargs)\n elif kwargs['version_1_2_or_later']:\n index = list(self._votable.iter_tables()).index(self)\n group = Group(self, ID=f\"_g{index}\")\n group.to_xml(w, **kwargs)\n\n if len(self.array):\n with w.tag('DATA'):\n if format == 'tabledata':\n self._write_tabledata(w, **kwargs)\n elif format == 'binary':\n self._write_binary(1, w, **kwargs)\n elif format == 'binary2':\n self._write_binary(2, w, **kwargs)\n\n if kwargs['version_1_2_or_later']:\n for element in self._infos:\n element.to_xml(w, **kwargs)\n\n def _write_tabledata(self, w, **kwargs):\n fields = self.fields\n array = self.array\n\n with w.tag('TABLEDATA'):\n w._flush()\n if (_has_c_tabledata_writer and\n not kwargs.get('_debug_python_based_parser')):\n supports_empty_values = [\n field.converter.supports_empty_values(kwargs)\n for field in fields]\n fields = [field.converter.output for field in fields]\n indent = len(w._tags) - 1\n tablewriter.write_tabledata(\n w.write, array.data, array.mask, fields,\n supports_empty_values, indent, 1 << 8)\n else:\n write = w.write\n indent_spaces = w.get_indentation_spaces()\n tr_start = indent_spaces + \"<TR>\\n\"\n tr_end = indent_spaces + \"</TR>\\n\"\n td = indent_spaces + \" <TD>{}</TD>\\n\"\n td_empty = indent_spaces + \" <TD/>\\n\"\n fields = [(i, field.converter.output,\n field.converter.supports_empty_values(kwargs))\n for i, field in enumerate(fields)]\n for row in range(len(array)):\n write(tr_start)\n array_row = array.data[row]\n mask_row = array.mask[row]\n for i, output, supports_empty_values in fields:\n data = array_row[i]\n masked = mask_row[i]\n if supports_empty_values and np.all(masked):\n write(td_empty)\n else:\n try:\n val = output(data, masked)\n except Exception as e:\n vo_reraise(\n e,\n additional=\"(in row {:d}, col '{}')\".format(\n row, self.fields[i].ID))\n if len(val):\n write(td.format(val))\n else:\n write(td_empty)\n write(tr_end)\n\n def _write_binary(self, mode, w, **kwargs):\n fields = self.fields\n array = self.array\n if mode == 1:\n tag_name = 'BINARY'\n else:\n tag_name = 'BINARY2'\n\n with w.tag(tag_name):\n with w.tag('STREAM', encoding='base64'):\n fields_basic = [(i, field.converter.binoutput)\n for (i, field) in enumerate(fields)]\n\n data = io.BytesIO()\n for row in range(len(array)):\n array_row = array.data[row]\n array_mask = array.mask[row]\n\n if mode == 2:\n flattened = np.array([np.all(x) for x in array_mask])\n data.write(converters.bool_to_bitarray(flattened))\n\n for i, converter in fields_basic:\n try:\n chunk = converter(array_row[i], array_mask[i])\n assert type(chunk) == bytes\n except Exception as e:\n vo_reraise(\n e, additional=\"(in row {:d}, col '{}')\".format(\n row, fields[i].ID))\n data.write(chunk)\n\n w._flush()\n w.write(base64.b64encode(data.getvalue()).decode('ascii'))\n\n def to_table(self, use_names_over_ids=False):\n \"\"\"\n Convert this VO Table to an `astropy.table.Table` instance.\n\n Parameters\n ----------\n use_names_over_ids : bool, optional\n When 
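# --- Illustrative sketch (not part of the original source): converting a
# parsed VO table into an astropy Table.  "catalog.xml" is a placeholder.
from astropy.io.votable import parse

votable = parse("catalog.xml")
first = votable.get_first_table()
data = first.to_table(use_names_over_ids=True)   # column names from FIELD name attributes
print(data.colnames)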
`True` use the ``name`` attributes of columns as the\n names of columns in the `astropy.table.Table` instance.\n Since names are not guaranteed to be unique, this may cause\n some columns to be renamed by appending numbers to the end.\n Otherwise (default), use the ID attributes as the column\n names.\n\n .. warning::\n Variable-length array fields may not be restored\n identically when round-tripping through the\n `astropy.table.Table` instance.\n \"\"\"\n from astropy.table import Table\n\n meta = {}\n for key in ['ID', 'name', 'ref', 'ucd', 'utype', 'description']:\n val = getattr(self, key, None)\n if val is not None:\n meta[key] = val\n\n if use_names_over_ids:\n names = [field.name for field in self.fields]\n unique_names = []\n for i, name in enumerate(names):\n new_name = name\n i = 2\n while new_name in unique_names:\n new_name = f'{name}{i}'\n i += 1\n unique_names.append(new_name)\n names = unique_names\n else:\n names = [field.ID for field in self.fields]\n\n table = Table(self.array, names=names, meta=meta)\n\n for name, field in zip(names, self.fields):\n column = table[name]\n field.to_table_column(column)\n\n return table\n\n @classmethod\n def from_table(cls, votable, table):\n \"\"\"\n Create a `Table` instance from a given `astropy.table.Table`\n instance.\n \"\"\"\n kwargs = {}\n for key in ['ID', 'name', 'ref', 'ucd', 'utype']:\n val = table.meta.get(key)\n if val is not None:\n kwargs[key] = val\n new_table = cls(votable, **kwargs)\n if 'description' in table.meta:\n new_table.description = table.meta['description']\n\n for colname in table.colnames:\n column = table[colname]\n new_table.fields.append(Field.from_table_column(votable, column))\n\n if table.mask is None:\n new_table.array = ma.array(np.asarray(table))\n else:\n new_table.array = ma.array(np.asarray(table),\n mask=np.asarray(table.mask))\n\n return new_table\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all FIELD and PARAM elements in the\n TABLE.\n \"\"\"\n for param in self.params:\n yield param\n for field in self.fields:\n yield field\n for group in self.groups:\n for field in group.iter_fields_and_params():\n yield field\n\n get_field_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given ID.\n \"\"\")\n\n get_field_by_id_or_name = _lookup_by_id_or_name_factory(\n 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given ID or name.\n \"\"\")\n\n get_fields_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_fields_and_params', 'FIELD or PARAM',\n \"\"\"\n Looks up a FIELD or PARAM element by the given utype and\n returns an iterator emitting all matches.\n \"\"\")\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all GROUP elements in the TABLE.\n \"\"\"\n for group in self.groups:\n yield group\n for g in group.iter_groups():\n yield g\n\n get_group_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP element by the given ID. 
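# --- Illustrative sketch (not part of the original source): the reverse
# direction, wrapping an existing astropy Table in a new VOTable file via the
# convenience helpers.  Column names and the output file name are examples.
import numpy as np
from astropy.table import Table as AstropyTable
from astropy.io.votable import from_table, writeto

data = AstropyTable({"ra": np.array([10.0, 11.5]),
                     "dec": np.array([-3.2, 4.8])})
votable = from_table(data)            # builds VOTableFile -> Resource -> Table
writeto(votable, "from_table.xml")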
Used by the group's\n \"ref\" attribute\n \"\"\")\n\n get_groups_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n def iter_info(self):\n for info in self.infos:\n yield info\n\n\nclass Resource(Element, _IDProperty, _NameProperty, _UtypeProperty,\n _DescriptionProperty):\n \"\"\"\n RESOURCE_ element: Groups TABLE_ and RESOURCE_ elements.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n \"\"\"\n\n def __init__(self, name=None, ID=None, utype=None, type='results',\n id=None, config=None, pos=None, **kwargs):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self.name = name\n self.ID = resolve_id(ID, id, config, pos)\n self.utype = utype\n self.type = type\n self._extra_attributes = kwargs\n self.description = None\n\n self._coordinate_systems = HomogeneousList(CooSys)\n self._groups = HomogeneousList(Group)\n self._params = HomogeneousList(Param)\n self._infos = HomogeneousList(Info)\n self._links = HomogeneousList(Link)\n self._tables = HomogeneousList(Table)\n self._resources = HomogeneousList(Resource)\n\n warn_unknown_attrs('RESOURCE', kwargs.keys(), config, pos)\n\n def __repr__(self):\n buff = io.StringIO()\n w = XMLWriter(buff)\n w.element(\n self._element_name,\n attrib=w.object_attrs(self, self._attr_list))\n return buff.getvalue().strip()\n\n @property\n def type(self):\n \"\"\"\n [*required*] The type of the resource. Must be either:\n\n - 'results': This resource contains actual result values\n (default)\n\n - 'meta': This resource contains only datatype descriptions\n (FIELD_ elements), but no actual data.\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n if type not in ('results', 'meta'):\n vo_raise(E18, type, self._config, self._pos)\n self._type = type\n\n @property\n def extra_attributes(self):\n \"\"\"\n A dictionary of string keys to string values containing any\n extra attributes of the RESOURCE_ element that are not defined\n in the specification. (The specification explicitly allows\n for extra attributes here, but nowhere else.)\n \"\"\"\n return self._extra_attributes\n\n @property\n def coordinate_systems(self):\n \"\"\"\n A list of coordinate system definitions (COOSYS_ elements) for\n the RESOURCE_. Must contain only `CooSys` objects.\n \"\"\"\n return self._coordinate_systems\n\n @property\n def infos(self):\n \"\"\"\n A list of informational parameters (key-value pairs) for the\n resource. Must only contain `Info` objects.\n \"\"\"\n return self._infos\n\n @property\n def groups(self):\n \"\"\"\n A list of groups\n \"\"\"\n return self._groups\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) for the\n resource. Must contain only `Param` objects.\n \"\"\"\n return self._params\n\n @property\n def links(self):\n \"\"\"\n A list of links (pointers to other documents or servers\n through a URI) for the resource. Must contain only `Link`\n objects.\n \"\"\"\n return self._links\n\n @property\n def tables(self):\n \"\"\"\n A list of tables in the resource. Must contain only\n `Table` objects.\n \"\"\"\n return self._tables\n\n @property\n def resources(self):\n \"\"\"\n A list of nested resources inside this resource. 
Must contain\n only `Resource` objects.\n \"\"\"\n return self._resources\n\n def _add_table(self, iterator, tag, data, config, pos):\n table = Table(self._votable, config=config, pos=pos, **data)\n self.tables.append(table)\n table.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self._votable, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_coosys(self, iterator, tag, data, config, pos):\n coosys = CooSys(config=config, pos=pos, **data)\n self.coordinate_systems.append(coosys)\n coosys.parse(iterator, config)\n\n def _add_resource(self, iterator, tag, data, config, pos):\n resource = Resource(config=config, pos=pos, **data)\n self.resources.append(resource)\n resource.parse(self._votable, iterator, config)\n\n def _add_link(self, iterator, tag, data, config, pos):\n link = Link(config=config, pos=pos, **data)\n self.links.append(link)\n link.parse(iterator, config)\n\n def parse(self, votable, iterator, config):\n self._votable = votable\n\n tag_mapping = {\n 'TABLE': self._add_table,\n 'INFO': self._add_info,\n 'PARAM': self._add_param,\n 'GROUP' : self._add_group,\n 'COOSYS': self._add_coosys,\n 'RESOURCE': self._add_resource,\n 'LINK': self._add_link,\n 'DESCRIPTION': self._ignore_add\n }\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'RESOURCE', config, pos)\n self.description = data or None\n elif tag == 'RESOURCE':\n break\n\n del self._votable\n\n return self\n\n def to_xml(self, w, **kwargs):\n attrs = w.object_attrs(self, ('ID', 'type', 'utype'))\n attrs.update(self.extra_attributes)\n with w.tag('RESOURCE', attrib=attrs):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n for element_set in (self.coordinate_systems, self.params,\n self.infos, self.links, self.tables,\n self.resources):\n for element in element_set:\n element.to_xml(w, **kwargs)\n\n def iter_tables(self):\n \"\"\"\n Recursively iterates over all tables in the resource and\n nested resources.\n \"\"\"\n for table in self.tables:\n yield table\n for resource in self.resources:\n for table in resource.iter_tables():\n yield table\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterates over all FIELD_ and PARAM_ elements in\n the resource, its tables and nested resources.\n \"\"\"\n for param in self.params:\n yield param\n for table in self.tables:\n for param in table.iter_fields_and_params():\n yield param\n for resource in self.resources:\n for param in resource.iter_fields_and_params():\n yield param\n\n def iter_coosys(self):\n \"\"\"\n Recursively iterates over all the COOSYS_ elements in the\n resource and nested resources.\n \"\"\"\n for coosys in self.coordinate_systems:\n yield coosys\n for resource in self.resources:\n for coosys in resource.iter_coosys():\n yield coosys\n\n def iter_info(self):\n \"\"\"\n Recursively iterates over all the INFO_ elements in the\n resource and nested resources.\n \"\"\"\n for info in self.infos:\n 
yield info\n for table in self.tables:\n for info in table.iter_info():\n yield info\n for resource in self.resources:\n for info in resource.iter_info():\n yield info\n\n\nclass VOTableFile(Element, _IDProperty, _DescriptionProperty):\n \"\"\"\n VOTABLE_ element: represents an entire file.\n\n The keyword arguments correspond to setting members of the same\n name, documented below.\n\n *version* is settable at construction time only, since conformance\n tests for building the rest of the structure depend on it.\n \"\"\"\n\n def __init__(self, ID=None, id=None, config=None, pos=None, version=\"1.3\"):\n if config is None:\n config = {}\n self._config = config\n self._pos = pos\n\n Element.__init__(self)\n self.ID = resolve_id(ID, id, config, pos)\n self.description = None\n\n self._coordinate_systems = HomogeneousList(CooSys)\n self._params = HomogeneousList(Param)\n self._infos = HomogeneousList(Info)\n self._resources = HomogeneousList(Resource)\n self._groups = HomogeneousList(Group)\n\n version = str(version)\n if version not in (\"1.0\", \"1.1\", \"1.2\", \"1.3\"):\n raise ValueError(\"'version' should be one of '1.0', '1.1', \"\n \"'1.2', or '1.3'\")\n\n self._version = version\n\n def __repr__(self):\n n_tables = len(list(self.iter_tables()))\n return f'<VOTABLE>... {n_tables} tables ...</VOTABLE>'\n\n @property\n def version(self):\n \"\"\"\n The version of the VOTable specification that the file uses.\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n version = str(version)\n if version not in ('1.1', '1.2', '1.3'):\n raise ValueError(\n \"astropy.io.votable only supports VOTable versions \"\n \"1.1, 1.2 and 1.3\")\n self._version = version\n\n @property\n def coordinate_systems(self):\n \"\"\"\n A list of coordinate system descriptions for the file. Must\n contain only `CooSys` objects.\n \"\"\"\n return self._coordinate_systems\n\n @property\n def params(self):\n \"\"\"\n A list of parameters (constant-valued columns) that apply to\n the entire file. Must contain only `Param` objects.\n \"\"\"\n return self._params\n\n @property\n def infos(self):\n \"\"\"\n A list of informational parameters (key-value pairs) for the\n entire file. Must only contain `Info` objects.\n \"\"\"\n return self._infos\n\n @property\n def resources(self):\n \"\"\"\n A list of resources, in the order they appear in the file.\n Must only contain `Resource` objects.\n \"\"\"\n return self._resources\n\n @property\n def groups(self):\n \"\"\"\n A list of groups, in the order they appear in the file. 
Only\n supported as a child of the VOTABLE element in VOTable 1.2 or\n later.\n \"\"\"\n return self._groups\n\n def _add_param(self, iterator, tag, data, config, pos):\n param = Param(self, config=config, pos=pos, **data)\n self.params.append(param)\n param.parse(iterator, config)\n\n def _add_resource(self, iterator, tag, data, config, pos):\n resource = Resource(config=config, pos=pos, **data)\n self.resources.append(resource)\n resource.parse(self, iterator, config)\n\n def _add_coosys(self, iterator, tag, data, config, pos):\n coosys = CooSys(config=config, pos=pos, **data)\n self.coordinate_systems.append(coosys)\n coosys.parse(iterator, config)\n\n def _add_info(self, iterator, tag, data, config, pos):\n info = Info(config=config, pos=pos, **data)\n self.infos.append(info)\n info.parse(iterator, config)\n\n def _add_group(self, iterator, tag, data, config, pos):\n if not config.get('version_1_2_or_later'):\n warn_or_raise(W26, W26, ('GROUP', 'VOTABLE', '1.2'), config, pos)\n group = Group(self, config=config, pos=pos, **data)\n self.groups.append(group)\n group.parse(iterator, config)\n\n def parse(self, iterator, config):\n config['_current_table_number'] = 0\n\n for start, tag, data, pos in iterator:\n if start:\n if tag == 'xml':\n pass\n elif tag == 'VOTABLE':\n if 'version' not in data:\n warn_or_raise(W20, W20, self.version, config, pos)\n config['version'] = self.version\n else:\n config['version'] = self._version = data['version']\n if config['version'].lower().startswith('v'):\n warn_or_raise(\n W29, W29, config['version'], config, pos)\n self._version = config['version'] = \\\n config['version'][1:]\n if config['version'] not in ('1.1', '1.2', '1.3'):\n vo_warn(W21, config['version'], config, pos)\n\n if 'xmlns' in data:\n correct_ns = ('http://www.ivoa.net/xml/VOTable/v{}'.format(\n config['version']))\n if data['xmlns'] != correct_ns:\n vo_warn(\n W41, (correct_ns, data['xmlns']), config, pos)\n else:\n vo_warn(W42, (), config, pos)\n\n break\n else:\n vo_raise(E19, (), config, pos)\n config['version_1_1_or_later'] = \\\n util.version_compare(config['version'], '1.1') >= 0\n config['version_1_2_or_later'] = \\\n util.version_compare(config['version'], '1.2') >= 0\n config['version_1_3_or_later'] = \\\n util.version_compare(config['version'], '1.3') >= 0\n\n tag_mapping = {\n 'PARAM': self._add_param,\n 'RESOURCE': self._add_resource,\n 'COOSYS': self._add_coosys,\n 'INFO': self._add_info,\n 'DEFINITIONS': self._add_definitions,\n 'DESCRIPTION': self._ignore_add,\n 'GROUP': self._add_group}\n\n for start, tag, data, pos in iterator:\n if start:\n tag_mapping.get(tag, self._add_unknown_tag)(\n iterator, tag, data, config, pos)\n elif tag == 'DESCRIPTION':\n if self.description is not None:\n warn_or_raise(W17, W17, 'VOTABLE', config, pos)\n self.description = data or None\n\n if not len(self.resources) and config['version_1_2_or_later']:\n warn_or_raise(W53, W53, (), config, pos)\n\n return self\n\n def to_xml(self, fd, compressed=False, tabledata_format=None,\n _debug_python_based_parser=False, _astropy_version=None):\n \"\"\"\n Write to an XML file.\n\n Parameters\n ----------\n fd : str path or writable file-like object\n Where to write the file.\n\n compressed : bool, optional\n When `True`, write to a gzip-compressed file. (Default:\n `False`)\n\n tabledata_format : str, optional\n Override the format of the table(s) data to write. Must\n be one of ``tabledata`` (text representation), ``binary`` or\n ``binary2``. 
By default, use the format that was specified\n in each `Table` object as it was created or read in. See\n :ref:`votable-serialization`.\n \"\"\"\n if tabledata_format is not None:\n if tabledata_format.lower() not in (\n 'tabledata', 'binary', 'binary2'):\n raise ValueError(f\"Unknown format type '{format}'\")\n\n kwargs = {\n 'version': self.version,\n 'version_1_1_or_later':\n util.version_compare(self.version, '1.1') >= 0,\n 'version_1_2_or_later':\n util.version_compare(self.version, '1.2') >= 0,\n 'version_1_3_or_later':\n util.version_compare(self.version, '1.3') >= 0,\n 'tabledata_format':\n tabledata_format,\n '_debug_python_based_parser': _debug_python_based_parser,\n '_group_number': 1}\n\n with util.convert_to_writable_filelike(\n fd, compressed=compressed) as fd:\n w = XMLWriter(fd)\n version = self.version\n if _astropy_version is None:\n lib_version = astropy_version\n else:\n lib_version = _astropy_version\n\n xml_header = \"\"\"\n<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<!-- Produced with astropy.io.votable version {lib_version}\n http://www.astropy.org/ -->\\n\"\"\"\n w.write(xml_header.lstrip().format(**locals()))\n\n with w.tag('VOTABLE',\n {'version': version,\n 'xmlns:xsi':\n \"http://www.w3.org/2001/XMLSchema-instance\",\n 'xsi:noNamespaceSchemaLocation':\n f\"http://www.ivoa.net/xml/VOTable/v{version}\",\n 'xmlns':\n f\"http://www.ivoa.net/xml/VOTable/v{version}\"}):\n if self.description is not None:\n w.element(\"DESCRIPTION\", self.description, wrap=True)\n element_sets = [self.coordinate_systems, self.params,\n self.infos, self.resources]\n if kwargs['version_1_2_or_later']:\n element_sets[0] = self.groups\n for element_set in element_sets:\n for element in element_set:\n element.to_xml(w, **kwargs)\n\n def iter_tables(self):\n \"\"\"\n Iterates over all tables in the VOTable file in a \"flat\" way,\n ignoring the nesting of resources etc.\n \"\"\"\n for resource in self.resources:\n for table in resource.iter_tables():\n yield table\n\n def get_first_table(self):\n \"\"\"\n Often, you know there is only one table in the file, and\n that's all you need. This method returns that first table.\n \"\"\"\n for table in self.iter_tables():\n if not table.is_empty():\n return table\n raise IndexError(\"No table found in VOTABLE file.\")\n\n get_table_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_tables', 'TABLE',\n \"\"\"\n Looks up a TABLE_ element by the given ID. Used by the table\n \"ref\" attribute.\n \"\"\")\n\n get_tables_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_tables', 'TABLE',\n \"\"\"\n Looks up a TABLE_ element by the given utype, and returns an\n iterator emitting all matches.\n \"\"\")\n\n def get_table_by_index(self, idx):\n \"\"\"\n Get a table by its ordinal position in the file.\n \"\"\"\n for i, table in enumerate(self.iter_tables()):\n if i == idx:\n return table\n raise IndexError(\n f\"No table at index {idx:d} found in VOTABLE file.\")\n\n def iter_fields_and_params(self):\n \"\"\"\n Recursively iterate over all FIELD_ and PARAM_ elements in the\n VOTABLE_ file.\n \"\"\"\n for resource in self.resources:\n for field in resource.iter_fields_and_params():\n yield field\n\n get_field_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given ID_. 
Used by the field's\n \"ref\" attribute.\n \"\"\")\n\n get_fields_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n get_field_by_id_or_name = _lookup_by_id_or_name_factory(\n 'iter_fields_and_params', 'FIELD',\n \"\"\"\n Looks up a FIELD_ element by the given ID_ or name.\n \"\"\")\n\n def iter_values(self):\n \"\"\"\n Recursively iterate over all VALUES_ elements in the VOTABLE_\n file.\n \"\"\"\n for field in self.iter_fields_and_params():\n yield field.values\n\n get_values_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_values', 'VALUES',\n \"\"\"\n Looks up a VALUES_ element by the given ID. Used by the values\n \"ref\" attribute.\n \"\"\")\n\n def iter_groups(self):\n \"\"\"\n Recursively iterate over all GROUP_ elements in the VOTABLE_\n file.\n \"\"\"\n for table in self.iter_tables():\n for group in table.iter_groups():\n yield group\n\n get_group_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP_ element by the given ID. Used by the group's\n \"ref\" attribute\n \"\"\")\n\n get_groups_by_utype = _lookup_by_attr_factory(\n 'utype', False, 'iter_groups', 'GROUP',\n \"\"\"\n Looks up a GROUP_ element by the given utype and returns an\n iterator emitting all matches.\n \"\"\")\n\n def iter_coosys(self):\n \"\"\"\n Recursively iterate over all COOSYS_ elements in the VOTABLE_\n file.\n \"\"\"\n for coosys in self.coordinate_systems:\n yield coosys\n for resource in self.resources:\n for coosys in resource.iter_coosys():\n yield coosys\n\n get_coosys_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_coosys', 'COOSYS',\n \"\"\"Looks up a COOSYS_ element by the given ID.\"\"\")\n\n def iter_info(self):\n \"\"\"\n Recursively iterate over all INFO_ elements in the VOTABLE_\n file.\n \"\"\"\n for info in self.infos:\n yield info\n for resource in self.resources:\n for info in resource.iter_info():\n yield info\n\n get_info_by_id = _lookup_by_attr_factory(\n 'ID', True, 'iter_info', 'INFO',\n \"\"\"Looks up a INFO element by the given ID.\"\"\")\n\n def set_all_tables_format(self, format):\n \"\"\"\n Set the output storage format of all tables in the file.\n \"\"\"\n for table in self.iter_tables():\n table.format = format\n\n @classmethod\n def from_table(cls, table, table_id=None):\n \"\"\"\n Create a `VOTableFile` instance from a given\n `astropy.table.Table` instance.\n\n Parameters\n ----------\n table_id : str, optional\n Set the given ID attribute on the returned Table instance.\n \"\"\"\n votable_file = cls()\n resource = Resource()\n votable = Table.from_table(votable_file, table)\n if table_id is not None:\n votable.ID = table_id\n resource.tables.append(votable)\n votable_file.resources.append(resource)\n return votable_file\n" ]
[ [ "numpy.ceil", "numpy.zeros", "numpy.dtype", "numpy.any", "numpy.asarray", "numpy.ma.array", "numpy.recarray", "numpy.all", "numpy.ma.zeros" ] ]
hfyer/NAIC2020_ReID_R1
[ "240f0c9f65e482e6b0090f01d9f9e3373a337033" ]
[ "fast-reid-master/fastreid/data/samplers/triplet_sampler.py" ]
[ "# encoding: utf-8\n\"\"\"\n@author: liaoxingyu\n@contact: [email protected]\n\"\"\"\n\nimport copy\nimport itertools\nfrom collections import defaultdict\nfrom typing import Optional\n\nimport numpy as np\nfrom torch.utils.data.sampler import Sampler\n\nfrom fastreid.utils import comm\n\n\ndef no_index(a, b):\n assert isinstance(a, list)\n return [i for i, j in enumerate(a) if j != b]\n\n\nclass BalancedIdentitySampler(Sampler):\n def __init__(self, data_source: str, batch_size: int, num_instances: int, seed: Optional[int] = None):\n self.data_source = data_source\n self.batch_size = batch_size\n self.num_instances = num_instances\n self.num_pids_per_batch = batch_size // self.num_instances\n\n self.index_pid = defaultdict(list)\n self.pid_cam = defaultdict(list)\n self.pid_index = defaultdict(list)\n\n for index, info in enumerate(data_source):\n pid = info[1]\n camid = info[2]\n self.index_pid[index] = pid\n self.pid_cam[pid].append(camid)\n self.pid_index[pid].append(index)\n\n self.pids = sorted(list(self.pid_index.keys()))\n self.num_identities = len(self.pids)\n\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n np.random.seed(self._seed)\n while True:\n # Shuffle identity list\n identities = np.random.permutation(self.num_identities)\n\n # If remaining identities cannot be enough for a batch,\n # just drop the remaining parts\n drop_indices = self.num_identities % self.num_pids_per_batch\n if drop_indices: identities = identities[:-drop_indices]\n\n ret = []\n for kid in identities:\n i = np.random.choice(self.pid_index[self.pids[kid]])\n _, i_pid, i_cam = self.data_source[i]\n ret.append(i)\n pid_i = self.index_pid[i]\n cams = self.pid_cam[pid_i]\n index = self.pid_index[pid_i]\n select_cams = no_index(cams, i_cam)\n\n if select_cams:\n if len(select_cams) >= self.num_instances:\n cam_indexes = np.random.choice(select_cams, size=self.num_instances - 1, replace=False)\n else:\n cam_indexes = np.random.choice(select_cams, size=self.num_instances - 1, replace=True)\n for kk in cam_indexes:\n ret.append(index[kk])\n else:\n select_indexes = no_index(index, i)\n if not select_indexes:\n # Only one image for this identity\n ind_indexes = [0] * (self.num_instances - 1)\n elif len(select_indexes) >= self.num_instances:\n ind_indexes = np.random.choice(select_indexes, size=self.num_instances - 1, replace=False)\n else:\n ind_indexes = np.random.choice(select_indexes, size=self.num_instances - 1, replace=True)\n\n for kk in ind_indexes:\n ret.append(index[kk])\n\n if len(ret) == self.batch_size:\n yield from ret\n ret = []\n\n\nclass NaiveIdentitySampler(Sampler):\n \"\"\"\n Randomly sample N identities, then for each identity,\n randomly sample K instances, therefore batch size is N*K.\n Args:\n - data_source (list): list of (img_path, pid, camid).\n - num_instances (int): number of instances per identity in a batch.\n - batch_size (int): number of examples in a batch.\n \"\"\"\n\n def __init__(self, data_source: str, batch_size: int, num_instances: int, seed: Optional[int] = None):\n self.data_source = data_source\n self.batch_size = batch_size\n self.num_instances = num_instances\n self.num_pids_per_batch = batch_size // self.num_instances\n\n self.index_pid = defaultdict(list)\n self.pid_cam = defaultdict(list)\n 
self.pid_index = defaultdict(list)\n\n for index, info in enumerate(data_source):\n pid = info[1]\n camid = info[2]\n self.index_pid[index] = pid\n self.pid_cam[pid].append(camid)\n self.pid_index[pid].append(index)\n # print(self.pid_index.keys())\n self.pids = sorted(list(self.pid_index.keys()))\n self.num_identities = len(self.pids)\n\n if seed is None:\n seed = comm.shared_random_seed()\n self._seed = int(seed)\n\n self._rank = comm.get_rank()\n self._world_size = comm.get_world_size()\n\n def __iter__(self):\n start = self._rank\n yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)\n\n def _infinite_indices(self):\n np.random.seed(self._seed)\n while True:\n avai_pids = copy.deepcopy(self.pids)\n batch_idxs_dict = {}\n\n batch_indices = []\n while len(avai_pids) >= self.num_pids_per_batch:\n selected_pids = np.random.choice(avai_pids, self.num_pids_per_batch, replace=False).tolist()\n for pid in selected_pids:\n # Register pid in batch_idxs_dict if not\n if pid not in batch_idxs_dict:\n idxs = copy.deepcopy(self.pid_index[pid])\n if len(idxs) < self.num_instances:\n idxs = np.random.choice(idxs, size=self.num_instances, replace=True).tolist()\n np.random.shuffle(idxs)\n batch_idxs_dict[pid] = idxs\n\n avai_idxs = batch_idxs_dict[pid]\n for _ in range(self.num_instances):\n batch_indices.append(avai_idxs.pop(0))\n\n if len(avai_idxs) < self.num_instances: avai_pids.remove(pid)\n\n assert len(batch_indices) == self.batch_size, f\"batch indices have wrong \" \\\n f\"length with {len(batch_indices)}!\"\n yield from batch_indices\n batch_indices = []\n" ]
[ [ "numpy.random.permutation", "numpy.random.shuffle", "numpy.random.seed", "numpy.random.choice" ] ]
Abdumajidhu/Image-Enhancement-therough-Image-Processing-Techniques
[ "126690319297a5ed7df99ff47797980cc525ecf3" ]
[ "lion.py" ]
[ "import numpy as np\r\nimport cv2\r\n\r\nimg = cv2.imread('exercise_images/lion.jpg')\r\n\r\nimg_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)\r\n\r\n# equalize the histogram of the Y channel\r\nimg_yuv[:,:,1] = cv2.equalizeHist(img_yuv[:,:,0])\r\n\r\n# convert the YUV image back to RGB format\r\nimg_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)\r\nres = np.hstack((img,img_output))\r\n\r\ncv2.imwrite('newlionc.jpg',res)\r\n\r\ncv2.imshow('Color input image', img)\r\ncv2.imshow('Histogram equalized', img_output)\r\n\r\ncv2.waitKey(0)\r\n" ]
[ [ "numpy.hstack" ] ]
Hydroxy-OH/pytorch-yolo2
[ "3d38ae9a762ba22c95856584dc3914cb8ff176df" ]
[ "debug.py" ]
[ "from __future__ import print_function\n\nimport os\n\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nimport torch.optim as optim\nfrom darknet import Darknet\nfrom torch.autograd import Variable\nfrom utils import convert2cpu, image2torch\n\ncfgfile = \"face4.1re_95.91.cfg\"\nweightfile = \"face4.1re_95.91.conv.15\"\nimgpath = \"data/train/images/10002.png\"\nlabpath = imgpath.replace(\"images\", \"labels\").replace(\n \"JPEGImages\", \"labels\").replace(\".jpg\", \".txt\").replace(\".png\", \".txt\")\nlabel = torch.zeros(50 * 5)\nif os.path.getsize(labpath):\n tmp = torch.from_numpy(np.loadtxt(labpath))\n # tmp = torch.from_numpy(read_truths_args(labpath, 8.0/img.width))\n # tmp = torch.from_numpy(read_truths(labpath))\n tmp = tmp.view(-1)\n tsz = tmp.numel()\n # print(\"labpath = %s , tsz = %d\" % (labpath, tsz))\n if tsz > 50 * 5:\n label = tmp[0:50 * 5]\n elif tsz > 0:\n label[0:tsz] = tmp\nlabel = label.view(1, 50 * 5)\n\nmodel = Darknet(cfgfile)\nregion_loss = model.loss\nmodel.load_weights(weightfile)\n\nprint(\"--- bn weight ---\")\nprint(model.models[0][1].weight)\nprint(\"--- bn bias ---\")\nprint(model.models[0][1].bias)\nprint(\"--- bn running_mean ---\")\nprint(model.models[0][1].running_mean)\nprint(\"--- bn running_var ---\")\nprint(model.models[0][1].running_var)\n\nmodel.train()\nm = model.cuda()\n\noptimizer = optim.SGD(model.parameters(), lr=1e-2,\n momentum=0.9, weight_decay=0.1)\n\nimg = Image.open(imgpath)\nimg = image2torch(img)\nimg = Variable(img.cuda())\n\ntarget = Variable(label)\n\nprint(\"----- img ---------------------\")\nprint(img.data.storage()[0:100])\nprint(\"----- target -----------------\")\nprint(target.data.storage()[0:100])\n\noptimizer.zero_grad()\noutput = m(img)\nprint(\"----- output ------------------\")\nprint(output.data.storage()[0:100])\nexit()\n\nloss = region_loss(output, target)\nprint(\"----- loss --------------------\")\nprint(loss)\n\nsave_grad = None\n\n\ndef extract(grad):\n global saved_grad\n saved_grad = convert2cpu(grad.data)\n\n\noutput.register_hook(extract)\nloss.backward()\n\nsaved_grad = saved_grad.view(-1)\nfor i in range(saved_grad.size(0)):\n if abs(saved_grad[i]) >= 0.001:\n print(\"%d : %f\" % (i, saved_grad[i]))\n\nprint(model.state_dict().keys())\n# print(model.models[0][0].weight.grad.data.storage()[0:100])\n# print(model.models[14][0].weight.data.storage()[0:100])\nweight = model.models[13][0].weight.data\ngrad = model.models[13][0].weight.grad.data\nmask = torch.abs(grad) >= 0.1\nprint(weight[mask])\nprint(grad[mask])\n\noptimizer.step()\nweight2 = model.models[13][0].weight.data\nprint(weight2[mask])\n" ]
[ [ "torch.zeros", "torch.autograd.Variable", "torch.abs", "numpy.loadtxt" ] ]
lhwzds/CS303-project
[ "e1a4e7965b759c2c9583df3b731b19a0b2a198d6" ]
[ "project1/demo4.py" ]
[ "import numpy as np\nimport random\nimport time\nCOLOR_BLACK=-1\nCOLOR_WHITE=1\nCOLOR_NONE=0\nrandom.seed(0)\n\nclass AI(object):\n\n def __init__(self, chessboard_size, color, time_out):\n self.chessboard_size = chessboard_size\n self.color = color\n self.time_out = time_out\n self.candidate_list = []\n self.max_depth1=3\n self.max_depth2=4\n self.max_depth3=5\n self.steps=0\n # self.wmatrix=np.array([[500,-25,10,5,5,10,-25,500],\n # [-25,-45,1,1,1,1,-45,-25],\n # [10,1,3,2,2,3,1,10],\n # [5,1,2,1,1,2,1,5],\n # [5,1,2,1,1,2,1,5],\n # [10,1,3,2,2,3,1,10],\n # [-25,-45,1,1,1,1,-45,-25],\n # [500,-25,10,5,5,10,-25,500]])\n # self.wmatrix=np.array(\n # [[-500,25,-10,-5,-5,-10,25,-500],\n # [25,45,-1,-1,-1,-1,45,25],\n # [-10,-1,-3,-2,-2,-3,-1,-10],\n # [-5,-1,-2,-1,-1,-2,-1,-5],\n # [-5,-1,-2,-1,-1,-2,-1,-5],\n # [-10,-1,-3,-2,-2,-3,-1,-10],\n # [25,45,-1,-1,-1,-1,45,25],\n # [-500,25,-10,-5,-5,-10,25,-500]])\n self.wmatrix= np.array([\n [-990, 200, -300, -200, -200, -300, 200, -990],\n [200, 400, -4, -2, -2, -4, 400, 200],\n [-300, -4, -5, -1, -1, -5, -4, -300],\n [-200, -2, -1, 0, 0, -1, -2, -200],\n [-200, -2, -1, 0, 0, -1, -2, -200],\n [-300, -4, -5, -1, -1, -5, -4, -300],\n [200, 400, -4, -2, -2, -4, 400, 200],\n [-990, 200, -300, -200, -200, -300, 200, -990],\n ])\n\n def evaluate1(self,chessboard):\n return sum(sum(self.wmatrix*chessboard))*self.color\n\n def evaluate(self,chessboard):\n idx = np.where(chessboard == COLOR_NONE)\n idx = list(zip(idx[0], idx[1]))\n mobility=0\n \n for p in idx:\n result=self.test(chessboard,p,-1,self.color)\n if self.color in result:\n mobility+=1\n\n potentialmobility=0\n \n opidx=np.where(chessboard == -self.color)\n opidx=list(zip(opidx[0],opidx[1]))\n\n for p in idx:\n x=p[0]\n y=p[1]\n minx=max(p[0]-1,0)\n maxx=min(p[0]+1,self.chessboard_size-1)\n miny=max(p[1]-1,0)\n maxy=min(p[1]+1,self.chessboard_size-1)\n\n p0=(minx,miny)\n p1=(minx,y)\n p2=(minx,maxy)\n p3=(x,miny)\n p4=(x,maxy)\n p5=(maxx,miny)\n p6=(maxx,y)\n p7=(maxx,maxy)\n if p0 in opidx or p1 in opidx or p2 in opidx or p3 in opidx or p4 in opidx or p5 in opidx or p6 in opidx or p7 in opidx:\n potentialmobility+=1 \n\n value=mobility-potentialmobility\n\n return value \n\n # def minimax(self,board,player,depth=0):\n\n # for action in board.get_legal_actions():\n # board._move(action,self.take)\n # val,_ = player.minimax(board,self,depth+1) # 切换到假想敌\n # board._unmove(action) # 撤销走法,回溯\n\n # if self.take == \"O\":\n # if val > bestVal: # Max\n # bestVal,bestAction = val,action\n # else: # Min\n # if val < bestVal:\n # bestVal,bestAction = val,action\n # return bestVal,bestAction \n\n def test(self,chessboard,p,direction,color):\n plist=[]\n x=p[0]\n y=p[1]\n minx=max(p[0]-1,0)\n maxx=min(p[0]+1,self.chessboard_size-1)\n miny=max(p[1]-1,0)\n maxy=min(p[1]+1,self.chessboard_size-1)\n\n p0=(minx,miny)\n p1=(minx,y)\n p2=(minx,maxy)\n p3=(x,miny)\n p4=(x,maxy)\n p5=(maxx,miny)\n p6=(maxx,y)\n p7=(maxx,maxy)\n\n plist.append(p0) \n plist.append(p1)\n plist.append(p2)\n plist.append(p3)\n plist.append(p4) \n plist.append(p5)\n plist.append(p6)\n plist.append(p7)\n\n corner=[0,2,5,7]\n xedge=[3,4]\n yedge=[1,6]\n if direction!=-1 :\n if direction in corner:\n\n if p[0] == plist[direction][0] or p[1] == plist[direction][1]:\n return 0\n else:\n\n if chessboard[plist[direction]]+chessboard[p]==0:\n return chessboard[plist[direction]]\n elif chessboard[plist[direction]]==0:\n return 0\n else:\n return self.test(chessboard, plist[direction], direction,color)\n\n elif direction in xedge:\n\n if p[1] 
== plist[direction][1]:\n return 0\n else:\n\n if chessboard[plist[direction]]+chessboard[p]==0:\n return chessboard[plist[direction]]\n elif chessboard[plist[direction]]==0:\n return 0\n else:\n return self.test(chessboard, plist[direction], direction,color)\n\n elif direction in yedge:\n\n if p[0] == plist[direction][0]:\n return 0\n else:\n\n if chessboard[plist[direction]]+chessboard[p]==0:\n return chessboard[plist[direction]]\n elif chessboard[plist[direction]]==0:\n return 0\n else:\n return self.test(chessboard, plist[direction], direction,color)\n\n else: \n result=[]\n output=chessboard.copy()\n for d in range(len(plist)):\n if chessboard[plist[d]] !=0:\n result.append(self.test(chessboard, plist[d], d,color))\n else: \n result.append(0)\n if color in result:\n now=p\n for i in range(len(plist)):\n if result[i] == color:\n while output[now]!=color:\n output[now]=color\n x=now[0]\n y=now[1]\n nplist=[(x-1,y-1),(x-1,y),(x-1,y+1),(x,y-1),(x,y+1),(x+1,y-1),(x+1,y),(x+1,y+1)] \n now=nplist[i]\n return output\n else:\n return [0]\n\n\n def getscore1(self,idx,chessboard,color,depth,max_depth):\n \n if depth==0:\n return self.evaluate1(chessboard)\n else:\n if depth==max_depth:\n resultlist=[]\n otherlist=[] \n maxscore=-1000000 \n for p in idx:\n result=self.test(chessboard,p,-1,color)\n if color in result:\n idx1 = np.where(result == COLOR_NONE)\n idx1 = list(zip(idx1[0], idx1[1])) \n score=self.getscore(idx1, result, -color, depth-1,max_depth) \n \n if score>maxscore:\n resultlist.append(p)\n maxscore=score\n \n else: \n otherlist.append(p)\n for i in resultlist:\n otherlist.append(i) \n return otherlist \n else:\n if (max_depth-depth+1) %2 ==1:\n maxscore=-1000000 \n for p in idx:\n result=self.test(chessboard,p,-1,color)\n if color in result:\n idx1 = np.where(result == COLOR_NONE)\n idx1 = list(zip(idx1[0], idx1[1])) \n score=self.getscore(idx1, result, -color, depth-1,max_depth) \n maxscore=max(score,maxscore)\n return maxscore\n else:\n minscore=1000000 \n for p in idx:\n result=self.test(chessboard,p,-1,color)\n if color in result:\n idx1 = np.where(result == COLOR_NONE)\n idx1 = list(zip(idx1[0], idx1[1])) \n score=self.getscore(idx1, result, -color, depth-1,max_depth)\n minscore=min(score,minscore)\n return minscore\n \n \n\n\n def getscore(self,idx,chessboard,color,depth,max_depth):\n\n if depth==0:\n return self.evaluate(chessboard)\n else:\n if depth==max_depth:\n resultlist=[]\n otherlist=[] \n maxscore=-1000000 \n for p in idx:\n result=self.test(chessboard,p,-1,color)\n if color in result:\n idx1 = np.where(result == COLOR_NONE)\n idx1 = list(zip(idx1[0], idx1[1])) \n score=self.getscore(idx1, result, -color, depth-1,max_depth) \n \n if score>maxscore:\n resultlist.append(p)\n maxscore=score\n \n else: \n otherlist.append(p)\n for i in resultlist:\n otherlist.append(i) \n return otherlist \n else:\n if (max_depth-depth+1) %2 ==1:\n maxscore=-1000000 \n for p in idx:\n result=self.test(chessboard,p,-1,color)\n if color in result:\n idx1 = np.where(result == COLOR_NONE)\n idx1 = list(zip(idx1[0], idx1[1])) \n score=self.getscore(idx1, result, -color, depth-1,max_depth) \n maxscore=max(score,maxscore)\n return maxscore\n else:\n minscore=1000000 \n for p in idx:\n result=self.test(chessboard,p,-1,color)\n if color in result:\n idx1 = np.where(result == COLOR_NONE)\n idx1 = list(zip(idx1[0], idx1[1])) \n score=self.getscore(idx1, result, -color, depth-1,max_depth)\n minscore=min(score,minscore)\n return minscore\n \n\n # def minimax(self,chessboard,color,idx):\n 
resultlist=[]\n otherlist=[]\n tempscore=-100000\n for p in idx:\n result=self.test(chessboard, p, -1, color)\n \n if color in result:\n idx1 = np.where(result == COLOR_NONE)\n idx1 = list(zip(idx1[0], idx1[1]))\n\n minscore=1000000\n for p1 in idx1:\n result1=self.test(result,p1,-1,-color)\n \n if -color in result1:\n idx2 = np.where(result1 == COLOR_NONE)\n idx2 = list(zip(idx2[0], idx2[1]))\n\n maxscore=-1000000 \n for p2 in idx2:\n result2=self.test(result1,p2,-1,color) \n \n if color in result2:\n idx3 = np.where(result2 == COLOR_NONE)\n idx3 = list(zip(idx3[0], idx3[1]))\n \n minscore2=1000000\n for p3 in idx3:\n result3=self.test(result2, p3, -1, -color)\n if -color in result3:\n evalscore=self.evaluate(result3, color)\n # print(evalscore)\n minscore2=min(minscore2,evalscore)\n \n maxscore=max(maxscore,minscore2)\n \n minscore=min(minscore,maxscore)\n\n if minscore>tempscore:\n # print(minscore)\n resultlist.append(p)\n tempscore=minscore\n \n else: \n otherlist.append(p)\n for i in resultlist:\n otherlist.append(i)\n \n return otherlist\n \n\n\n def go(self, chessboard):\n idx = np.where(chessboard == COLOR_NONE)\n idx = list(zip(idx[0], idx[1]))\n self.candidate_list.clear()\n start = time.time()\n self.candidate_list=self.getscore1(idx, chessboard, self.color, 2,2)\n run_time = (time.time() - start)\n print(run_time)\n self.candidate_list=self.getscore(idx, chessboard, self.color, 3,3)\n run_time = (time.time() - start)\n print(run_time)\n # self.candidate_list=self.getscore(idx, chessboard, self.color, self.max_depth2,4)\n # self.candidate_list=self.getscore(idx, chessboard, self.color, self.max_depth3,5)\n # self.candidate_list=self.getscore(idx, chessboard, self.color, self.max_depth3,6)\n # self.candidate_list=self.getscore(idx, chessboard, self.color, self.max_depth3,7)\n\n\n# time measurement\n# start = time.time()\n# run_time = (time.time() - start)\n\ndef takefirst(elem):\n return elem[0]\n\nif __name__ == '__main__':\n\n chessboard=[\n [ 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0,-1, 1, 0, 0, 0],\n [ 0, 0, 0, 1,-1, 1, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0],\n [ 0, 0, 0, 0, 0, 0, 0, 0]\n ] \n\n # chessboard=[\n # [0,0,0,0,],\n # [0,-1,1,0,],\n # [0,1,-1,0,],\n # [0,0,0,0,],\n # ]\n AI=AI(8,-1,5)\n # AI.max_depth=3\n chessboard=np.array(chessboard)\n \n AI.go(chessboard)\n\n # AI.candidate_list.sort(key=takefirst)\n print(AI.candidate_list)\n " ]
[ [ "numpy.array", "numpy.where" ] ]
jpambrun/pynndescent
[ "0e31328e3f509fad4d7d06df9830a932a65c3436" ]
[ "pynndescent/tests/test_pynndescent_.py" ]
[ "import os\nimport io\nimport re\nfrom contextlib import redirect_stdout\n\nfrom nose.tools import assert_greater_equal, assert_true, assert_equal\nfrom nose import SkipTest\n\nimport numpy as np\nfrom scipy import sparse\nfrom sklearn.neighbors import KDTree\nfrom sklearn.preprocessing import normalize\n\nfrom pynndescent import NNDescent, PyNNDescentTransformer\n\nnp.random.seed(42)\nspatial_data = np.random.randn(10, 20)\nspatial_data = np.vstack(\n [spatial_data, np.zeros((2, 20))]\n) # Add some all zero graph_data for corner case test\n\nnn_data = np.random.uniform(0, 1, size=(1000, 5))\nnn_data = np.vstack(\n [nn_data, np.zeros((2, 5))]\n) # Add some all zero graph_data for corner case test\n# for_sparse_nn_data = np.random.uniform(0, 1, size=(1002, 500))\n# binary_nn_data = np.random.choice(a=[False, True], size=(1000, 500), p=[0.1, 1 - 0.1])\n# binary_nn_data = np.vstack(\n# [binary_nn_data, np.zeros((2, 500))]\n# ) # Add some all zero graph_data for corner case test\n# sparse_nn_data = sparse.csr_matrix(for_sparse_nn_data * binary_nn_data)\nsparse_nn_data = sparse.random(1000, 50, density=0.5, format=\"csr\")\n# sparse_nn_data = sparse.csr_matrix(nn_data)\n\n\ndef test_nn_descent_neighbor_accuracy():\n knn_indices, _ = NNDescent(\n nn_data, \"euclidean\", {}, 10, random_state=np.random\n )._neighbor_graph\n\n tree = KDTree(nn_data)\n true_indices = tree.query(nn_data, 10, return_distance=False)\n\n num_correct = 0.0\n for i in range(nn_data.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (nn_data.shape[0] * 10)\n assert_greater_equal(\n percent_correct,\n 0.98,\n \"NN-descent did not get 99% \" \"accuracy on nearest neighbors\",\n )\n\n\ndef test_angular_nn_descent_neighbor_accuracy():\n knn_indices, _ = NNDescent(\n nn_data, \"cosine\", {}, 10, random_state=np.random\n )._neighbor_graph\n\n angular_data = normalize(nn_data, norm=\"l2\")\n tree = KDTree(angular_data)\n true_indices = tree.query(angular_data, 10, return_distance=False)\n\n num_correct = 0.0\n for i in range(nn_data.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (nn_data.shape[0] * 10)\n assert_greater_equal(\n percent_correct,\n 0.98,\n \"NN-descent did not get 99% \" \"accuracy on nearest neighbors\",\n )\n\n\ndef test_sparse_nn_descent_neighbor_accuracy():\n knn_indices, _ = NNDescent(\n sparse_nn_data, \"euclidean\", n_neighbors=20, random_state=None\n )._neighbor_graph\n\n tree = KDTree(sparse_nn_data.toarray())\n true_indices = tree.query(sparse_nn_data.toarray(), 10, return_distance=False)\n\n num_correct = 0.0\n for i in range(sparse_nn_data.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (sparse_nn_data.shape[0] * 10)\n assert_greater_equal(\n percent_correct,\n 0.85,\n \"Sparse NN-descent did not get 95% \" \"accuracy on nearest neighbors\",\n )\n\n\ndef test_sparse_angular_nn_descent_neighbor_accuracy():\n knn_indices, _ = NNDescent(\n sparse_nn_data, \"cosine\", {}, 20, random_state=None\n )._neighbor_graph\n\n angular_data = normalize(sparse_nn_data, norm=\"l2\").toarray()\n tree = KDTree(angular_data)\n true_indices = tree.query(angular_data, 10, return_distance=False)\n\n num_correct = 0.0\n for i in range(sparse_nn_data.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (sparse_nn_data.shape[0] * 10)\n assert_greater_equal(\n percent_correct,\n 
0.85,\n \"Sparse angular NN-descent did not get 98% \" \"accuracy on nearest neighbors\",\n )\n\n\ndef test_nn_descent_query_accuracy():\n nnd = NNDescent(nn_data[200:], \"euclidean\", n_neighbors=10, random_state=None)\n knn_indices, _ = nnd.query(nn_data[:200], k=10, epsilon=0.2)\n\n tree = KDTree(nn_data[200:])\n true_indices = tree.query(nn_data[:200], 10, return_distance=False)\n\n num_correct = 0.0\n for i in range(true_indices.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (true_indices.shape[0] * 10)\n assert_greater_equal(\n percent_correct,\n 0.95,\n \"NN-descent query did not get 95% \" \"accuracy on nearest neighbors\",\n )\n\n\n# @SkipTest\ndef test_sparse_nn_descent_query_accuracy():\n nnd = NNDescent(\n sparse_nn_data[200:], \"euclidean\", n_neighbors=15, random_state=None\n )\n knn_indices, _ = nnd.query(sparse_nn_data[:200], k=10, epsilon=0.24)\n\n tree = KDTree(sparse_nn_data[200:].toarray())\n true_indices = tree.query(sparse_nn_data[:200].toarray(), 10, return_distance=False)\n\n num_correct = 0.0\n for i in range(true_indices.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (true_indices.shape[0] * 10)\n assert_greater_equal(\n percent_correct,\n 0.95,\n \"Sparse NN-descent query did not get 95% \" \"accuracy on nearest neighbors\",\n )\n\n\ndef test_transformer_equivalence():\n N_NEIGHBORS = 15\n EPSILON = 0.15\n train = nn_data[:400]\n test = nn_data[:200]\n\n nnd = NNDescent(data=train, n_neighbors=N_NEIGHBORS, random_state=42)\n indices, dists = nnd.query(test, k=N_NEIGHBORS, epsilon=EPSILON)\n sort_idx = np.argsort(indices, axis=1)\n indices_sorted = np.vstack(\n [indices[i, sort_idx[i]] for i in range(sort_idx.shape[0])]\n )\n dists_sorted = np.vstack([dists[i, sort_idx[i]] for i in range(sort_idx.shape[0])])\n\n transformer = PyNNDescentTransformer(\n n_neighbors=N_NEIGHBORS, search_epsilon=EPSILON, random_state=42\n ).fit(train)\n Xt = transformer.transform(test).sorted_indices()\n\n assert np.all(Xt.indices == indices_sorted.flatten())\n assert np.allclose(Xt.data, dists_sorted.flat)\n\n\ndef test_random_state_none():\n knn_indices, _ = NNDescent(\n nn_data, \"euclidean\", {}, 10, random_state=None\n )._neighbor_graph\n\n tree = KDTree(nn_data)\n true_indices = tree.query(nn_data, 10, return_distance=False)\n\n num_correct = 0.0\n for i in range(nn_data.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n percent_correct = num_correct / (spatial_data.shape[0] * 10)\n assert_greater_equal(\n percent_correct,\n 0.99,\n \"NN-descent did not get 99% \" \"accuracy on nearest neighbors\",\n )\n\n\ndef test_deterministic():\n seed = np.random.RandomState(42)\n\n x1 = seed.normal(0, 100, (1000, 50))\n x2 = seed.normal(0, 100, (1000, 50))\n\n index1 = NNDescent(x1, random_state=np.random.RandomState(42))\n neighbors1, distances1 = index1.query(x2)\n\n index2 = NNDescent(x1, random_state=np.random.RandomState(42))\n neighbors2, distances2 = index2.query(x2)\n\n np.testing.assert_equal(neighbors1, neighbors2)\n np.testing.assert_equal(distances1, distances2)\n\n\n# This tests a recursion error on cosine metric reported at:\n# https://github.com/lmcinnes/umap/issues/99\n# graph_data used is a cut-down version of that provided by @scharron\n# It contains lots of all-zero vectors and some other duplicates\ndef test_rp_trees_should_not_stack_overflow_with_duplicate_data():\n this_dir = 
os.path.dirname(os.path.abspath(__file__))\n data_path = os.path.join(this_dir, \"test_data/cosine_hang.npy\")\n data = np.load(data_path)\n\n n_neighbors = 10\n knn_indices, _ = NNDescent(\n data, \"cosine\", {}, n_neighbors, random_state=np.random, n_trees=20\n )._neighbor_graph\n\n for i in range(data.shape[0]):\n assert_equal(\n len(knn_indices[i]),\n len(np.unique(knn_indices[i])),\n \"Duplicate graph_indices in knn graph\",\n )\n\n\ndef test_deduplicated_data_behaves_normally():\n this_dir = os.path.dirname(os.path.abspath(__file__))\n data_path = os.path.join(this_dir, \"test_data/cosine_hang.npy\")\n data = np.unique(np.load(data_path), axis=0)\n data = data[~np.all(data == 0, axis=1)]\n data = data[:1000]\n\n n_neighbors = 10\n knn_indices, _ = NNDescent(\n data, \"cosine\", {}, n_neighbors, random_state=np.random, n_trees=20\n )._neighbor_graph\n\n for i in range(data.shape[0]):\n assert_equal(\n len(knn_indices[i]),\n len(np.unique(knn_indices[i])),\n \"Duplicate graph_indices in knn graph\",\n )\n\n angular_data = normalize(data, norm=\"l2\")\n tree = KDTree(angular_data)\n true_indices = tree.query(angular_data, n_neighbors, return_distance=False)\n\n num_correct = 0\n for i in range(data.shape[0]):\n num_correct += np.sum(np.in1d(true_indices[i], knn_indices[i]))\n\n proportion_correct = num_correct / (data.shape[0] * n_neighbors)\n assert_greater_equal(\n proportion_correct,\n 0.95,\n \"NN-descent did not get 95%\" \" accuracy on nearest neighbors\",\n )\n\n\ndef test_output_when_verbose_is_true():\n out = io.StringIO()\n with redirect_stdout(out):\n _ = NNDescent(\n data=spatial_data,\n metric=\"euclidean\",\n metric_kwds={},\n n_neighbors=4,\n random_state=np.random,\n n_trees=5,\n n_iters=2,\n verbose=True,\n )\n output = out.getvalue()\n assert_true(re.match(\"^.*5 trees\", output, re.DOTALL))\n assert_true(re.match(\"^.*2 iterations\", output, re.DOTALL))\n\n\ndef test_no_output_when_verbose_is_false():\n out = io.StringIO()\n with redirect_stdout(out):\n _ = NNDescent(\n data=spatial_data,\n metric=\"euclidean\",\n metric_kwds={},\n n_neighbors=4,\n random_state=np.random,\n n_trees=5,\n n_iters=2,\n verbose=False,\n )\n output = out.getvalue().strip()\n assert_equal(len(output), 0)\n\n\n# same as the previous two test, but this time using the PyNNDescentTransformer\n# interface\ndef test_transformer_output_when_verbose_is_true():\n out = io.StringIO()\n with redirect_stdout(out):\n _ = PyNNDescentTransformer(\n n_neighbors=4,\n metric=\"euclidean\",\n metric_kwds={},\n random_state=np.random,\n n_trees=5,\n n_iters=2,\n verbose=True,\n ).fit_transform(spatial_data)\n output = out.getvalue()\n assert_true(re.match(\"^.*5 trees\", output, re.DOTALL))\n assert_true(re.match(\"^.*2 iterations\", output, re.DOTALL))\n\n\ndef test_transformer_output_when_verbose_is_false():\n out = io.StringIO()\n with redirect_stdout(out):\n _ = PyNNDescentTransformer(\n n_neighbors=4,\n metric=\"standardised_euclidean\",\n metric_kwds={\"sigma\": np.ones(spatial_data.shape[1])},\n random_state=np.random,\n n_trees=5,\n n_iters=2,\n verbose=False,\n ).fit_transform(spatial_data)\n output = out.getvalue().strip()\n assert_equal(len(output), 0)\n" ]
[ [ "numpy.random.uniform", "numpy.allclose", "numpy.load", "scipy.sparse.random", "numpy.ones", "numpy.zeros", "numpy.testing.assert_equal", "numpy.random.randn", "numpy.random.seed", "numpy.argsort", "sklearn.neighbors.KDTree", "numpy.random.RandomState", "numpy.in1d", "numpy.all", "sklearn.preprocessing.normalize", "numpy.unique" ] ]
Sujit-O/gemben
[ "4577914dbe4b39559093a6e9517c666b8e69c052" ]
[ "gemben/utils/graph_util.py" ]
[ "try: import cPickle as pickle\r\nexcept: import pickle\r\nimport numpy as np\r\nimport networkx as nx\r\nimport random\r\nimport itertools\r\nfrom time import time\r\nimport pdb\r\n\r\n\r\n\r\ndef transform_DiGraph_to_adj(di_graph):\r\n \"\"\"Function to convert the directed graph to adjacency matrix.\"\"\"\r\n n = di_graph.number_of_nodes()\r\n adj = np.zeros((n, n))\r\n for st, ed, w in di_graph.edges(data='weight', default=1):\r\n adj[st, ed] = w\r\n return adj\r\n\r\n\r\ndef transform_adj_to_DiGraph(adj):\r\n \"\"\"Function to convert the adjacency matrix into directed graph.\"\"\"\r\n n = adj.shape[0]\r\n di_graph = nx.DiGraph()\r\n di_graph.add_nodes_from(range(n))\r\n for i in xrange(n):\r\n for j in xrange(n):\r\n if(i != j):\r\n if(adj[i, j] > 0):\r\n di_graph.add_edge(i, j, weight=adj[i, j])\r\n return di_graph\r\n\r\n\r\ndef get_lcc(di_graph):\r\n di_graph = di_graph.to_undirected().to_directed()\r\n di_graph = max(nx.weakly_connected_component_subgraphs(di_graph), key=len)\r\n tdl_nodes = di_graph.nodes()\r\n nodeListMap = dict(zip(tdl_nodes, range(len(tdl_nodes))))\r\n di_graph = nx.relabel_nodes(di_graph, nodeListMap, copy=True)\r\n return di_graph, nodeListMap\r\n\r\n\r\ndef get_lcc_undirected(G):\r\n G2 = max(nx.connected_component_subgraphs(G), key=len)\r\n tdl_nodes = G2.nodes()\r\n nodeListMap = dict(zip(tdl_nodes, range(len(tdl_nodes))))\r\n G2 = nx.relabel_nodes(G2, nodeListMap, copy=True)\r\n return G2, nodeListMap\r\n\r\ndef get_nk_lcc_undirected(G):\r\n G2 = max(nx.connected_component_subgraphs(G), key=len)\r\n tdl_nodes = G2.nodes()\r\n nodeListMap = dict(zip(tdl_nodes, range(len(tdl_nodes))))\r\n G2 = nx.relabel_nodes(G2, nodeListMap, copy=True)\r\n return G2, nodeListMap\r\n\r\ndef print_graph_stats(G):\r\n \"\"\"Function to print the graph statistics.\"\"\"\r\n print('# of nodes: %d, # of edges: %d' % (G.number_of_nodes(),\r\n G.number_of_edges()))\r\n\r\n\r\ndef sample_graph_rw(di_graph, n_sampled_nodes=None,\r\n random_res_p=0.01, verbose=False):\r\n \"\"\"Function to return the sampled graph.\"\"\"\r\n t1 = time()\r\n print('\\t\\tRandom walk sampling. 
# nodes to sample: %d' % n_sampled_nodes)\r\n node_num = di_graph.number_of_nodes()\r\n node_l = [[]] * n_sampled_nodes\r\n s_node_idx = 0\r\n if n_sampled_nodes and node_num > n_sampled_nodes:\r\n # Choose the first node such that it has an outgoing edge\r\n tempNb = []\r\n while not tempNb:\r\n curr_node = np.random.random_integers(node_num) - 1\r\n tempNb = di_graph.neighbors(curr_node)\r\n node_l[s_node_idx] = curr_node\r\n s_node_idx += 1\r\n while s_node_idx < n_sampled_nodes:\r\n # # node_l_inv = {v: k for k, v in enumerate(node_l[:s_node_idx])}\r\n # # sampled_graph = nx.DiGraph()\r\n # # sampled_graph.add_nodes_from(range(n_sampled_nodes))\r\n # # for st, ed, w in di_graph.edges_iter(data='weight', default=1):\r\n # # try:\r\n # # v_i = node_l_inv[st]\r\n # # v_j = node_l_inv[ed]\r\n # # sampled_graph.add_edge(v_i, v_j, weight=w)\r\n # # except:\r\n # # continue\r\n # if verbose:\r\n # print('Sample edges: %d, total edges: %d' % (sampled_graph.number_of_edges(), di_graph.number_of_edges()))\r\n # if sampled_graph.number_of_edges() == di_graph.number_of_edges():\r\n # break\r\n if verbose:\r\n print('\\t\\t\\t# nodes sampled: %d' % s_node_idx)\r\n try:\r\n random_restart = np.random.rand()\r\n if random_restart < random_res_p:\r\n tempNb = []\r\n while not tempNb:\r\n new_sampled_node = \\\r\n np.random.random_integers(node_num) - 1\r\n tempNb = di_graph.neighbors(new_sampled_node)\r\n else:\r\n new_sampled_node = np.random.choice(\r\n di_graph.neighbors(curr_node)\r\n )\r\n except ValueError:\r\n # Restart if there is no outgoing edge\r\n tempNb = []\r\n while not tempNb:\r\n new_sampled_node = np.random.random_integers(node_num) - 1\r\n tempNb = di_graph.neighbors(new_sampled_node)\r\n curr_node = new_sampled_node\r\n if curr_node not in node_l[:s_node_idx]:\r\n node_l[s_node_idx] = new_sampled_node\r\n s_node_idx += 1\r\n try:\r\n node_l_inv = {v: k for k, v in enumerate(node_l[:s_node_idx])}\r\n except:\r\n pdb.set_trace()\r\n sampled_graph = nx.DiGraph()\r\n sampled_graph.add_nodes_from(range(s_node_idx))\r\n for st, ed, w in di_graph.edges.data('weight', default=1):\r\n try:\r\n v_i = node_l_inv[st]\r\n v_j = node_l_inv[ed]\r\n sampled_graph.add_edge(v_i, v_j, weight=w)\r\n except:\r\n continue\r\n print('\\t\\tSampled graph: n-%d, m-%d, time taken: %f sec' % (\r\n sampled_graph.number_of_nodes(),\r\n sampled_graph.number_of_edges(),\r\n time() - t1)\r\n )\r\n return sampled_graph, node_l\r\n else:\r\n print('\\t\\tSampled graph: n-%d, m-%d, time taken: %f sec' % (\r\n di_graph.number_of_nodes(),\r\n di_graph.number_of_edges(),\r\n time() - t1)\r\n )\r\n return di_graph, np.arange(di_graph.number_of_nodes())\r\n\r\n\r\ndef sample_graph(di_graph, n_sampled_nodes=None):\r\n \"\"\"Function to sample the graph.\"\"\"\r\n node_num = di_graph.number_of_nodes()\r\n if n_sampled_nodes and node_num > n_sampled_nodes:\r\n node_l = np.random.choice(node_num, n_sampled_nodes, replace=False)\r\n node_l_inv = {v: k for k, v in enumerate(node_l)}\r\n sampled_graph = nx.DiGraph()\r\n sampled_graph.add_nodes_from(range(n_sampled_nodes))\r\n for st, ed, w in di_graph.edges(data='weight', default=1):\r\n try:\r\n v_i = node_l_inv[st]\r\n v_j = node_l_inv[ed]\r\n sampled_graph.add_edge(v_i, v_j, weight=w)\r\n except:\r\n continue\r\n return sampled_graph, node_l\r\n else:\r\n return di_graph, np.arange(di_graph.number_of_nodes())\r\n\r\n\r\ndef randwalk_DiGraph_to_adj(di_graph, node_frac=0.1,\r\n n_walks_per_node=5, len_rw=2):\r\n \"\"\"Function to perform a randomwalk on directed graph and 
return the adjacency list.\"\"\"\r\n t0 = time.time()\r\n n = di_graph.number_of_nodes()\r\n adj = np.zeros((n, n))\r\n rw_node_num = int(node_frac * n)\r\n rw_node_list = np.random.choice(n, size=[rw_node_num],\r\n replace=False, p=None)\r\n for node in rw_node_list:\r\n for walk in range(n_walks_per_node):\r\n cur_node = node\r\n for step in range(len_rw):\r\n cur_neighbors = di_graph.neighbors(cur_node)\r\n try:\r\n neighbor_node = np.random.choice(cur_neighbors)\r\n except:\r\n continue\r\n try:\r\n adj[cur_node, neighbor_node] = di_graph.get_edge_data(\r\n cur_node,\r\n neighbor_node\r\n )['weight']\r\n adj[neighbor_node, cur_node] = di_graph.get_edge_data(\r\n cur_node,\r\n neighbor_node\r\n )['weight']\r\n except KeyError:\r\n adj[cur_node, neighbor_node] = 1\r\n adj[neighbor_node, cur_node] = 1\r\n cur_node = neighbor_node\r\n print('Time taken for random walk on {0} nodes = {1}'.format(n, time.time() - t0))\r\n return adj\r\n\r\n\r\ndef addChaos(di_graphs, k):\r\n \"\"\"Function to add randomness in the graph.\"\"\"\r\n anomaly_time_steps = sorted(random.sample(range(len(di_graphs)), k))\r\n for t in anomaly_time_steps:\r\n n = di_graphs[t].number_of_nodes()\r\n e = di_graphs[t].number_of_edges()\r\n di_graphs[t] = nx.fast_gnp_random_graph(n, e / float(n * (n - 1)),\r\n seed=None, directed=False)\r\n di_graphs[t] = di_graphs[t].to_directed()\r\n return di_graphs, anomaly_time_steps\r\n\r\n\r\ndef addNodeAnomalies(di_graphs, p, k):\r\n \"\"\"Function to add anomalous edges in the graph.\"\"\"\r\n anomaly_time_steps = sorted(random.sample(range(len(di_graphs)), k))\r\n for t in anomaly_time_steps:\r\n n_nodes = di_graphs[t].number_of_nodes()\r\n anomalous_nodes_idx = np.random.choice([0, 1],\r\n size=(n_nodes, 1),\r\n p=(1 - p, p))\r\n node_list = np.array(di_graphs[t].nodes())\r\n node_list = node_list.reshape((n_nodes, 1))\r\n anomalous_nodes = np.multiply(anomalous_nodes_idx, node_list)\r\n anomalous_nodes = anomalous_nodes[anomalous_nodes > 0]\r\n # pdb.set_trace()\r\n di_graphs[t].add_edges_from(\r\n itertools.product(list(anomalous_nodes), range(n_nodes))\r\n )\r\n di_graphs[t].add_edges_from(\r\n itertools.product(range(n_nodes), list(anomalous_nodes))\r\n )\r\n print('Nodes: %d, Edges: %d' % (di_graphs[t].number_of_nodes(),\r\n di_graphs[t].number_of_edges()))\r\n return anomaly_time_steps\r\n\r\n\r\ndef saveGraphToEdgeListTxt(graph, file_name):\r\n with open(file_name, 'w') as f:\r\n f.write('%d\\n' % graph.number_of_nodes())\r\n f.write('%d\\n' % graph.number_of_edges())\r\n for i, j, w in graph.edges(data='weight', default=1):\r\n f.write('%d %d %f\\n' % (i, j, w))\r\n\r\n\r\n\r\ndef convertNkToNx(G_nk):\r\n G_nx = nx.Graph()\r\n for i, j in G_nk.edges():\r\n G_nx.add_edge(i,j)\r\n return G_nx\r\n\r\ndef saveGraphToEdgeListTxtn2v(graph, file_name):\r\n with open(file_name, 'w') as f:\r\n for i, j, w in graph.edges(data='weight', default=1):\r\n f.write('%d %d %f\\n' % (i, j, 1))\r\n\r\n\r\ndef loadGraphFromEdgeListTxt(file_name, directed=True):\r\n with open(file_name, 'r') as f:\r\n # n_nodes = f.readline()\r\n # f.readline() # Discard the number of edges\r\n if directed:\r\n G = nx.DiGraph()\r\n else:\r\n G = nx.Graph()\r\n for line in f:\r\n edge = line.strip().split()\r\n if len(edge) == 3:\r\n w = float(edge[2])\r\n else:\r\n w = 1.0\r\n G.add_edge(int(edge[0]), int(edge[1]), weight=w)\r\n return G\r\n\r\n\r\ndef loadEmbedding(file_name):\r\n with open(file_name, 'r') as f:\r\n n, d = f.readline().strip().split()\r\n X = np.zeros((int(n), int(d)))\r\n for line in 
f:\r\n emb = line.strip().split()\r\n emb_fl = [float(emb_i) for emb_i in emb[1:]]\r\n X[int(emb[0]), :] = emb_fl\r\n return X\r\n\r\n\r\ndef loadSBMGraph(file_prefix):\r\n graph_file = file_prefix + '_graph.gpickle'\r\n G = nx.read_gpickle(graph_file)\r\n node_file = file_prefix + '_node.pkl'\r\n with open(node_file, 'rb') as fp:\r\n node_community = pickle.load(fp)\r\n return (G, node_community)\r\n\r\n\r\ndef loadRealGraphSeries(file_prefix, startId, endId):\r\n graphs = []\r\n for file_id in range(startId, endId + 1):\r\n graph_file = file_prefix + str(file_id) + '_graph.gpickle'\r\n graphs.append(nx.read_gpickle(graph_file))\r\n return graphs\r\n\r\n\r\ndef saveRealGraphSeries(G, file_prefix='graphs/day_'):\r\n for idx in range(len(G)):\r\n f_name = file_prefix + str(idx) + \"_graph.gpickle\"\r\n # cPickle.dump(G[idx], open(f_name, 'wb'))\r\n nx.write_gpickle(G[idx], f_name)\r\n\r\n\r\ndef loadDynamicSBmGraph(file_perfix, length):\r\n graph_files = ['%s_%d_graph.gpickle' % (file_perfix, i) for i in xrange(length)]\r\n info_files = ['%s_%d_node.pkl' % (file_perfix, i) for i in xrange(length)]\r\n\r\n graphs = [nx.read_gpickle(graph_file) for graph_file in graph_files]\r\n\r\n nodes_comunities = []\r\n perturbations = []\r\n for info_file in info_files:\r\n with open(info_file, 'rb') as fp:\r\n node_infos = pickle.load(fp)\r\n nodes_comunities.append(node_infos['community'])\r\n perturbations.append(node_infos['perturbation'])\r\n\r\n return zip(graphs, nodes_comunities, perturbations)\r\n\r\n\r\ndef saveDynamicSBmGraph(file_perfix, dynamic_graphs):\r\n length = len(dynamic_graphs)\r\n graph_files = ['%s_%d_graph.gpickle' % (file_perfix, i) for i in xrange(length)]\r\n info_files = ['%s_%d_node.pkl' % (file_perfix, i) for i in xrange(length)]\r\n\r\n for i in xrange(length):\r\n # save graph\r\n nx.write_gpickle(dynamic_graphs[i][0], graph_files[i])\r\n # save additional node info\r\n with open(info_files[i], 'wb') as fp:\r\n node_infos = {}\r\n node_infos['community'] = dynamic_graphs[i][1]\r\n node_infos['perturbation'] = dynamic_graphs[i][2]\r\n pickle.dump(node_infos, fp)\r\n" ]
[ [ "numpy.multiply", "numpy.zeros", "numpy.random.choice", "numpy.random.rand", "numpy.random.random_integers" ] ]
daniella-brovkina-isw/panda_robot
[ "a89243af46cbcc47485f1d035af29695c9c89a9e" ]
[ "demos/demo_endeffector_keyboard.py" ]
[ "#!/usr/bin/env python\n\n# /***************************************************************************\n\n# \n# @package: panda_robot\n# @author: Saif Sidhik <[email protected]>\n# \n\n# **************************************************************************/\n\n# /***************************************************************************\n# Copyright (c) 2019-2021, Saif Sidhik\n# Copyright (c) 2013-2018, Rethink Robotics Inc.\n \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# **************************************************************************/\n\n\nimport rospy\nimport numpy as np\nfrom panda_robot import PandaArm\nfrom franka_dataflow.getch import getch\nfrom future.utils import viewitems # for python2&3 efficient compatibility\n\n\"\"\"\nPanda Robot IK Example: End-effector Keyboard Control\n\nUse your dev machine's keyboard to control robot end-effector position.\n\nEach key corresponds to increasing or decreasing the angle\nof a joint on the robot arm. For ease, use the numeral keys in the\nnumberpad of the keyboard.\n\n:info:\n Demo showing low-level position control using Franka ROS Interface.\n Run the demo and press '?' on keyboard for command instructions.\n Each key corresponds to increasing or decreasing the end-effector\n position along the X Y Z axes in the base frame.\n\n Disclaimer: This code is only for demonstration purpose, and does not\n show the ideal way to use the interface code.\n\n Warning: The robot will move according to the key press. Also,\n note that the motion will be slightly jerky (small noise from the\n robot joints). This is because of the non-smooth commands sent to\n the robot via direct joint position control (using IK). \n Using impedance control will produce smoother motion by definition.\n\"\"\"\n\npos_increment = 0.01\n# ori_increment = 0.001\ndef map_keyboard():\n \"\"\"\n Map keyboard keys to robot joint motion. 
Keybindings can be \n found when running the script.\n \"\"\"\n\n limb = PandaArm()\n\n has_gripper = limb.get_gripper() is not None\n\n def set_ee_target(action, value):\n pos, ori = limb.ee_pose()\n\n if action == 'position':\n pos += value\n status, j_des = limb.inverse_kinematics(pos, ori)\n if status:\n limb.exec_position_cmd(j_des)\n\n def set_g(action):\n if has_gripper:\n if action == \"close\":\n limb.get_gripper().close()\n elif action == \"open\":\n limb.get_gripper().open()\n elif action == \"calibrate\":\n limb.get_gripper().calibrate()\n def reset_robot(args):\n limb.untuck()\n\n bindings = {\n '5': (set_ee_target, ['position', np.asarray([pos_increment, 0, 0])], \"x increase\"),\n '2': (set_ee_target, ['position', np.asarray([-pos_increment, 0, 0])], \"x decrease\"),\n '1': (set_ee_target, ['position', np.asarray([0, pos_increment, 0])], \"y increase\"),\n '3': (set_ee_target, ['position', np.asarray([0, -pos_increment, 0])], \"y decrease\"),\n '7': (set_ee_target, ['position', np.asarray([0, 0, pos_increment])], \"z increase\"),\n '4': (set_ee_target, ['position', np.asarray([0, 0, -pos_increment])], \"z decrease\"),\n 'r': (reset_robot, [None], \"reset to neutral pose\")\n }\n if has_gripper:\n bindings.update({\n '8': (set_g, \"close\", \"close gripper\"),\n '9': (set_g, \"open\", \"open gripper\"),\n 'i': (set_g, \"calibrate\", \"calibrate gripper\")\n })\n done = False\n rospy.logwarn(\"Controlling end-effector position. Press ? for help, Esc to quit. For ease, use the numeral keys in the numberpad of the keyboard.\\n\\nWARNING: The motion will be slightly jerky!!\\n\")\n while not done and not rospy.is_shutdown():\n c = getch()\n if c:\n #catch Esc or ctrl-c\n if c in ['\\x1b', '\\x03']:\n done = True\n rospy.signal_shutdown(\"Example finished.\")\n elif c in bindings:\n cmd = bindings[c]\n if c == '8' or c == 'i' or c == '9':\n cmd[0](cmd[1])\n print(\"command: %s\" % (cmd[2],))\n else:\n #expand binding to something like \"set_j(right, 'j0', 0.1)\"\n cmd[0](*cmd[1])\n print(\"command: %s\" % (cmd[2],))\n else:\n print(\"key bindings: \")\n print(\" Esc: Quit\")\n print(\" ?: Help\")\n for key, val in sorted(viewitems(bindings),\n key=lambda x: x[1][2]):\n print(\" %s: %s\" % (key, val[2]))\n\ndef main():\n \"\"\"Panda Robot IK Example: End-effector Keyboard Control\n\n Use your dev machine's keyboard to control robot end-effector position.\n \"\"\"\n\n print(\"Initializing node... \")\n rospy.init_node(\"fri_example_joint_position_keyboard\")\n print(\"Getting robot state... \")\n\n def clean_shutdown():\n print(\"\\nExiting example.\")\n\n rospy.on_shutdown(clean_shutdown)\n\n map_keyboard()\n print(\"Done.\")\n\n\nif __name__ == '__main__':\n main()\n\n\n\n" ]
[ [ "numpy.asarray" ] ]
susucy/CeleScope
[ "a5d9501ff8bd9dc067b9718070876acb0767a6cf" ]
[ "celescope/tag/Analysis_tag.py" ]
[ "import os\nimport sys\nimport json\nimport numpy as np\nimport pandas as pd\nimport glob\nfrom celescope.tools.report import reporter\nfrom celescope.tools.utils import log\nfrom celescope.tools.Analysis import Analysis\n\nclass Analysis_tag(Analysis):\n\n def run(self, tsne_tag_file):\n cluster_tsne = self.get_cluster_tsne(colname='cluster')\n self.tsne_tag_df = pd.read_csv(tsne_tag_file, sep=\"\\t\", index_col=0)\n feature_tsne = self.get_cluster_tsne(colname='tag', show_tag=False, dfname='tsne_tag_df')\n marker_gene_table = self.marker_table()\n self.report_prepare(\n cluster_tsne=cluster_tsne, \n feature_tsne=feature_tsne,\n marker_gene_table=marker_gene_table,\n )\n self.report(stat=False)\n\n\n\n\n\n" ]
[ [ "pandas.read_csv" ] ]
goromal/flightmare
[ "b9b60a5bad4d167e3beb8401977802d6a926828f" ]
[ "flightrl/examples/view_reward.py" ]
[ "import seaborn as sns\nimport pandas as pd\nimport matplotlib.animation as animation\nimport matplotlib.pyplot as plt\n#\nsns.set_style(\"whitegrid\")\n#\n\n\ndef plot_reward(save_dir, varname, ylabel, save_fig=False):\n fig, ax = plt.subplots(1, 1, figsize=(7, 4))\n #\n sns.lineplot(data=data[varname].dropna(), ax=ax)\n plt.xlabel(\"Training Iterations\")\n plt.title(ylabel + \" (ppo)\")\n if save_fig:\n plt.savefig(save_dir + \"/\" + ylabel + \".png\")\n\n\nif __name__ == \"__main__\":\n logger_dir = \"./saved/2020-08-28-14-59-38/\"\n ppo_var_names = [\"ep_reward_mean\", \"ep_len_mean\", \"policy_entropy\"]\n ppo_y_labels = [\"Reward\", \"Episode Length\", \"Policy Entropy\"]\n #\n sac_var_names = [\"train_reward\", \"test_reward\", \"entropy\"]\n sac_y_labels = [\"Train Reward\", \"Test Reward\", \"Entropy\"]\n #\n csv_path = logger_dir + \"progress.csv\"\n data = pd.read_csv(csv_path)\n for i in range(len(ppo_var_names)):\n plot_reward(\n logger_dir, varname=ppo_var_names[i], ylabel=ppo_y_labels[i])\n plt.show()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.savefig", "matplotlib.pyplot.subplots", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.xlabel" ] ]
Asad-Ismail/SMN
[ "8d019d1751207ea535868d8473b39e77a22533f7" ]
[ "utils/util.py" ]
[ "import numpy as np\nimport cv2\nimport random\nimport warnings\nimport scipy\nfrom scipy.linalg.basic import solve_circulant\nimport skimage\nimport skimage.transform\nfrom distutils.version import LooseVersion\nimport torch\nimport math\nimport json\n\nfrom torch.functional import Tensor\n\nnp.random.seed(42)\n\ndef load_points_dataset(f_p):\n \"\"\"load keypoints(head/tail)from the text file\n Args:\n f_p ([str]): File path containing the head and tail points (x,y) of each fruit in the image. Each Image can have multiple fruits\n\n Returns:\n [dict]: Dictionary of file names as keys and corresponding fruit points as values\n \"\"\"\n with open(f_p, \"r\") as f:\n all_lines = f.readlines()\n points = {}\n i = 0\n while i < len(all_lines):\n if i > len(all_lines):\n break\n line = all_lines[i].split(\",\")\n label = line[0]\n file = line[3]\n first_point = None\n second_point = None\n if label == \"head\":\n first_point = (int(line[1]), int(line[2]))\n elif label == \"tail\":\n second_point = (int(line[1]), int(line[2]))\n i += 1\n if i < len(all_lines):\n line2 = all_lines[i].split(\",\")\n if line2[3] == file:\n if line2[0] == \"head\":\n first_point = (int(line2[1]), int(line2[2]))\n elif line2[0] == \"tail\":\n second_point = (int(line2[1]), int(line2[2]))\n i += 1\n if file in points:\n # file already in dictionary append the list\n # print(f\"Appending the file to existing one {file}\")\n points[file].append([first_point, second_point])\n else:\n points[file] = [[first_point, second_point]]\n return points\n\n\n\ndef load_points_dataset_2(f_p,label_name=[\"head\",\"tail\"]):\n \"\"\"load keypoints(head/tail)from the text file\n Args:\n f_p ([str]): File path containing the head and tail points (x,y) of each fruit in the image. Each Image can have multiple fruits\n\n Returns:\n [dict]: Dictionary of file names as keys and corresponding fruit points as values\n \"\"\"\n with open(f_p, \"r\") as f:\n all_lines = f.readlines()\n points = {}\n i = 0\n while i < len(all_lines):\n if i > len(all_lines):\n break\n line = all_lines[i].split(\",\")\n label = line[0]\n file = line[3]\n first_point = None\n second_point = None\n if label == label_name[0]:\n first_point = (int(line[1]), int(line[2]))\n elif label == label_name[1]:\n second_point = (int(line[1]), int(line[2]))\n i += 1\n if i < len(all_lines):\n line2 = all_lines[i].split(\",\")\n if line2[3] == file:\n if line2[0] == label_name[0]:\n first_point = (int(line2[1]), int(line2[2]))\n elif line2[0] == label_name[1]:\n second_point = (int(line2[1]), int(line2[2]))\n i += 1\n if not first_point and not second_point:\n continue\n if file in points:\n # file already in dictionary append the list\n # print(f\"Appending the file to existing one {file}\")\n points[file].append([first_point, second_point])\n else:\n points[file] = [[first_point, second_point]]\n return points\n\n\n\ndef load_class_dataset(f_p,label_name=[\"rating\",\"neck\"]):\n \"\"\"load keypoints(head/tail)from the text file\n Args:\n f_p ([str]): File path containing the head and tail points (x,y) of each fruit in the image. 
Each Image can have multiple fruits\n\n Returns:\n [dict]: Dictionary of file names as keys and corresponding fruit points as values\n \"\"\"\n with open(f_p, \"r\") as f:\n all_lines = f.readlines()\n points = {}\n i = 0\n while i < len(all_lines):\n if i > len(all_lines):\n break\n line = all_lines[i].split(\",\")\n label = line[0]\n splitted_labels=label.split(\"_\")\n file = line[3]\n coords= None\n if splitted_labels[0] in label_name:\n coords = (int(line[1]), int(line[2]))\n i += 1\n if coords is None:\n continue\n if file in points:\n # file already in dictionary append the list\n # print(f\"Appending the file to existing one {file}\")\n if splitted_labels[0] in points[file]:\n points[file][splitted_labels[0]].append([coords,int(splitted_labels[1])])\n else:\n points[file][splitted_labels[0]]=[[coords,int(splitted_labels[1])]]\n else:\n points[file]={splitted_labels[0]:[[coords, int(splitted_labels[1])]]}\n return points\n\ndef load_segmentation_dataset(f_p,label_names=None):\n \"\"\"\"\n Returns:\n [dict]: Dictionary of list with names \n \"\"\"\n data=load_json(f_p)\n cat_map={}\n for cat in data[\"categories\"]:\n if cat[\"name\"] in label_names:\n cat_map[cat['id']]=cat[\"name\"] \n image_map={}\n for cat in data[\"images\"]:\n image_map[cat['id']]=cat[\"file_name\"] \n annos={}\n for d in data[\"annotations\"]:\n tmp=[]\n seg=d[\"segmentation\"][0]\n for i in range(0,len(seg)-1,2):\n tmp.append([seg[i],seg[i+1]]) \n if image_map[d[\"image_id\"]] not in annos:\n annos[image_map[d[\"image_id\"]]]=[{\"class_id\":cat_map[d[\"category_id\"]],\"annotation\":tmp}]\n else:\n annos[image_map[d[\"image_id\"]]].append({\"class_id\":cat_map[d[\"category_id\"]],\"annotation\":tmp})\n return annos\n\n\ndef load_backbone(filename):\n with open(filename) as f:\n back_annotation = json.load(f)\n return back_annotation\n\ndef load_json(filename):\n with open(filename) as f:\n annotation = json.load(f)\n return annotation\n\n\ndef extract_bboxes(mask):\n \"\"\"Compute bounding boxes from masks.\n mask: [height, width, num_instances]. Mask pixels are either 1 or 0.\n Returns: bbox array [num_instances, (y1, x1, y2, x2)].\n \"\"\"\n #print(np.max(mask))\n #print(np.min(mask))\n boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)\n for i in range(mask.shape[-1]):\n m = mask[:, :, i]\n # Bounding box.\n horizontal_indicies = np.where(np.any(m, axis=0))[0]\n vertical_indicies = np.where(np.any(m, axis=1))[0]\n if horizontal_indicies.shape[0]:\n x1, x2 = horizontal_indicies[[0, -1]]\n y1, y2 = vertical_indicies[[0, -1]]\n # x2 and y2 should not be part of the box. Increment by 1.\n x2 += 1\n y2 += 1\n else:\n # No mask for this instance. Might happen due to\n # resizing or cropping. Set bbox to zeros\n x1, x2, y1, y2 = 0, 0, 0, 0\n boxes[i] = np.array([x1,y1,x2,y2])\n return boxes.astype(np.int32)\n\n\ndef resize(\n image,\n output_shape,\n order=1,\n mode=\"constant\",\n cval=0,\n clip=True,\n preserve_range=False,\n anti_aliasing=False,\n anti_aliasing_sigma=None,\n):\n \"\"\"A wrapper for Scikit-Image resize().\n Scikit-Image generates warnings on every call to resize() if it doesn't\n receive the right parameters. The right parameters depend on the version\n of skimage. This solves the problem by using different parameters per\n version. And it provides a central place to control resizing defaults.\n \"\"\"\n if LooseVersion(skimage.__version__) >= LooseVersion(\"0.14\"):\n # New in 0.14: anti_aliasing. 
Default it to False for backward\n # compatibility with skimage 0.13.\n return skimage.transform.resize(\n image,\n output_shape,\n order=order,\n mode=mode,\n cval=cval,\n clip=clip,\n preserve_range=preserve_range,\n anti_aliasing=anti_aliasing,\n anti_aliasing_sigma=anti_aliasing_sigma,\n )\n else:\n return skimage.transform.resize(\n image,\n output_shape,\n order=order,\n mode=mode,\n cval=cval,\n clip=clip,\n preserve_range=preserve_range,\n )\n\n\ndef resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode=\"square\"):\n \"\"\"Resizes an image keeping the aspect ratio unchanged.\n min_dim: if provided, resizes the image such that it's smaller\n dimension == min_dim\n max_dim: if provided, ensures that the image longest side doesn't\n exceed this value.\n min_scale: if provided, ensure that the image is scaled up by at least\n this percent even if min_dim doesn't require it.\n mode: Resizing mode.\n none: No resizing. Return the image unchanged.\n square: Resize and pad with zeros to get a square image\n of size [max_dim, max_dim].\n pad64: Pads width and height with zeros to make them multiples of 64.\n If min_dim or min_scale are provided, it scales the image up\n before padding. max_dim is ignored in this mode.\n The multiple of 64 is needed to ensure smooth scaling of feature\n maps up and down the 6 levels of the FPN pyramid (2**6=64).\n crop: Picks random crops from the image. First, scales the image based\n on min_dim and min_scale, then picks a random crop of\n size min_dim x min_dim. Can be used in training only.\n max_dim is not used in this mode.\n Returns:\n image: the resized image\n window: (y1, x1, y2, x2). If max_dim is provided, padding might\n be inserted in the returned image. If so, this window is the\n coordinates of the image part of the full image (excluding\n the padding). 
The x2, y2 pixels are not included.\n scale: The scale factor used to resize the image\n padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Keep track of image dtype and return results in the same dtype\n image_dtype = image.dtype\n # Default window (y1, x1, y2, x2) and default scale == 1.\n h, w = image.shape[:2]\n window = (0, 0, h, w)\n scale = 1\n padding = [(0, 0), (0, 0), (0, 0)]\n crop = None\n\n if mode == \"none\":\n return image, window, scale, padding, crop\n\n # Scale?\n if min_dim:\n # Scale up but not down\n scale = max(1, min_dim / min(h, w))\n if min_scale and scale < min_scale:\n scale = min_scale\n\n # Does it exceed max dim?\n if max_dim and mode == \"square\":\n image_max = max(h, w)\n if round(image_max * scale) > max_dim:\n scale = max_dim / image_max\n\n # Resize image using bilinear interpolation\n if scale != 1:\n image = resize(image, (round(h * scale), round(w * scale)), preserve_range=True)\n\n # Need padding or cropping?\n if mode == \"square\":\n # Get new height and width\n h, w = image.shape[:2]\n top_pad = (max_dim - h) // 2\n bottom_pad = max_dim - h - top_pad\n left_pad = (max_dim - w) // 2\n right_pad = max_dim - w - left_pad\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode=\"constant\", constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"pad64\":\n h, w = image.shape[:2]\n # Both sides must be divisible by 64\n assert min_dim % 64 == 0, \"Minimum dimension must be a multiple of 64\"\n # Height\n if h % 64 > 0:\n max_h = h - (h % 64) + 64\n top_pad = (max_h - h) // 2\n bottom_pad = max_h - h - top_pad\n else:\n top_pad = bottom_pad = 0\n # Width\n if w % 64 > 0:\n max_w = w - (w % 64) + 64\n left_pad = (max_w - w) // 2\n right_pad = max_w - w - left_pad\n else:\n left_pad = right_pad = 0\n padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n image = np.pad(image, padding, mode=\"constant\", constant_values=0)\n window = (top_pad, left_pad, h + top_pad, w + left_pad)\n elif mode == \"crop\":\n # Pick a random crop\n h, w = image.shape[:2]\n y = random.randint(0, (h - min_dim))\n x = random.randint(0, (w - min_dim))\n crop = (y, x, min_dim, min_dim)\n image = image[y : y + min_dim, x : x + min_dim]\n window = (0, 0, min_dim, min_dim)\n else:\n raise Exception(\"Mode {} not supported\".format(mode))\n return image.astype(image_dtype), window, scale, padding, crop\n\n\ndef resize_mask(mask, scale, padding, crop=None):\n \"\"\"Resizes a mask using the given scale and padding.\n Typically, you get the scale and padding from resize_image() to\n ensure both, the image and the mask, are resized consistently.\n scale: mask scaling factor\n padding: Padding to add to the mask in the form\n [(top, bottom), (left, right), (0, 0)]\n \"\"\"\n # Suppress warning from scipy 0.13.0, the output shape of zoom() is\n # calculated with round() instead of int()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)\n if crop is not None:\n y, x, h, w = crop\n mask = mask[y : y + h, x : x + w]\n else:\n mask = np.pad(mask, padding, mode=\"constant\", constant_values=0)\n return mask\n\ndef vis_mask(vis_img,indicies,color=(255,120,0)):\n for j in range(len(indicies[0])):\n x = indicies[1][j]\n y = indicies[0][j]\n # viusalize masks\n cv2.circle(vis_img, (x, y), 1, color, 1)\n\ndef resize_images_cv(img):\n scale_percent = 40 # percent of original size\n 
width = int(img.shape[1] * scale_percent / 100)\n height = int(img.shape[0] * scale_percent / 100)\n dim = (width, height)\n # resize image\n resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n return resized\n\n\ndef write_text(image,text,point=(0,0),color=(255,0,0)):\n # font\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n # fontScale\n fontScale = 1\n\n # Line thickness of 2 px\n thickness = 3\n\n # Using cv2.putText() method\n image = cv2.putText(image, text, point, font, \n fontScale, color, thickness, cv2.LINE_AA)\n return image\n \n\ndef vis_data(image, masks, bboxs,classes,keypoints=None,**kwargs):\n vis_img = (image.detach().numpy()*255).astype(np.uint8)\n vis_img=np.moveaxis(vis_img, 0, -1)\n vis_img=vis_img.copy()\n class_color={i:[random.uniform(0,255) for _ in range(3)] for i in np.unique(classes)}\n # offset for drawing text\n off_x=20\n off_y=50\n for i in range(masks.shape[0]):\n mask = masks[i][...,None].detach().numpy()\n bbox = np.int0(bboxs[i].detach().numpy().copy())\n indicies = np.where(mask >= 0.5)\n vis_mask(vis_img,indicies=indicies,color=class_color[classes[i]])\n # Visualize bounding box\n cv2.rectangle(vis_img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), class_color[classes[i]], 2)\n # write class name\n write_text(vis_img,classes[i],((bbox[0], bbox[1])))\n # Visualize segm classes\n if \"other_masks\" in kwargs and \"seg_labels\" in kwargs:\n for j,k in enumerate(kwargs[\"other_masks\"].keys(),start=1):\n if classes[i] in kwargs[\"seg_labels\"][j]: \n m=kwargs[\"other_masks\"][k][i][...,None].detach().numpy()\n indicies = np.where(m >= 0.1)\n vis_mask(vis_img,indicies=indicies,color=(0,0,255))\n #other_mask_id+=1\n \n ## Visualize keypoints\n if \"kp_labels\" in kwargs and keypoints is not None:\n if classes[i] in kwargs[\"kp_labels\"]: \n keypoint = np.int0(keypoints[i].detach().numpy())\n # Visualize Keypoints\n cv2.circle(vis_img, (keypoint[0][0], keypoint[0][1]), 1, (0, 255, 255), 20)\n write_text(vis_img,\"Head\",(keypoint[0][0]+off_x, keypoint[0][1]+off_y))\n cv2.circle(vis_img, (keypoint[1][0], keypoint[1][1]), 1, (0, 255, 255), 20)\n write_text(vis_img,\"Tail\",(keypoint[1][0]+off_x, keypoint[1][1]+off_y))\n \n # visualize classification\n if \"clas\" in kwargs and \"clas_labels\" in kwargs:\n for j,k in enumerate(kwargs[\"clas\"].keys(),start=0):\n if classes[i] in kwargs[\"clas_labels\"][j]:\n cl=kwargs[\"clas\"][k][i].cpu().item()\n point=(bbox[0]+(bbox[2]-bbox[0])//2+j*off_x,bbox[1]+(bbox[3]-bbox[1])//2+j*off_y)\n write_text(vis_img,f\"{k}: {cl}\",point=point,color=(0,0,255))\n \n vis_img=resize_images_cv(vis_img)\n #if\"\" kwargs[\"DEBUG\"]:\n if \"epoch\" in kwargs:\n cv2.imwrite(\"/home/ec2-user/SageMaker/SMN/res_images/\"+f\"{kwargs['epoch']}\"+\".png\",vis_img)\n #cv2.imshow(\"Input and labels\", vis_img)\n #cv2.waitKey(0)\n\n\n\ndef resize_points(points, scale, window):\n # window: (y1, x1, y2, x2)\n scaled_points = []\n for i in range(len(points)):\n two_point = np.array(points[i])\n two_point = scale * two_point\n two_point[0][0] = two_point[0][0] + window[1]\n two_point[0][1] = two_point[0][1] + window[0]\n two_point[1][0] = two_point[1][0] + window[1]\n two_point[1][1] = two_point[1][1] + window[0]\n scaled_points.append(two_point)\n return np.int0(scaled_points)\n\n\n\n\ndef generate_anchors_tensor(scales, ratios, shape, feature_stride, anchor_stride,device=\"cpu\"):\n \"\"\"\n scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]\n ratios: 1D array of anchor ratios of width/height. 
Example: [0.5, 1, 2]\n shape: [height, width] spatial shape of the feature map over which\n to generate anchors.\n feature_stride: Stride of the feature map relative to the image in pixels.\n anchor_stride: Stride of anchors on the feature map. For example, if the\n value is 2 then generate anchors for every other feature map pixel.\n \"\"\"\n # Get all combinations of scales and ratios\n scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))\n scales = scales.flatten()\n ratios = ratios.flatten()\n\n # Enumerate heights and widths from scales and ratios\n heights = scales / np.sqrt(ratios)\n widths = scales * np.sqrt(ratios)\n\n # Enumerate shifts in feature space\n shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride\n shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride\n shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)\n\n # Enumerate combinations of shifts, widths, and heights\n box_widths, box_centers_x = np.meshgrid(widths, shifts_x)\n box_heights, box_centers_y = np.meshgrid(heights, shifts_y)\n\n # Reshape to get a list of (y, x) and a list of (h, w)\n box_centers = np.stack([box_centers_y, box_centers_x], axis=2).reshape([-1, 2])\n box_sizes = np.stack([box_heights, box_widths], axis=2).reshape([-1, 2])\n\n # Convert to corner coordinates (y1, x1, y2, x2)\n boxes = np.concatenate(\n [box_centers - 0.5 * box_sizes, box_centers + 0.5 * box_sizes], axis=1\n )\n boxes=torch.tensor(boxes,dtype=torch.float32)\n return boxes\n\n\n\ndef visualize_anchors(\n img,\n anchors,\n backbone_shapes,\n RPN_ANCHOR_RATIOS,\n RPN_ANCHOR_STRIDE,\n RPN_ANCHOR_SCALES,\n):\n vis_img = img.copy()\n num_levels = len(backbone_shapes)\n anchors_per_cell = len(RPN_ANCHOR_RATIOS)\n print(\"Anchors Count: \", anchors.shape[0])\n print(\"Scales: \", RPN_ANCHOR_SCALES)\n print(\"ratios: \", RPN_ANCHOR_RATIOS)\n print(\"Anchors per Cell: \", anchors_per_cell)\n print(\"Levels: \", num_levels)\n anchors_per_level = []\n for l in range(num_levels):\n num_cells = backbone_shapes[l][0] * backbone_shapes[l][1]\n anchors_per_level.append(anchors_per_cell * num_cells // RPN_ANCHOR_STRIDE ** 2)\n print(\"Anchors in Level {}: {}\".format(l, anchors_per_level[l]))\n\n for level in range(num_levels):\n colors = [[0, 255, 0]]\n # Compute the index of the anchors at the center of the image\n level_start = sum(\n anchors_per_level[:level]\n ) # sum of anchors of previous levels\n level_anchors = anchors[level_start : level_start + anchors_per_level[level]]\n print(\n \"Level {}. Anchors: {:6} Feature map Shape: {}\".format(\n level, level_anchors.shape[0], backbone_shapes[level]\n )\n )\n center_cell = np.array(backbone_shapes[level]) // 2\n center_cell_index = center_cell[0] * backbone_shapes[level][1] + center_cell[1]\n level_center = center_cell_index * anchors_per_cell\n center_anchor = anchors_per_cell * (\n (center_cell[0] * backbone_shapes[level][1] / RPN_ANCHOR_STRIDE ** 2)\n + center_cell[1] / RPN_ANCHOR_STRIDE\n )\n level_center = int(center_anchor)\n\n # Draw anchors. 
Brightness show the order in the array, dark to bright.\n for i, rect in enumerate(\n level_anchors[level_center : level_center + anchors_per_cell]\n ):\n y1, x1, y2, x2 = rect\n cv2.rectangle(\n vis_img, (int(x1), int(y1)), (int(x2), int(y2)), colors[level], 2\n )\n\n cv2.imshow(\"Center Anchor Boxes\", vis_img)\n cv2.waitKey(0)\n\n\ndef generate_pyramid_anchors_tensor(scales, ratios, feature_shapes, feature_strides, anchor_stride,device=\"cpu\"):\n \"\"\"Generate anchors at different levels of a feature pyramid. Each scale\n is associated with a level of the pyramid, but each ratio is used in\n all levels of the pyramid.\n Returns:\n anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted\n with the same order of the given scales. So, anchors of scale[0] come\n first, then anchors of scale[1], and so on.\n \"\"\"\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n anchors = []\n for i in range(len(scales)):\n anchors.append(generate_anchors_tensor(scales[i], ratios, feature_shapes[i], feature_strides[i], anchor_stride,device=device))\n return torch.cat(anchors, axis=0).to(device=device)\n\n\n\ndef compute_iou_tensor(box, boxes, box_area, boxes_area):\n \"\"\"Calculates IoU of the given box with the array of the given boxes.\n box: 1D vector [y1, x1, y2, x2]\n boxes: [boxes_count, (y1, x1, y2, x2)]\n box_area: float. the area of 'box'\n boxes_area: array of length boxes_count.\n Note: the areas are passed in rather than calculated here for\n efficiency. Calculate once in the caller to avoid duplicate work.\n \"\"\"\n # Calculate intersection areas\n y1 = torch.maximum(box[0], boxes[:, 0])\n y2 = torch.minimum(box[2], boxes[:, 2])\n x1 = torch.maximum(box[1], boxes[:, 1])\n x2 = torch.minimum(box[3], boxes[:, 3])\n intersection = torch.maximum(x2 - x1, torch.tensor(0)) * torch.maximum(y2 - y1, torch.tensor(0))\n union = box_area + boxes_area[:] - intersection[:]\n iou = intersection / union\n return iou\n\n\ndef compute_overlaps_tesnor(boxes1, boxes2,device=\"cpu\"):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n For better performance, pass the largest set first and the smaller second.\n \"\"\"\n # Areas of anchors and GT boxes\n area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])\n area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])\n\n # Compute overlaps to generate matrix [boxes1 count, boxes2 count]\n # Each cell contains the IoU value.\n overlaps = torch.zeros((boxes1.shape[0], boxes2.shape[0])).to(device=device)\n for i in range(overlaps.shape[1]):\n box2 = boxes2[i]\n overlaps[:, i] = compute_iou_tensor(box2, boxes1, area2[i], area1)\n return overlaps\n\n\ndef apply_box_deltas_tesnor(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)]. 
Note that (y2, x2) is outside the box.\n deltas: [N, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= torch.exp(deltas[:, 2])\n width *= torch.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n return torch.stack([y1, x1, y2, x2], axis=1)\n\n\ndef vis_anchors_refined_anchors_(img, anchors, refined_anchors):\n vis_img = img.copy()\n for i, rect in enumerate(anchors):\n y1, x1, y2, x2 = rect\n cv2.rectangle(vis_img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 2)\n y1, x1, y2, x2 = refined_anchors[i]\n cv2.rectangle(vis_img, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)\n break\n cv2.imshow(\"Matched Anchor Boxes\", vis_img)\n cv2.waitKey(0)\n\n\ndef build_rpn_targets_tensor(anchors, gt_boxes, config):\n \"\"\"Given the anchors and GT boxes, compute overlaps and identify positive\n anchors and deltas to refine them to match their corresponding GT boxes.\n anchors: [num_anchors, (y1, x1, y2, x2)]\n gt_class_ids: [num_gt_boxes] Integer class IDs.\n gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]\n Returns:\n rpn_match: [N] (int32) matches between anchors and GT boxes.\n 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n \"\"\"\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = torch.zeros([anchors.shape[0]], dtype=torch.int32).to(device=config.DEVICE)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = torch.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4)).to(device=config.DEVICE)\n\n no_crowd_bool = torch.ones([anchors.shape[0]], dtype=torch.bool).to(device=config.DEVICE)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = compute_overlaps_tesnor(anchors, gt_boxes,config.DEVICE)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = torch.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[torch.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # If multiple anchors have the same IoU match all of them\n # original was argwhere\n # gt_iou_argmax = torch.where(torch.tensor(overlaps == torch.max(overlaps, axis=0)))[:, 0]\n a = torch.max(overlaps, axis=0)[0]\n gt_iou_argmax = torch.where(overlaps == a)[0]\n\n rpn_match[gt_iou_argmax] = 1\n # 3. 
Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = torch.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n unif = torch.ones(ids.shape[0]).to(device=config.DEVICE)\n idx = unif.multinomial(extra, replacement=False)\n ids = ids[idx]\n # ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = torch.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE - torch.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n # ids = np.random.choice(ids, extra, replace=False)\n unif = torch.ones(ids.shape[0]).to(device=config.DEVICE)\n idx = unif.multinomial(extra, replacement=False)\n ids = ids[idx]\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = torch.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = torch.tensor(\n [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n torch.log(gt_h / a_h),\n torch.log(gt_w / a_w),\n ]\n )\n # Normalize\n #rpn_bbox[ix] /= torch.tensor(config.RPN_BBOX_STD_DEV, dtype=torch.float32).to(device=config.DEVICE)\n ix += 1\n\n return rpn_match, rpn_bbox\n\n\ndef box_refinement(box, gt_box):\n \"\"\"Compute refinement needed to transform box to gt_box.\n box and gt_box are [N, (y1, x1, y2, x2)]. 
(y2, x2) is\n assumed to be outside the box.\n \"\"\"\n box = box.type(torch.float32)\n gt_box = gt_box.type(torch.float32)\n\n height = box[:, 2] - box[:, 0]\n width = box[:, 3] - box[:, 1]\n center_y = box[:, 0] + 0.5 * height\n center_x = box[:, 1] + 0.5 * width\n\n gt_height = gt_box[:, 2] - gt_box[:, 0]\n gt_width = gt_box[:, 3] - gt_box[:, 1]\n gt_center_y = gt_box[:, 0] + 0.5 * gt_height\n gt_center_x = gt_box[:, 1] + 0.5 * gt_width\n\n dy = (gt_center_y - center_y) / height\n dx = (gt_center_x - center_x) / width\n dh = torch.log(gt_height / height)\n dw = torch.log(gt_width / width)\n\n return torch.stack([dy, dx, dh, dw], axis=1)\n\n\n\ndef process_box(box, score, image_shape, min_size):\n \"\"\"\n Clip boxes in the image size and remove boxes which are too small.\n \"\"\"\n\n box[:, [0, 2]] = box[:, [0, 2]].clamp(0, image_shape[0])\n box[:, [1, 3]] = box[:, [1, 3]].clamp(0, image_shape[1])\n\n w, h = box[:, 2] - box[:, 0], box[:, 3] - box[:, 1]\n keep = torch.where((w >= min_size) & (h >= min_size))[0]\n box, score = box[keep], score[keep]\n return box, score\n\n\ndef roi_align(\n features, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio\n):\n if torch.__version__ >= \"1.5.0\":\n return torch.ops.torchvision.roi_align(\n features,\n rois,\n spatial_scale,\n pooled_height,\n pooled_width,\n sampling_ratio,\n False,\n )\n else:\n return torch.ops.torchvision.roi_align(\n features, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio\n )\n\n\nclass RoIAlign:\n \"\"\"\n Performs Region of Interest (RoI) Align operator described in Mask R-CNN\n\n \"\"\"\n\n def __init__(self, output_size, sampling_ratio):\n \"\"\"\n Arguments:\n output_size (Tuple[int, int]): the size of the output after the cropping\n is performed, as (height, width)\n sampling_ratio (int): number of sampling points in the interpolation grid\n used to compute the output value of each pooled output bin. If > 0,\n then exactly sampling_ratio x sampling_ratio grid points are used. If\n <= 0, then an adaptive number of grid points are used (computed as\n ceil(roi_width / pooled_w), and likewise for height). 
Default: -1\n \"\"\"\n\n self.output_size = output_size\n self.sampling_ratio = sampling_ratio\n self.spatial_scale = None\n\n def setup_scale(self, feature_shape, image_shape):\n if self.spatial_scale is not None:\n return\n\n possible_scales = []\n for s1, s2 in zip(feature_shape, image_shape):\n scale = 2 ** int(math.log2(s1 / s2))\n possible_scales.append(scale)\n assert possible_scales[0] == possible_scales[1]\n self.spatial_scale = possible_scales[0]\n\n def __call__(self, feature, proposal, image_shape):\n \"\"\"\n Arguments:\n feature (Tensor[N, C, H, W])\n proposal (Tensor[K, 4])\n image_shape (Torch.Size([H, W]))\n\n Returns:\n output (Tensor[K, C, self.output_size[0], self.output_size[1]])\n\n \"\"\"\n idx = proposal.new_full((proposal.shape[0], 1), 0)\n roi = torch.cat((idx, proposal), dim=1)\n\n self.setup_scale(feature.shape[-2:], image_shape)\n return roi_align(\n feature.to(roi),\n roi,\n self.spatial_scale,\n self.output_size[0],\n self.output_size[1],\n self.sampling_ratio,\n )\n\n\n\n\nclass Matcher:\n def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):\n self.high_threshold = high_threshold\n self.low_threshold = low_threshold\n self.allow_low_quality_matches = allow_low_quality_matches\n\n def __call__(self, iou):\n \"\"\"\n Arguments:\n iou (Tensor[M, N]): containing the pairwise quality between \n M ground-truth boxes and N predicted boxes.\n\n Returns:\n label (Tensor[N]): positive (1) or negative (0) label for each predicted box,\n -1 means ignoring this box.\n matched_idx (Tensor[N]): indices of gt box matched by each predicted box.\n \"\"\"\n \n value, matched_idx = iou.max(dim=0)\n label = torch.full((iou.shape[1],), -1, dtype=torch.float, device=iou.device) \n \n label[value >= self.high_threshold] = 1\n label[value < self.low_threshold] = 0\n \n if self.allow_low_quality_matches:\n highest_quality = iou.max(dim=1)[0]\n gt_pred_pairs = torch.where(iou == highest_quality[:, None])[1]\n label[gt_pred_pairs] = 1\n\n return label, matched_idx\n\n\n\nclass BalancedPositiveNegativeSampler:\n def __init__(self, num_samples, positive_fraction):\n self.num_samples = num_samples\n self.positive_fraction = positive_fraction\n\n def __call__(self, label):\n positive = torch.where(label == 1)[0]\n negative = torch.where(label == 0)[0]\n\n num_pos = int(self.num_samples * self.positive_fraction)\n num_pos = min(positive.numel(), num_pos)\n num_neg = self.num_samples - num_pos\n num_neg = min(negative.numel(), num_neg)\n\n pos_perm = torch.randperm(positive.numel(), device=positive.device)[:num_pos]\n neg_perm = torch.randperm(negative.numel(), device=negative.device)[:num_neg]\n\n pos_idx = positive[pos_perm]\n neg_idx = negative[neg_perm]\n\n return pos_idx, neg_idx\n\n\ndef visualize_inference(in_img, inv_normalize,results):\n \n vis_img = in_img.clone()\n vis_img = (inv_normalize(vis_img).data.numpy() * 255).astype(np.uint8)\n vis_img = np.ascontiguousarray(np.moveaxis(vis_img, 0, -1))\n boxes=np.int0(results[\"boxes\"])\n labels=results[\"labels\"]\n scores=results[\"scores\"]\n print(f\"Labels max {labels.max()}\")\n print(f\"label min {labels.min()}\")\n print(f\"scores max {scores.max()}\")\n print(f\"scores min {scores.min()}\")\n for i in range(boxes.shape[0]):\n bbox=boxes[i]\n cv2.rectangle(vis_img, (bbox[1], bbox[0]), (bbox[3], bbox[2]), (255, 255, 0), 2)\n cv2.imshow(\"Results\",vis_img)\n cv2.waitKey(0)\n \n " ]
[ [ "torch.stack", "scipy.ndimage.zoom", "numpy.any", "numpy.random.seed", "numpy.moveaxis", "torch.log", "torch.maximum", "torch.max", "numpy.stack", "numpy.meshgrid", "torch.cat", "torch.arange", "numpy.where", "numpy.unique", "numpy.int0", "torch.ones", "numpy.zeros", "torch.argmax", "torch.tensor", "torch.zeros", "numpy.arange", "numpy.pad", "numpy.array", "torch.sum", "torch.ops.torchvision.roi_align", "torch.full", "torch.exp", "torch.where", "torch.minimum", "numpy.sqrt", "numpy.concatenate" ] ]
zhouwubai/kaggle
[ "45fbce8834a5c7ce9c925af691f5761d9d88c8d3" ]
[ "src/DoorDash/src/process.py" ]
[ "import numpy as np\nimport pandas as pd\nimport copy\nimport re\n\n\nclass PreProcess(object):\n\n def __init__(self):\n self.df = None\n\n def _standardize_string(self, a_str):\n \"\"\"Replace whitespace with underscore\n remove non-alphanumeric characters\n \"\"\"\n if isinstance(a_str, str) or isinstance(a_str, unicode):\n a_str = re.sub(r'\\s+', '_', a_str)\n a_str = re.sub(r'\\W+', '_', a_str)\n return a_str.lower()\n else:\n return ''\n\n feature2categorizer = {\n \"market_id\": _standardize_string,\n # \"store_id\",\n 'store_primary_category': _standardize_string,\n 'order_protocol': _standardize_string\n }\n\n def _categorize_features(self):\n if type(self.df) is dict:\n pass\n else:\n columns_to_dummify = []\n for feature in self.feature2categorizer.keys():\n categorizer = self.feature2categorizer[feature]\n if feature in self.df:\n # first apply categorizer/replace\n self.df.loc[:, feature] = self.df[feature].apply(lambda x: categorizer(self, x))\n # add the column to be dummified\n columns_to_dummify.append(feature)\n self.df = pd.get_dummies(\n self.df,\n columns=columns_to_dummify).copy(deep=True)\n\n def preprocess(self, df):\n \"\"\"\n Returns:\n preprocess dataframe of features, model ready\n \"\"\"\n\n if df is None or len(df) == 0:\n raise Exception(\"Dataframe in Preprocessing is not initilized\")\n else:\n if type(df) is dict:\n self.df = copy.deepcopy(df)\n else:\n self.df = df # this is for offline training, reference is OK\n\n self._categorize_features()\n\n return self.df\n" ]
[ [ "pandas.get_dummies" ] ]
LucasFranciscoCorreia/ontask_b
[ "5473e9faa24c71a2a1102d47ebc2cbf27608e42a" ]
[ "ontask/dataops/pandas/merge.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"Functions to do data frame merging.\"\"\"\n\nfrom typing import Dict\n\nimport pandas as pd\nfrom django.utils.translation import gettext\n\nfrom ontask.dataops.pandas.columns import has_unique_column, is_unique_column\nfrom ontask.dataops.pandas.dataframe import store_dataframe\n\n\ndef _perform_non_overlapping_column_merge(\n dst_df: pd.DataFrame,\n src_df_no_overlap: pd.DataFrame,\n merge_info: Dict,\n dst_key: str,\n src_key: str,\n) -> pd.DataFrame:\n \"\"\"Merge the non overlapping columns of the new data frame.\n\n :param dst_df: Existing data frame\n\n :param src_df_no_overlap: portion of the src_df with no overlap\n\n :param merge_info: Information about how to merge\n\n :param dst_key: key column in dst_frame\n\n :param src_key: key column in src_frame\n\n :return: Modified data frame\n \"\"\"\n # Step A. Perform the merge of non-overlapping columns\n new_df = dst_df\n if len(src_df_no_overlap.columns) > 1:\n new_df = pd.merge(\n new_df,\n src_df_no_overlap,\n how=merge_info['how_merge'],\n left_on=dst_key,\n right_on=src_key)\n\n # VERY special case: The key used for the merge in src_df can have an\n # identical column in dst_df, but it is not the one used for the\n # merge. For example: DST has columns C1(key), C2, C3, SRC has\n # columns C2(key) and C4. The merge is done matching C1 in DST with\n # C2 in SRC, but this will produce two columns C2_x and C2_y. In this\n # case we drop C2_y because C2_x has been properly updated with the\n # values from C2_y in the previous step (Step A).\n if src_key != dst_key and src_key in dst_df.columns:\n # Drop column_y\n new_df.drop([src_key + '_y'], axis=1, inplace=True)\n # Rename column_x\n new_df = new_df.rename(columns={src_key + '_x': src_key})\n\n return new_df\n\n\ndef _perform_overlap_update(\n dst_df: pd.DataFrame,\n src_df: pd.DataFrame,\n dst_key: str,\n src_key: str,\n how_merge: str,\n) -> pd.DataFrame:\n \"\"\"Perform the updat of the columns that overlap with the data_frame.\n\n :param dst_df: Left data frame with all the columns\n\n :param src_df: Right data frame with the overlapping columns\n\n :param dst_key: Left key column\n\n :param src_key: Right key column\n\n :param how_merge: Merge version: inner, outer, left or right\n\n :return: Returns the updated data frame depending on the type of merge\n variant requested.\n\n For this function the 'update' and 'append' functions in Pandas will be\n used.\n\n The 'update' function will be used for those rows for\n which there is a corresponding key in src_df. This means that the data in\n dst_df_tmp1 will only be updated if the value is not NaN.\n\n The 'append' function will be used for those rows in src_df that are not\n present in dst_df.\n\n There are four possible cases for this STEP depending on the type of\n merge (inner, outer, left, right). Here is the pseudocode used for each\n of these cases:\n\n - left: Simplest case because this is exactly how the function 'update'\n behaves. So, in this case dst_df_tmp1.update(src_df[OVERLAP]) is the\n result.\n\n - inner: First obtain the subset dst_df_tmp1 with intersection of\n dst_df_tmp1 and src_df keys (result in dst_df_tmp2) and then update\n dst_df_tmp2 with src_df[OVERLAP]\n\n - outer: First apply the update operation in the left case, that is\n dst_df_tmp1.update(src_df[OVERLAP], select from src_df[OVERLAP] the rows\n that are not part of dst_df_tmp1, and then append these to dst_df_tmp1 to\n create the result dst_df_tmp2\n\n - right: This is the most complex. 
It requires first to subset\n dst_df_tmp1 with the intersection of the two keys (src and dst). Then,\n dst_df_tmp1 is updated with the content of src_df[OVERLAP]. Finally,\n the rows only in the src_df need to be appended to the dataframe.\n\n \"\"\"\n # If the src data frame has a single column (they key), there is no need\n # to do any operation\n if len(src_df.columns) <= 1:\n return dst_df\n\n dst_df_tmp1 = dst_df.set_index(dst_key, drop=False)\n src_df_tmp1 = src_df.set_index(src_key, drop=False)\n if how_merge == 'inner':\n # Subset of dst_df_tmp1 with the keys in both DFs\n overlap_df = dst_df_tmp1.loc[\n dst_df_tmp1.index.intersection(src_df_tmp1.index)\n ].copy()\n # Update the subset with the values in the right\n overlap_df.update(src_df_tmp1)\n elif how_merge == 'outer':\n # Update\n overlap_df = dst_df_tmp1\n overlap_df.update(src_df_tmp1)\n # Append the missing rows\n tmp1 = src_df_tmp1.loc[\n src_df_tmp1.index.difference(dst_df_tmp1.index)\n ].copy()\n if not tmp1.empty:\n # Append only if the tmp1 data frame is not empty (otherwise it\n # looses the name of the index column\n overlap_df = overlap_df.append(tmp1, sort=True)\n elif how_merge == 'left':\n overlap_df = dst_df_tmp1\n overlap_df.update(src_df_tmp1)\n else:\n # Right merge\n # Subset of dst_df_tmp1 with the keys in both DFs\n overlap_df = dst_df_tmp1.loc[\n dst_df_tmp1.index.intersection(src_df_tmp1.index)\n ].copy()\n # Update with the right DF\n overlap_df.update(src_df_tmp1)\n # Append the rows that are in right and not in left\n tmp2 = src_df_tmp1.loc[\n src_df_tmp1.index.difference(dst_df_tmp1.index)\n ].copy()\n if not tmp2.empty:\n # Append only if it is not empty\n overlap_df = overlap_df.append(tmp2, sort=True)\n\n # Return result\n return overlap_df.reset_index(drop=True)\n\n\ndef _update_is_key_field(merge_info: Dict, workflow):\n \"\"\"Traverse the list of columns and reset the key property.\n\n :param merge_info: dictionary with the lists of columns to upload, rename\n and keep as key\n\n :param workflow: current workflow (to access columns)\n\n :result: None\n \"\"\"\n # Update the value of is_key based on \"keep_key_column\"\n for to_upload, cname, keep_key in zip(\n merge_info['columns_to_upload'],\n merge_info['rename_column_names'],\n merge_info['keep_key_column'],\n ):\n if not to_upload:\n # Column is not uploaded, nothing to process\n continue\n\n col = workflow.columns.get(name=cname)\n\n # Process the is_key property. 
The is_key property has been\n # recalculated during the store, now it needs to be updated looking at\n # the keep_key value.\n col.is_key = col.is_key and keep_key\n col.save()\n\n\ndef perform_dataframe_upload_merge(\n workflow,\n dst_df: pd.DataFrame,\n src_df: pd.DataFrame,\n merge_info: Dict,\n):\n \"\"\"Merge the existing data frame (dst) with a new one (src).\n\n It combines the two data frames dst_df and src_df and stores its content.\n\n The combination of dst_df and src_df assumes:\n\n - dst_df has a set of columns (potentially empty) that do not overlap in\n name with the ones in src_df (dst_df[NO_OVERLAP_DST])\n\n - dst_df and src_df have a set of columns (potentially empty) that overlap\n in name (dst_df[OVERLAP] and src_df[OVERLAP] respectively)\n\n - src_df has a set of columns (potentially empty) that do not overlap in\n name with the ones in dst_df (src_df[NO_OVERLAP_SRC])\n\n The function combines dst_df and src_df following two main steps (in both\n steps, the number of rows processed are derived from the parameter\n merge_info['how_merge']).\n\n STEP A: A new data frame dst_df_tmp1 is created using the pandas \"merge\"\n operation between dst_df and src_df[NO_OVERLAP_SRC]. This increases the\n number of columns in dst_df_tmp1 with respect to dst_df by adding the new\n columns from src_df.\n\n The pseudocode for this step is:\n\n dst_df_tmp1 = pd.merge(dst_df,\n src_df[NO_OVERLAP_SRC],\n how=merge['how_merge'],\n left_on=merge_info['dst_selected_key'],\n right_on=merge_info['src_selected_key'])\n\n STEP B: The data frame dst_df_tmp1 is then updated with the values in\n src_df[OVERLAP].\n\n :param workflow: Worlflow with the data frame\n\n :param dst_df: Destination dataframe (already stored in DB)\n\n :param src_df: Source dataframe, stored in temporary table\n\n :param merge_info: Dictionary with merge options\n - initial_column_names: List of initial column names in src data\n frame.\n - rename_column_names: Columns that need to be renamed in src data\n frame.\n - columns_to_uplooad: Columns to be considered for the update\n - src_selected_key: Key in the source data frame\n - dst_selected_key: key in the destination (existing) data frame\n - how_merge: How to merge: inner, outer, left or right\n\n :return: None or Exception with anomaly in the message\n \"\"\"\n # STEP 1 Rename the column names.\n src_df = src_df.rename(\n columns=dict(list(zip(\n merge_info['initial_column_names'],\n merge_info['rename_column_names']))))\n\n # STEP 2 Drop the columns not selected\n columns_to_upload = merge_info['columns_to_upload']\n src_df.drop(\n [\n col for idx, col in enumerate(list(src_df.columns))\n if not columns_to_upload[idx]\n ],\n axis=1,\n inplace=True)\n\n # If no keep_key_column value is given, initialize to True\n if 'keep_key_column' not in merge_info:\n kk_column = []\n for cname in merge_info['rename_column_names']:\n kk_column.append(is_unique_column(src_df[cname]))\n merge_info['keep_key_column'] = kk_column\n\n # Get the keys\n src_key = merge_info['src_selected_key']\n dst_key = merge_info['dst_selected_key']\n\n # STEP 3 Perform the combination\n # Separate the columns in src that overlap from those that do not\n # overlap, but include the key column in both data frames.\n overlap_names = set(dst_df.columns).intersection(src_df.columns)\n src_no_overlap_names = set(src_df.columns).difference(overlap_names)\n src_df_overlap = src_df[list(overlap_names.union({src_key}))]\n src_df_no_overlap = src_df[list(src_no_overlap_names.union({src_key}))]\n\n # Step A. 
Perform the merge of non-overlapping columns\n new_df = _perform_non_overlapping_column_merge(\n dst_df,\n src_df_no_overlap,\n merge_info,\n dst_key,\n src_key)\n\n # Step B. Perform the update with the overlapping columns\n new_df = _perform_overlap_update(\n new_df,\n src_df_overlap,\n dst_key,\n src_key,\n merge_info['how_merge'])\n\n # If the merge produced a data frame with no rows, flag it as an error to\n # prevent loosing data when there is a mistake in the key column\n if new_df.shape[0] == 0:\n raise Exception(gettext(\n 'Merge operation produced a result with no rows'))\n\n # If the merge produced a data frame with no unique columns, flag it as an\n # error to prevent the data frame from propagating without a key column\n if not has_unique_column(new_df):\n raise Exception(gettext(\n 'Merge operation produced a result without any key columns. '\n + 'Review the key columns in the data to upload.',\n ))\n\n # Store the result back in the DB\n store_dataframe(new_df, workflow)\n\n _update_is_key_field(merge_info, workflow)\n\n # Recompute all the values of the conditions in each of the actions\n for action in workflow.actions.all():\n action.update_n_rows_selected()\n" ]
[ [ "pandas.merge" ] ]
Justice-Eternal/mmrotate
[ "6519a3654e17b707c15d4aa2c5db1257587ea4c0" ]
[ "mmrotate/core/bbox/assigners/atss_kld_assigner.py" ]
[ "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.ops import points_in_polygons\nfrom mmdet.core.bbox.assigners.assign_result import AssignResult\nfrom mmdet.core.bbox.assigners.base_assigner import BaseAssigner\n\nfrom mmrotate.core.bbox.utils import GaussianMixture\nfrom ..builder import ROTATED_BBOX_ASSIGNERS\nfrom ..transforms import gt2gaussian\n\n\n@ROTATED_BBOX_ASSIGNERS.register_module()\nclass ATSSKldAssigner(BaseAssigner):\n \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n Each proposals will be assigned with `0` or a positive integer\n indicating the ground truth index.\n\n - 0: negative sample, no assigned gt\n - positive integer: positive sample, index (1-based) of assigned gt\n\n Args:\n topk (float): Number of bbox selected in each level.\n use_reassign (bool, optional): If true, it is used to reassign samples.\n \"\"\"\n\n def __init__(self, topk, use_reassign=False):\n self.topk = topk\n self.use_reassign = use_reassign\n\n def assign(self,\n bboxes,\n num_level_bboxes,\n gt_bboxes,\n gt_bboxes_ignore=None,\n gt_labels=None):\n \"\"\"Assign gt to bboxes.\n\n The assignment is done in following steps\n\n 1. compute iou between all bbox (bbox of all pyramid levels) and gt\n 2. compute center distance between all bbox and gt\n 3. on each pyramid level, for each gt, select k bbox whose center\n are closest to the gt center, so we total select k*l bbox as\n candidates for each gt\n 4. get corresponding iou for the these candidates, and compute the\n mean and std, set mean + std as the iou threshold\n 5. compute the mean aspect ratio of all gts, and set\n exp((-mean aspect ratio / 4) * (mean + std) as the iou threshold\n 6. select these candidates whose iou are greater than or equal to\n the threshold as positive\n 7. 
limit the positive sample's center in gt\n\n Args:\n bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).\n num_level_bboxes (List): num of bboxes in each level\n gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n labelled as `ignored`, e.g., crowd boxes in COCO.\n gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n\n Returns:\n :obj:`AssignResult`: The assign result.\n \"\"\"\n INF = 100000000\n num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)\n\n overlaps = self.kld_overlaps(gt_bboxes, bboxes)\n overlaps = overlaps.transpose(1, 0)\n # assign 0 by default\n assigned_gt_inds = overlaps.new_full((num_bboxes, ),\n 0,\n dtype=torch.long)\n\n if num_gt == 0 or num_bboxes == 0:\n # No ground truth or boxes, return empty assignment\n max_overlaps = overlaps.new_zeros((num_bboxes, ))\n if num_gt == 0:\n # No truth, assign everything to background\n assigned_gt_inds[:] = 0\n if gt_labels is None:\n assigned_labels = None\n else:\n assigned_labels = overlaps.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n return AssignResult(\n num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n # compute center distance between all bbox and gt\n # the center of poly\n gt_bboxes_hbb = self.get_horizontal_bboxes(gt_bboxes)\n\n gt_cx = (gt_bboxes_hbb[:, 0] + gt_bboxes_hbb[:, 2]) / 2.0\n gt_cy = (gt_bboxes_hbb[:, 1] + gt_bboxes_hbb[:, 3]) / 2.0\n gt_points = torch.stack((gt_cx, gt_cy), dim=1)\n\n bboxes = bboxes.reshape(-1, 9, 2)\n # y_first False\n pts_x = bboxes[:, :, 0::2]\n pts_y = bboxes[:, :, 1::2]\n\n pts_x_mean = pts_x.mean(dim=1).squeeze()\n pts_y_mean = pts_y.mean(dim=1).squeeze()\n bboxes_points = torch.stack((pts_x_mean, pts_y_mean), dim=1)\n\n distances = (bboxes_points[:, None, :] -\n gt_points[None, :, :]).pow(2).sum(-1).sqrt()\n\n # Selecting candidates based on the center distance\n candidate_idxs = []\n start_idx = 0\n for level, bboxes_per_level in enumerate(num_level_bboxes):\n # on each pyramid level, for each gt,\n # select k bbox whose center are closest to the gt center\n end_idx = start_idx + bboxes_per_level\n distances_per_level = distances[start_idx:end_idx, :]\n _, topk_idxs_per_level = distances_per_level.topk(\n self.topk, dim=0, largest=False)\n candidate_idxs.append(topk_idxs_per_level + start_idx)\n start_idx = end_idx\n candidate_idxs = torch.cat(candidate_idxs, dim=0)\n\n # max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n # gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)\n\n # get corresponding iou for the these candidates, and compute the\n # mean and std, set mean + std as the iou threshold\n # add\n\n gt_bboxes_ratios = self.AspectRatio(gt_bboxes)\n gt_bboxes_ratios_per_gt = gt_bboxes_ratios.mean(0)\n candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]\n overlaps_mean_per_gt = candidate_overlaps.mean(0)\n overlaps_std_per_gt = candidate_overlaps.std(0)\n overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt\n\n if self.use_reassign:\n iou_thr_weight = torch.exp((-1 / 4) * gt_bboxes_ratios_per_gt)\n overlaps_thr_per_gt = overlaps_thr_per_gt * iou_thr_weight\n\n is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]\n\n # inside_flag = torch.full([num_bboxes, num_gt],\n # 0.).to(gt_bboxes.device).float()\n # pointsJf(bboxes_points, gt_bboxes, inside_flag)\n inside_flag = points_in_polygons(bboxes_points, gt_bboxes)\n is_in_gts = inside_flag[candidate_idxs,\n torch.arange(num_gt)].to(is_pos.dtype)\n\n is_pos = is_pos & 
is_in_gts\n for gt_idx in range(num_gt):\n candidate_idxs[:, gt_idx] += gt_idx * num_bboxes\n candidate_idxs = candidate_idxs.view(-1)\n\n # if an anchor box is assigned to multiple gts,\n # the one with the highest IoU will be selected.\n overlaps_inf = torch.full_like(overlaps,\n -INF).t().contiguous().view(-1)\n index = candidate_idxs.view(-1)[is_pos.view(-1)]\n overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]\n overlaps_inf = overlaps_inf.view(num_gt, -1).t()\n\n max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)\n assigned_gt_inds[\n max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1\n\n if gt_labels is not None:\n assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n pos_inds = torch.nonzero(\n assigned_gt_inds > 0, as_tuple=False).squeeze()\n if pos_inds.numel() > 0:\n assigned_labels[pos_inds] = gt_labels[\n assigned_gt_inds[pos_inds] - 1]\n else:\n assigned_labels = None\n return AssignResult(\n num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n def kld_overlaps(self, gt_rbboxes, points, eps=1e-6):\n \"\"\"Compute overlaps between polygons and points by Kullback-Leibler\n Divergence loss.\n\n Args:\n gt_rbboxes (torch.Tensor): Ground truth polygons, shape (k, 8).\n points (torch.Tensor): Points to be assigned, shape(n, 18).\n eps (float, optional): Defaults to 1e-6.\n\n Returns:\n Tensor: Kullback-Leibler Divergence loss.\n \"\"\"\n points = points.reshape(-1, 9, 2)\n gt_rbboxes = gt_rbboxes.reshape(-1, 4, 2)\n gmm = GaussianMixture(n_components=1)\n gmm.fit(points)\n kld = self.kld_mixture2single(gmm, gt2gaussian(gt_rbboxes))\n kl_agg = kld.clamp(min=eps)\n overlaps = 1 / (2 + kl_agg)\n\n return overlaps\n\n def kld_mixture2single(self, g1, g2):\n \"\"\"Compute Kullback-Leibler Divergence between two Gaussian\n distribution.\n\n Args:\n g1 (dict[str, torch.Tensor]): Gaussian distribution 1.\n g2 (torch.Tensor): Gaussian distribution 2.\n\n Returns:\n torch.Tensor: Kullback-Leibler Divergence.\n \"\"\"\n if g2[0].shape[0] == 0:\n return g2[0].new_zeros((0, 1))\n\n p_mu = g1.mu\n p_var = g1.var\n assert p_mu.dim() == 3 and p_mu.size()[1] == 1\n assert p_var.dim() == 4 and p_var.size()[1] == 1\n N, _, d = p_mu.shape\n p_mu = p_mu.reshape(1, N, d)\n p_var = p_var.reshape(1, N, d, d)\n\n t_mu, t_var = g2\n K = t_mu.shape[0]\n t_mu = t_mu.unsqueeze(1)\n t_var = t_var.unsqueeze(1)\n\n delta = (p_mu - t_mu).unsqueeze(-1)\n t_inv = torch.inverse(t_var)\n term1 = delta.transpose(-1, -2).matmul(t_inv).matmul(delta)\\\n .reshape(K, N)\n term2 = torch.diagonal(t_inv.matmul(p_var), dim1=-2, dim2=-1)\\\n .sum(dim=-1) + torch.log(torch.det(t_var) / torch.det(p_var))\n\n return 0.5 * (term1 + term2) - 1\n\n def get_horizontal_bboxes(self, gt_rbboxes):\n \"\"\"get_horizontal_bboxes from polygons.\n\n Args:\n gt_rbboxes (torch.Tensor): Groundtruth polygons, shape (k, 8).\n\n Returns:\n gt_rect_bboxes (torch.Tensor): The horizontal bboxes, shape (k, 4).\n \"\"\"\n gt_xs, gt_ys = gt_rbboxes[:, 0::2], gt_rbboxes[:, 1::2]\n gt_xmin, _ = gt_xs.min(1)\n gt_ymin, _ = gt_ys.min(1)\n gt_xmax, _ = gt_xs.max(1)\n gt_ymax, _ = gt_ys.max(1)\n gt_rect_bboxes = torch.cat([\n gt_xmin[:, None], gt_ymin[:, None], gt_xmax[:, None], gt_ymax[:,\n None]\n ],\n dim=1)\n return gt_rect_bboxes\n\n def AspectRatio(self, gt_rbboxes):\n \"\"\"compute the aspect ratio of all gts.\n\n Args:\n gt_rbboxes (torch.Tensor): Groundtruth polygons, shape (k, 8).\n\n Returns:\n ratios (torch.Tensor): The aspect ratio of gt_rbboxes,\n shape (k, 1).\n \"\"\"\n pt1, pt2, pt3, 
pt4 = gt_rbboxes[..., :8].chunk(4, 1)\n\n edge1 = torch.sqrt(\n torch.pow(pt1[..., 0] - pt2[..., 0], 2) +\n torch.pow(pt1[..., 1] - pt2[..., 1], 2))\n edge2 = torch.sqrt(\n torch.pow(pt2[..., 0] - pt3[..., 0], 2) +\n torch.pow(pt2[..., 1] - pt3[..., 1], 2))\n\n edges = torch.stack([edge1, edge2], dim=1)\n width, _ = torch.max(edges, 1)\n height, _ = torch.min(edges, 1)\n ratios = width / height\n return ratios\n" ]
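A note on the arithmetic in `kld_mixture2single` above: with `p` the single-component Gaussian fitted to the nine reppoints and `t` the Gaussian obtained from the ground-truth polygon via `gt2gaussian`, the returned `0.5 * (term1 + term2) - 1` appears to be the closed-form KL divergence between two 2-D Gaussians, the trailing `-1` being the usual `-d/2` with `d = 2`. This reading is an interpretation of the code, not something documented in it:

\[
D_{\mathrm{KL}}\big(\mathcal{N}(\mu_p,\Sigma_p)\,\big\|\,\mathcal{N}(\mu_t,\Sigma_t)\big)
= \tfrac{1}{2}\Big[(\mu_p-\mu_t)^{\top}\Sigma_t^{-1}(\mu_p-\mu_t)
+ \operatorname{tr}\!\big(\Sigma_t^{-1}\Sigma_p\big)
+ \ln\frac{\det\Sigma_t}{\det\Sigma_p} - d\Big],\qquad d = 2 .
\]

`kld_overlaps` then clamps the divergence at `eps` and maps it to a bounded similarity,

\[
\mathrm{overlap} = \frac{1}{2 + \max(D_{\mathrm{KL}},\,\epsilon)},
\]

so identical distributions score 0.5 and the score decays toward 0 as the divergence grows.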
[ [ "torch.min", "torch.inverse", "torch.stack", "torch.nonzero", "torch.det", "torch.exp", "torch.arange", "torch.full_like", "torch.max", "torch.cat", "torch.pow" ] ]
sephiron99/polars
[ "7617eca9a85548c2978771f3a72c5937616c8f25" ]
[ "py-polars/polars/internals/expr.py" ]
[ "import copy\nfrom datetime import date, datetime, timedelta\nfrom typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union\n\nimport numpy as np\n\nfrom polars.utils import _timedelta_to_pl_duration\n\ntry:\n from polars.polars import PyExpr\n\n _DOCUMENTING = False\nexcept ImportError: # pragma: no cover\n _DOCUMENTING = True\n\nfrom polars import internals as pli\nfrom polars.datatypes import (\n DataType,\n Date,\n Datetime,\n Float64,\n Int32,\n Object,\n UInt32,\n py_type_to_dtype,\n)\n\n\ndef selection_to_pyexpr_list(\n exprs: Union[str, \"Expr\", Sequence[Union[str, \"Expr\", \"pli.Series\"]], \"pli.Series\"]\n) -> List[\"PyExpr\"]:\n if isinstance(exprs, (str, Expr, pli.Series)):\n exprs = [exprs]\n\n return [expr_to_lit_or_expr(e, str_to_lit=False)._pyexpr for e in exprs]\n\n\ndef wrap_expr(pyexpr: \"PyExpr\") -> \"Expr\":\n return Expr._from_pyexpr(pyexpr)\n\n\nclass Expr:\n \"\"\"\n Expressions that can be used in various contexts.\n \"\"\"\n\n def __init__(self) -> None:\n self._pyexpr: PyExpr # pragma: no cover\n\n def __str__(self) -> str:\n return self._pyexpr.to_str()\n\n def _repr_html_(self) -> str:\n return self._pyexpr.to_str()\n\n @staticmethod\n def _from_pyexpr(pyexpr: \"PyExpr\") -> \"Expr\":\n self = Expr.__new__(Expr)\n self._pyexpr = pyexpr\n return self\n\n def __to_pyexpr(self, other: Any) -> \"PyExpr\":\n return self.__to_expr(other)._pyexpr\n\n def __to_expr(self, other: Any) -> \"Expr\":\n if isinstance(other, Expr):\n return other\n return pli.lit(other)\n\n def __bool__(self) -> \"Expr\":\n raise ValueError(\n \"Since Expr are lazy, the truthiness of an Expr is ambiguous. \\\n Hint: use '&' or '|' to chain Expr together, not and/or.\"\n )\n\n def __invert__(self) -> \"Expr\":\n return self.is_not()\n\n def __xor__(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr._xor(self.__to_pyexpr(other)))\n\n def __rxor__(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr._xor(self.__to_pyexpr(other)))\n\n def __and__(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr._and(self.__to_pyexpr(other)))\n\n def __rand__(self, other: Any) -> \"Expr\":\n return wrap_expr(self._pyexpr._and(self.__to_pyexpr(other)))\n\n def __or__(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr._or(self.__to_pyexpr(other)))\n\n def __ror__(self, other: Any) -> \"Expr\":\n return wrap_expr(self.__to_pyexpr(other)._or(self._pyexpr))\n\n def __add__(self, other: Any) -> \"Expr\":\n return wrap_expr(self._pyexpr + self.__to_pyexpr(other))\n\n def __radd__(self, other: Any) -> \"Expr\":\n return wrap_expr(self.__to_pyexpr(other) + self._pyexpr)\n\n def __sub__(self, other: Any) -> \"Expr\":\n return wrap_expr(self._pyexpr - self.__to_pyexpr(other))\n\n def __rsub__(self, other: Any) -> \"Expr\":\n return wrap_expr(self.__to_pyexpr(other) - self._pyexpr)\n\n def __mul__(self, other: Any) -> \"Expr\":\n return wrap_expr(self._pyexpr * self.__to_pyexpr(other))\n\n def __rmul__(self, other: Any) -> \"Expr\":\n return wrap_expr(self.__to_pyexpr(other) * self._pyexpr)\n\n def __truediv__(self, other: Any) -> \"Expr\":\n return wrap_expr(self._pyexpr / self.__to_pyexpr(other))\n\n def __rtruediv__(self, other: Any) -> \"Expr\":\n return wrap_expr(self.__to_pyexpr(other) / self._pyexpr)\n\n def __floordiv__(self, other: Any) -> \"Expr\":\n return wrap_expr(self._pyexpr // self.__to_pyexpr(other))\n\n def __rfloordiv__(self, other: Any) -> \"Expr\":\n return wrap_expr(self.__to_pyexpr(other) // self._pyexpr)\n\n def 
__mod__(self, other: Any) -> \"Expr\":\n return wrap_expr(self._pyexpr % self.__to_pyexpr(other))\n\n def __rmod__(self, other: Any) -> \"Expr\":\n return wrap_expr(self.__to_pyexpr(other) % self._pyexpr)\n\n def __pow__(self, power: float, modulo: None = None) -> \"Expr\":\n return self.pow(power)\n\n def __ge__(self, other: Any) -> \"Expr\":\n return self.gt_eq(self.__to_expr(other))\n\n def __le__(self, other: Any) -> \"Expr\":\n return self.lt_eq(self.__to_expr(other))\n\n def __eq__(self, other: Any) -> \"Expr\": # type: ignore[override]\n return self.eq(self.__to_expr(other))\n\n def __ne__(self, other: Any) -> \"Expr\": # type: ignore[override]\n return self.neq(self.__to_expr(other))\n\n def __lt__(self, other: Any) -> \"Expr\":\n return self.lt(self.__to_expr(other))\n\n def __gt__(self, other: Any) -> \"Expr\":\n return self.gt(self.__to_expr(other))\n\n def eq(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr.eq(other._pyexpr))\n\n def neq(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr.neq(other._pyexpr))\n\n def gt(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr.gt(other._pyexpr))\n\n def gt_eq(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr.gt_eq(other._pyexpr))\n\n def lt_eq(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr.lt_eq(other._pyexpr))\n\n def lt(self, other: \"Expr\") -> \"Expr\":\n return wrap_expr(self._pyexpr.lt(other._pyexpr))\n\n def __neg__(self) -> \"Expr\":\n return pli.lit(0) - self\n\n def __array_ufunc__(\n self, ufunc: Callable[..., Any], method: str, *inputs: Any, **kwargs: Any\n ) -> \"Expr\":\n \"\"\"\n Numpy universal functions.\n \"\"\"\n out_type = ufunc(np.array([1])).dtype\n dtype: Optional[Type[DataType]]\n if \"float\" in str(out_type):\n dtype = Float64\n else:\n dtype = None\n\n args = [inp for inp in inputs if not isinstance(inp, Expr)]\n\n def function(s: \"pli.Series\") -> \"pli.Series\":\n return ufunc(s, *args, **kwargs) # pragma: no cover\n\n if \"dtype\" in kwargs:\n dtype = kwargs[\"dtype\"]\n\n return self.map(function, return_dtype=dtype)\n\n def to_physical(self) -> \"Expr\":\n \"\"\"\n Cast to physical representation of the logical dtype.\n\n Date -> Int32\n Datetime -> Int64\n Time -> Int64\n other -> other\n \"\"\"\n return wrap_expr(self._pyexpr.to_physical())\n\n def any(self) -> \"Expr\":\n \"\"\"\n Check if any boolean value in the column is `True`\n\n Returns\n -------\n Boolean literal\n \"\"\"\n return wrap_expr(self._pyexpr.any())\n\n def all(self) -> \"Expr\":\n \"\"\"\n Check if all boolean values in the column are `True`\n\n Returns\n -------\n Boolean literal\n \"\"\"\n return wrap_expr(self._pyexpr.all())\n\n def sqrt(self) -> \"Expr\":\n \"\"\"\n Compute the square root of the elements\n \"\"\"\n return self ** 0.5\n\n def log(self) -> \"Expr\":\n \"\"\"\n Natural logarithm, element-wise.\n\n The natural logarithm log is the inverse of the exponential function, so that log(exp(x)) = x.\n The natural logarithm is logarithm in base e.\n \"\"\"\n return np.log(self) # type: ignore\n\n def log10(self) -> \"Expr\":\n \"\"\"\n Return the base 10 logarithm of the input array, element-wise.\n \"\"\"\n return np.log10(self) # type: ignore\n\n def exp(self) -> \"Expr\":\n \"\"\"\n Return the exponential element-wise\n \"\"\"\n return np.exp(self) # type: ignore\n\n def alias(self, name: str) -> \"Expr\":\n \"\"\"\n Rename the output of an expression.\n\n Parameters\n ----------\n name\n New name.\n\n Examples\n --------\n\n >>> 
df = pl.DataFrame(\n ... {\n ... \"a\": [1, 2, 3],\n ... \"b\": [\"a\", \"b\", None],\n ... }\n ... )\n >>> df\n shape: (3, 2)\n ┌─────┬──────┐\n │ a ┆ b │\n │ --- ┆ --- │\n │ i64 ┆ str │\n ╞═════╪══════╡\n │ 1 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 2 ┆ b │\n ├╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 3 ┆ null │\n └─────┴──────┘\n >>> df.select(\n ... [\n ... pl.col(\"a\").alias(\"bar\"),\n ... pl.col(\"b\").alias(\"foo\"),\n ... ]\n ... )\n shape: (3, 2)\n ┌─────┬──────┐\n │ bar ┆ foo │\n │ --- ┆ --- │\n │ i64 ┆ str │\n ╞═════╪══════╡\n │ 1 ┆ a │\n ├╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 2 ┆ b │\n ├╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 3 ┆ null │\n └─────┴──────┘\n\n \"\"\"\n return wrap_expr(self._pyexpr.alias(name))\n\n def exclude(\n self,\n columns: Union[str, List[str], Type[DataType], Sequence[Type[DataType]]],\n ) -> \"Expr\":\n \"\"\"\n Exclude certain columns from a wildcard/regex selection.\n\n You may also use regexes in the exclude list. They must start with `^` and end with `$`.\n\n Parameters\n ----------\n columns\n Column(s) to exclude from selection.\n This can be:\n - a column name, or multiple names\n - a regular expression starting with `^` and ending with `$`\n - a dtype or multiple dtypes\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [1, 2, 3],\n ... \"b\": [\"a\", \"b\", None],\n ... \"c\": [None, 2, 1],\n ... }\n ... )\n >>> df\n shape: (3, 3)\n ┌─────┬──────┬──────┐\n │ a ┆ b ┆ c │\n │ --- ┆ --- ┆ --- │\n │ i64 ┆ str ┆ i64 │\n ╞═════╪══════╪══════╡\n │ 1 ┆ a ┆ null │\n ├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 2 ┆ b ┆ 2 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 3 ┆ null ┆ 1 │\n └─────┴──────┴──────┘\n >>> df.select(\n ... pl.col(\"*\").exclude(\"b\"),\n ... )\n shape: (3, 2)\n ┌─────┬──────┐\n │ a ┆ c │\n │ --- ┆ --- │\n │ i64 ┆ i64 │\n ╞═════╪══════╡\n │ 1 ┆ null │\n ├╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 2 ┆ 2 │\n ├╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ 3 ┆ 1 │\n └─────┴──────┘\n\n \"\"\"\n if isinstance(columns, str):\n columns = [columns]\n return wrap_expr(self._pyexpr.exclude(columns))\n elif not isinstance(columns, list) and issubclass(columns, DataType): # type: ignore\n columns = [columns] # type: ignore\n return wrap_expr(self._pyexpr.exclude_dtype(columns))\n\n if not all(\n [\n isinstance(a, str) or issubclass(a, DataType)\n for a in columns # type: ignore\n ]\n ):\n raise ValueError(\"input should be all string or all DataType\")\n\n if isinstance(columns[0], str): # type: ignore\n return wrap_expr(self._pyexpr.exclude(columns))\n else:\n return wrap_expr(self._pyexpr.exclude_dtype(columns))\n\n def keep_name(self) -> \"Expr\":\n \"\"\"\n Keep the original root name of the expression.\n\n Examples\n --------\n\n A groupby aggregation often changes the name of a column.\n With `keep_name` we can keep the original name of the column\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [1, 2, 3],\n ... \"b\": [\"a\", \"b\", None],\n ... }\n ... 
)\n >>> df.groupby(\"a\").agg(pl.col(\"b\").list()).sort(by=\"a\")\n shape: (3, 2)\n ┌─────┬────────────┐\n │ a ┆ b_agg_list │\n │ --- ┆ --- │\n │ i64 ┆ list [str] │\n ╞═════╪════════════╡\n │ 1 ┆ [\"a\"] │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2 ┆ [\"b\"] │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 3 ┆ [null] │\n └─────┴────────────┘\n\n Keep the original column name:\n\n >>> df.groupby(\"a\").agg(pl.col(\"b\").list().keep_name()).sort(by=\"a\")\n shape: (3, 2)\n ┌─────┬────────────┐\n │ a ┆ b │\n │ --- ┆ --- │\n │ i64 ┆ list [str] │\n ╞═════╪════════════╡\n │ 1 ┆ [\"a\"] │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2 ┆ [\"b\"] │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 3 ┆ [null] │\n └─────┴────────────┘\n\n \"\"\"\n\n return wrap_expr(self._pyexpr.keep_name())\n\n def prefix(self, prefix: str) -> \"Expr\":\n \"\"\"\n Add a prefix the to root column name of the expression.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"A\": [1, 2, 3, 4, 5],\n ... \"fruits\": [\"banana\", \"banana\", \"apple\", \"apple\", \"banana\"],\n ... \"B\": [5, 4, 3, 2, 1],\n ... \"cars\": [\"beetle\", \"audi\", \"beetle\", \"beetle\", \"beetle\"],\n ... }\n ... )\n >>> df\n shape: (5, 4)\n ┌─────┬────────┬─────┬────────┐\n │ A ┆ fruits ┆ B ┆ cars │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ i64 ┆ str ┆ i64 ┆ str │\n ╞═════╪════════╪═════╪════════╡\n │ 1 ┆ banana ┆ 5 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2 ┆ banana ┆ 4 ┆ audi │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 3 ┆ apple ┆ 3 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 4 ┆ apple ┆ 2 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 5 ┆ banana ┆ 1 ┆ beetle │\n └─────┴────────┴─────┴────────┘\n >>> df.select(\n ... [\n ... pl.all(),\n ... pl.all().reverse().suffix(\"_reverse\"),\n ... ]\n ... )\n shape: (5, 8)\n ┌─────┬────────┬─────┬────────┬───────────┬────────────────┬───────────┬──────────────┐\n │ A ┆ fruits ┆ B ┆ cars ┆ A_reverse ┆ fruits_reverse ┆ B_reverse ┆ cars_reverse │\n │ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │\n │ i64 ┆ str ┆ i64 ┆ str ┆ i64 ┆ str ┆ i64 ┆ str │\n ╞═════╪════════╪═════╪════════╪═══════════╪════════════════╪═══════════╪══════════════╡\n │ 1 ┆ banana ┆ 5 ┆ beetle ┆ 5 ┆ banana ┆ 1 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2 ┆ banana ┆ 4 ┆ audi ┆ 4 ┆ apple ┆ 2 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 3 ┆ apple ┆ 3 ┆ beetle ┆ 3 ┆ apple ┆ 3 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 4 ┆ apple ┆ 2 ┆ beetle ┆ 2 ┆ banana ┆ 4 ┆ audi │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 5 ┆ banana ┆ 1 ┆ beetle ┆ 1 ┆ banana ┆ 5 ┆ beetle │\n └─────┴────────┴─────┴────────┴───────────┴────────────────┴───────────┴──────────────┘\n\n \"\"\"\n return wrap_expr(self._pyexpr.prefix(prefix))\n\n def suffix(self, suffix: str) -> \"Expr\":\n \"\"\"\n Add a suffix the to root column name of the expression.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"A\": [1, 2, 3, 4, 5],\n ... \"fruits\": [\"banana\", \"banana\", \"apple\", \"apple\", \"banana\"],\n ... \"B\": [5, 4, 3, 2, 1],\n ... \"cars\": [\"beetle\", \"audi\", \"beetle\", \"beetle\", \"beetle\"],\n ... }\n ... 
)\n >>> df\n shape: (5, 4)\n ┌─────┬────────┬─────┬────────┐\n │ A ┆ fruits ┆ B ┆ cars │\n │ --- ┆ --- ┆ --- ┆ --- │\n │ i64 ┆ str ┆ i64 ┆ str │\n ╞═════╪════════╪═════╪════════╡\n │ 1 ┆ banana ┆ 5 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 2 ┆ banana ┆ 4 ┆ audi │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 3 ┆ apple ┆ 3 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 4 ┆ apple ┆ 2 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┤\n │ 5 ┆ banana ┆ 1 ┆ beetle │\n └─────┴────────┴─────┴────────┘\n >>> df.select(\n ... [\n ... pl.all(),\n ... pl.all().reverse().prefix(\"reverse_\"),\n ... ]\n ... )\n shape: (5, 8)\n ┌─────┬────────┬─────┬────────┬───────────┬────────────────┬───────────┬──────────────┐\n │ A ┆ fruits ┆ B ┆ cars ┆ reverse_A ┆ reverse_fruits ┆ reverse_B ┆ reverse_cars │\n │ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │\n │ i64 ┆ str ┆ i64 ┆ str ┆ i64 ┆ str ┆ i64 ┆ str │\n ╞═════╪════════╪═════╪════════╪═══════════╪════════════════╪═══════════╪══════════════╡\n │ 1 ┆ banana ┆ 5 ┆ beetle ┆ 5 ┆ banana ┆ 1 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 2 ┆ banana ┆ 4 ┆ audi ┆ 4 ┆ apple ┆ 2 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 3 ┆ apple ┆ 3 ┆ beetle ┆ 3 ┆ apple ┆ 3 ┆ beetle │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 4 ┆ apple ┆ 2 ┆ beetle ┆ 2 ┆ banana ┆ 4 ┆ audi │\n ├╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌┼╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌┼╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 5 ┆ banana ┆ 1 ┆ beetle ┆ 1 ┆ banana ┆ 5 ┆ beetle │\n └─────┴────────┴─────┴────────┴───────────┴────────────────┴───────────┴──────────────┘\n\n \"\"\"\n return wrap_expr(self._pyexpr.suffix(suffix))\n\n def is_not(self) -> \"Expr\":\n \"\"\"\n Negate a boolean expression.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [True, False, False],\n ... \"b\": [\"a\", \"b\", None],\n ... }\n ... 
)\n >>> df\n shape: (3, 2)\n ┌───────┬──────┐\n │ a ┆ b │\n │ --- ┆ --- │\n │ bool ┆ str │\n ╞═══════╪══════╡\n │ true ┆ a │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ false ┆ b │\n ├╌╌╌╌╌╌╌┼╌╌╌╌╌╌┤\n │ false ┆ null │\n └───────┴──────┘\n >>> df.select(pl.col(\"a\").is_not())\n shape: (3, 1)\n ┌───────┐\n │ a │\n │ --- │\n │ bool │\n ╞═══════╡\n │ false │\n ├╌╌╌╌╌╌╌┤\n │ true │\n ├╌╌╌╌╌╌╌┤\n │ true │\n └───────┘\n\n \"\"\"\n return wrap_expr(self._pyexpr.is_not())\n\n def is_null(self) -> \"Expr\":\n \"\"\"\n Create a boolean expression returning `True` where the expression contains null values.\n \"\"\"\n return wrap_expr(self._pyexpr.is_null())\n\n def is_not_null(self) -> \"Expr\":\n \"\"\"\n Create a boolean expression returning `True` where the expression does not contain null values.\n \"\"\"\n return wrap_expr(self._pyexpr.is_not_null())\n\n def is_finite(self) -> \"Expr\":\n \"\"\"\n Create a boolean expression returning `True` where the expression values are finite.\n \"\"\"\n return wrap_expr(self._pyexpr.is_finite())\n\n def is_infinite(self) -> \"Expr\":\n \"\"\"\n Create a boolean expression returning `True` where the expression values are infinite.\n \"\"\"\n return wrap_expr(self._pyexpr.is_infinite())\n\n def is_nan(self) -> \"Expr\":\n \"\"\"\n Create a boolean expression returning `True` where the expression values are NaN (Not A Number).\n \"\"\"\n return wrap_expr(self._pyexpr.is_nan())\n\n def is_not_nan(self) -> \"Expr\":\n \"\"\"\n Create a boolean expression returning `True` where the expression values are not NaN (Not A Number).\n \"\"\"\n return wrap_expr(self._pyexpr.is_not_nan())\n\n def agg_groups(self) -> \"Expr\":\n \"\"\"\n Get the group indexes of the group by operation.\n Should be used in aggregation context only.\n \"\"\"\n return wrap_expr(self._pyexpr.agg_groups())\n\n def count(self) -> \"Expr\":\n \"\"\"Count the number of values in this expression\"\"\"\n return wrap_expr(self._pyexpr.count())\n\n def len(self) -> \"Expr\":\n \"\"\"\n Alias for count\n Count the number of values in this expression\n \"\"\"\n return self.count()\n\n def slice(self, offset: int, length: int) -> \"Expr\":\n \"\"\"\n Slice the Series.\n\n Parameters\n ----------\n offset\n Start index.\n length\n Length of the slice.\n \"\"\"\n return wrap_expr(self._pyexpr.slice(offset, length))\n\n def drop_nulls(self) -> \"Expr\":\n \"\"\"\n Syntactic sugar for:\n\n >>> pl.col(\"foo\").filter(pl.col(\"foo\").is_not_null()) # doctest: +IGNORE_RESULT\n\n \"\"\"\n return self.filter(self.is_not_null())\n\n def cumsum(self, reverse: bool = False) -> \"Expr\":\n \"\"\"\n Get an array with the cumulative sum computed at every element.\n\n Parameters\n ----------\n reverse\n Reverse the operation.\n\n Notes\n -----\n Dtypes in {Int8, UInt8, Int16, UInt16} are cast to\n Int64 before summing to prevent overflow issues.\n \"\"\"\n return wrap_expr(self._pyexpr.cumsum(reverse))\n\n def cumprod(self, reverse: bool = False) -> \"Expr\":\n \"\"\"\n Get an array with the cumulative product computed at every element.\n\n Parameters\n ----------\n reverse\n Reverse the operation.\n\n Notes\n -----\n Dtypes in {Int8, UInt8, Int16, UInt16} are cast to\n Int64 before summing to prevent overflow issues.\n \"\"\"\n return wrap_expr(self._pyexpr.cumprod(reverse))\n\n def cummin(self, reverse: bool = False) -> \"Expr\":\n \"\"\"\n Get an array with the cumulative min computed at every element.\n\n Parameters\n ----------\n reverse\n Reverse the operation.\n \"\"\"\n return wrap_expr(self._pyexpr.cummin(reverse))\n\n def 
cummax(self, reverse: bool = False) -> \"Expr\":\n \"\"\"\n Get an array with the cumulative max computed at every element.\n\n Parameters\n ----------\n reverse\n Reverse the operation.\n \"\"\"\n return wrap_expr(self._pyexpr.cummax(reverse))\n\n def cumcount(self, reverse: bool = False) -> \"Expr\":\n \"\"\"\n Get an array with the cumulative count computed at every element.\n Counting from 0 to len\n\n Parameters\n ----------\n reverse\n Reverse the operation.\n \"\"\"\n return wrap_expr(self._pyexpr.cumcount(reverse))\n\n def floor(self) -> \"Expr\":\n \"\"\"\n Floor underlying floating point array to the lowest integers smaller or equal to the float value.\n\n Only works on floating point Series\n \"\"\"\n return wrap_expr(self._pyexpr.floor())\n\n def ceil(self) -> \"Expr\":\n \"\"\"\n Ceil underlying floating point array to the heighest integers smaller or equal to the float value.\n\n Only works on floating point Series\n \"\"\"\n return wrap_expr(self._pyexpr.ceil())\n\n def round(self, decimals: int) -> \"Expr\":\n \"\"\"\n Round underlying floating point data by `decimals` digits.\n\n Parameters\n ----------\n decimals\n Number of decimals to round by.\n \"\"\"\n return wrap_expr(self._pyexpr.round(decimals))\n\n def dot(self, other: Union[\"Expr\", str]) -> \"Expr\":\n \"\"\"\n Compute the dot/inner product between two Expressions\n\n Parameters\n ----------\n other\n Expression to compute dot product with\n \"\"\"\n other = expr_to_lit_or_expr(other, str_to_lit=False)\n return wrap_expr(self._pyexpr.dot(other._pyexpr))\n\n def mode(self) -> \"Expr\":\n \"\"\"\n Compute the most occurring value(s). Can return multiple Values\n \"\"\"\n return wrap_expr(self._pyexpr.mode())\n\n def cast(self, dtype: Type[Any], strict: bool = True) -> \"Expr\":\n \"\"\"\n Cast between data types.\n\n Parameters\n ----------\n dtype\n DataType to cast to\n strict\n Throw an error if a cast could not be done for instance due to an overflow\n \"\"\"\n dtype = py_type_to_dtype(dtype)\n return wrap_expr(self._pyexpr.cast(dtype, strict))\n\n def sort(self, reverse: bool = False, nulls_last: bool = False) -> \"Expr\":\n \"\"\"\n Sort this column. 
In projection/ selection context the whole column is sorted.\n If used in a groupby context, the groups are sorted.\n\n Parameters\n ----------\n reverse\n False -> order from small to large.\n True -> order from large to small.\n nulls_last\n If True nulls are considered to be larger than any valid value\n \"\"\"\n return wrap_expr(self._pyexpr.sort_with(reverse, nulls_last))\n\n def arg_sort(self, reverse: bool = False) -> \"Expr\":\n \"\"\"\n Get the index values that would sort this column.\n\n Parameters\n ----------\n reverse\n False -> order from small to large.\n True -> order from large to small.\n\n Returns\n -------\n out\n Series of type UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.arg_sort(reverse))\n\n def arg_max(self) -> \"Expr\":\n \"\"\"\n Get the index of the maximal value.\n \"\"\"\n return wrap_expr(self._pyexpr.arg_max())\n\n def arg_min(self) -> \"Expr\":\n \"\"\"\n Get the index of the minimal value.\n \"\"\"\n return wrap_expr(self._pyexpr.arg_min())\n\n def sort_by(\n self,\n by: Union[\"Expr\", str, List[Union[\"Expr\", str]]],\n reverse: Union[bool, List[bool]] = False,\n ) -> \"Expr\":\n \"\"\"\n Sort this column by the ordering of another column, or multiple other columns.\n In projection/ selection context the whole column is sorted.\n If used in a groupby context, the groups are sorted.\n\n Parameters\n ----------\n by\n The column(s) used for sorting.\n reverse\n False -> order from small to large.\n True -> order from large to small.\n \"\"\"\n if not isinstance(by, list):\n by = [by]\n if not isinstance(reverse, list):\n reverse = [reverse]\n by = selection_to_pyexpr_list(by)\n\n return wrap_expr(self._pyexpr.sort_by(by, reverse))\n\n def take(self, index: Union[List[int], \"Expr\", \"pli.Series\", np.ndarray]) -> \"Expr\":\n \"\"\"\n Take values by index.\n\n Parameters\n ----------\n index\n An expression that leads to a UInt32 dtyped Series.\n\n Returns\n -------\n Values taken by index\n \"\"\"\n if isinstance(index, (list, np.ndarray)):\n index_lit = pli.lit(pli.Series(\"\", index, dtype=UInt32))\n else:\n index_lit = pli.expr_to_lit_or_expr(index, str_to_lit=False)\n return pli.wrap_expr(self._pyexpr.take(index_lit._pyexpr))\n\n def shift(self, periods: int = 1) -> \"Expr\":\n \"\"\"\n Shift the values by a given period and fill the parts that will be empty due to this operation\n with `Nones`.\n\n Parameters\n ----------\n periods\n Number of places to shift (may be negative).\n \"\"\"\n return wrap_expr(self._pyexpr.shift(periods))\n\n def shift_and_fill(\n self, periods: int, fill_value: Union[int, float, bool, str, \"Expr\"]\n ) -> \"Expr\":\n \"\"\"\n Shift the values by a given period and fill the parts that will be empty due to this operation\n with the result of the `fill_value` expression.\n\n Parameters\n ----------\n periods\n Number of places to shift (may be negative).\n fill_value\n Fill None values with the result of this expression.\n \"\"\"\n fill_value = expr_to_lit_or_expr(fill_value, str_to_lit=True)\n return wrap_expr(self._pyexpr.shift_and_fill(periods, fill_value._pyexpr))\n\n def fill_null(self, fill_value: Union[int, float, bool, str, \"Expr\"]) -> \"Expr\":\n \"\"\"\n Fill null values using a filling strategy, literal, or Expr.\n\n fill_value\n One of:\n - \"backward\"\n - \"forward\"\n - \"min\"\n - \"max\"\n - \"mean\"\n - \"one\"\n - \"zero\"\n Or an expression.\n \"\"\"\n # we first must check if it is not an expr, as expr does not implement __bool__\n # and thus leads to a value error in the second comparisson.\n if 
not isinstance(fill_value, Expr) and fill_value in [\n \"backward\",\n \"forward\",\n \"min\",\n \"max\",\n \"mean\",\n \"zero\",\n \"one\",\n ]:\n return wrap_expr(self._pyexpr.fill_null_with_strategy(fill_value))\n\n fill_value = expr_to_lit_or_expr(fill_value, str_to_lit=True)\n return wrap_expr(self._pyexpr.fill_null(fill_value._pyexpr))\n\n def fill_nan(self, fill_value: Union[str, int, float, bool, \"Expr\"]) -> \"Expr\":\n \"\"\"\n Fill floating point NaN value with a fill value\n \"\"\"\n fill_value = expr_to_lit_or_expr(fill_value, str_to_lit=True)\n return wrap_expr(self._pyexpr.fill_nan(fill_value._pyexpr))\n\n def forward_fill(self) -> \"Expr\":\n \"\"\"\n Fill missing values with the latest seen values\n \"\"\"\n return wrap_expr(self._pyexpr.forward_fill())\n\n def backward_fill(self) -> \"Expr\":\n \"\"\"\n Fill missing values with the next to be seen values\n \"\"\"\n return wrap_expr(self._pyexpr.backward_fill())\n\n def reverse(self) -> \"Expr\":\n \"\"\"\n Reverse the selection.\n \"\"\"\n return wrap_expr(self._pyexpr.reverse())\n\n def std(self) -> \"Expr\":\n \"\"\"\n Get standard deviation.\n \"\"\"\n return wrap_expr(self._pyexpr.std())\n\n def var(self) -> \"Expr\":\n \"\"\"\n Get variance.\n \"\"\"\n return wrap_expr(self._pyexpr.var())\n\n def max(self) -> \"Expr\":\n \"\"\"\n Get maximum value.\n \"\"\"\n return wrap_expr(self._pyexpr.max())\n\n def min(self) -> \"Expr\":\n \"\"\"\n Get minimum value.\n \"\"\"\n return wrap_expr(self._pyexpr.min())\n\n def sum(self) -> \"Expr\":\n \"\"\"\n Get sum value.\n\n Notes\n -----\n Dtypes in {Int8, UInt8, Int16, UInt16} are cast to\n Int64 before summing to prevent overflow issues.\n \"\"\"\n return wrap_expr(self._pyexpr.sum())\n\n def mean(self) -> \"Expr\":\n \"\"\"\n Get mean value.\n \"\"\"\n return wrap_expr(self._pyexpr.mean())\n\n def median(self) -> \"Expr\":\n \"\"\"\n Get median value using linear interpolation.\n \"\"\"\n return wrap_expr(self._pyexpr.median())\n\n def product(self) -> \"Expr\":\n \"\"\"\n Compute the product of an expression\n \"\"\"\n return wrap_expr(self._pyexpr.product())\n\n def n_unique(self) -> \"Expr\":\n \"\"\"Count unique values.\"\"\"\n return wrap_expr(self._pyexpr.n_unique())\n\n def arg_unique(self) -> \"Expr\":\n \"\"\"Get index of first unique value.\"\"\"\n return wrap_expr(self._pyexpr.arg_unique())\n\n def unique(self) -> \"Expr\":\n \"\"\"Get unique values.\"\"\"\n return wrap_expr(self._pyexpr.unique())\n\n def first(self) -> \"Expr\":\n \"\"\"\n Get the first value.\n \"\"\"\n return wrap_expr(self._pyexpr.first())\n\n def last(self) -> \"Expr\":\n \"\"\"\n Get the last value.\n \"\"\"\n return wrap_expr(self._pyexpr.last())\n\n def list(self) -> \"Expr\":\n \"\"\"\n Aggregate to list.\n \"\"\"\n return wrap_expr(self._pyexpr.list())\n\n def over(self, expr: Union[str, \"Expr\", List[Union[\"Expr\", str]]]) -> \"Expr\":\n \"\"\"\n Apply window function over a subgroup.\n This is similar to a groupby + aggregation + self join.\n Or similar to [window functions in Postgres](https://www.postgresql.org/docs/9.1/tutorial-window.html)\n\n Parameters\n ----------\n expr\n Column(s) to group by.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"groups\": [1, 1, 2, 2, 1, 2, 3, 3, 1],\n ... \"values\": [1, 2, 3, 4, 5, 6, 7, 8, 8],\n ... }\n ... )\n >>> (\n ... df.lazy()\n ... .select(\n ... [\n ... pl.col(\"groups\").sum().over(\"groups\"),\n ... ]\n ... )\n ... .collect()\n ... 
)\n shape: (9, 1)\n ┌────────┐\n │ groups │\n │ --- │\n │ i64 │\n ╞════════╡\n │ 4 │\n ├╌╌╌╌╌╌╌╌┤\n │ 4 │\n ├╌╌╌╌╌╌╌╌┤\n │ 6 │\n ├╌╌╌╌╌╌╌╌┤\n │ 6 │\n ├╌╌╌╌╌╌╌╌┤\n │ ... │\n ├╌╌╌╌╌╌╌╌┤\n │ 6 │\n ├╌╌╌╌╌╌╌╌┤\n │ 6 │\n ├╌╌╌╌╌╌╌╌┤\n │ 6 │\n ├╌╌╌╌╌╌╌╌┤\n │ 4 │\n └────────┘\n\n \"\"\"\n\n pyexprs = selection_to_pyexpr_list(expr)\n\n return wrap_expr(self._pyexpr.over(pyexprs))\n\n def is_unique(self) -> \"Expr\":\n \"\"\"\n Get mask of unique values.\n \"\"\"\n return wrap_expr(self._pyexpr.is_unique())\n\n def is_first(self) -> \"Expr\":\n \"\"\"\n Get a mask of the first unique value.\n\n Returns\n -------\n Boolean Series\n \"\"\"\n return wrap_expr(self._pyexpr.is_first())\n\n def is_duplicated(self) -> \"Expr\":\n \"\"\"\n Get mask of duplicated values.\n \"\"\"\n return wrap_expr(self._pyexpr.is_duplicated())\n\n def quantile(self, quantile: float, interpolation: str = \"nearest\") -> \"Expr\":\n \"\"\"\n Get quantile value.\n\n\n Parameters\n ----------\n quantile\n quantile between 0.0 and 1.0\n\n interpolation\n interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']\n \"\"\"\n return wrap_expr(self._pyexpr.quantile(quantile, interpolation))\n\n def filter(self, predicate: \"Expr\") -> \"Expr\":\n \"\"\"\n Filter a single column.\n Mostly useful in in aggregation context. If you want to filter on a DataFrame level, use `LazyFrame.filter`.\n\n Parameters\n ----------\n predicate\n Boolean expression.\n \"\"\"\n return wrap_expr(self._pyexpr.filter(predicate._pyexpr))\n\n def where(self, predicate: \"Expr\") -> \"Expr\":\n \"\"\"\n Alias for filter\n\n Parameters\n ----------\n predicate\n Boolean expression.\n \"\"\"\n return self.filter(predicate)\n\n def map(\n self,\n f: Callable[[\"pli.Series\"], \"pli.Series\"],\n return_dtype: Optional[Type[DataType]] = None,\n agg_list: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Apply a custom python function. This function must produce a `Series`. Any other value will be stored as\n null/missing. If you want to apply a function over single values, consider using `apply`.\n\n [read more in the book](https://pola-rs.github.io/polars-book/user-guide/howcani/apply/udfs.html)\n\n Parameters\n ----------\n f\n Lambda/ function to apply.\n return_dtype\n Dtype of the output Series.\n agg_list\n\n \"\"\"\n if return_dtype is not None:\n return_dtype = py_type_to_dtype(return_dtype)\n return wrap_expr(self._pyexpr.map(f, return_dtype, agg_list))\n\n def apply(\n self,\n f: Union[Callable[[\"pli.Series\"], \"pli.Series\"], Callable[[Any], Any]],\n return_dtype: Optional[Type[DataType]] = None,\n ) -> \"Expr\":\n \"\"\"\n Apply a custom function in a GroupBy or Projection context.\n\n Depending on the context it has the following behavior:\n\n ## Context\n\n * Select/Project\n expected type `f`: Callable[[Any], Any]\n Applies a python function over each individual value in the column.\n * GroupBy\n expected type `f`: Callable[[Series], Series]\n Applies a python function over each group.\n\n Parameters\n ----------\n f\n Lambda/ function to apply.\n return_dtype\n Dtype of the output Series.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [1, 2, 1, 1],\n ... \"b\": [\"a\", \"b\", \"c\", \"c\"],\n ... }\n ... )\n >>> (\n ... df.lazy()\n ... .groupby(\"b\", maintain_order=True)\n ... .agg(\n ... [\n ... pl.col(\"a\").apply(lambda x: x.sum()),\n ... ]\n ... )\n ... .collect()\n ... 
)\n shape: (3, 2)\n ┌─────┬─────┐\n │ b ┆ a │\n │ --- ┆ --- │\n │ str ┆ i64 │\n ╞═════╪═════╡\n │ a ┆ 1 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ b ┆ 2 │\n ├╌╌╌╌╌┼╌╌╌╌╌┤\n │ c ┆ 2 │\n └─────┴─────┘\n\n \"\"\"\n\n # input x: Series of type list containing the group values\n def wrap_f(x: \"pli.Series\") -> \"pli.Series\": # pragma: no cover\n return x.apply(f, return_dtype=return_dtype)\n\n return self.map(wrap_f, agg_list=True)\n\n def flatten(self) -> \"Expr\":\n \"\"\"\n Alias for explode.\n\n Explode a list or utf8 Series. This means that every item is expanded to a new row.\n\n Returns\n -------\n Exploded Series of same dtype\n \"\"\"\n\n return wrap_expr(self._pyexpr.explode())\n\n def explode(self) -> \"Expr\":\n \"\"\"\n Explode a list or utf8 Series. This means that every item is expanded to a new row.\n\n Returns\n -------\n Exploded Series of same dtype\n \"\"\"\n return wrap_expr(self._pyexpr.explode())\n\n def take_every(self, n: int) -> \"Expr\":\n \"\"\"\n Take every nth value in the Series and return as a new Series.\n \"\"\"\n return wrap_expr(self._pyexpr.take_every(n))\n\n def head(self, n: Optional[int] = None) -> \"Expr\":\n \"\"\"\n Take the first n values.\n \"\"\"\n return wrap_expr(self._pyexpr.head(n))\n\n def tail(self, n: Optional[int] = None) -> \"Expr\":\n \"\"\"\n Take the last n values.\n \"\"\"\n return wrap_expr(self._pyexpr.tail(n))\n\n def pow(self, exponent: float) -> \"Expr\":\n \"\"\"\n Raise expression to the power of exponent.\n \"\"\"\n return wrap_expr(self._pyexpr.pow(exponent))\n\n def is_in(self, other: Union[\"Expr\", List[Any]]) -> \"Expr\":\n \"\"\"\n Check if elements of this Series are in the right Series, or List values of the right Series.\n\n Parameters\n ----------\n other\n Series of primitive type or List type.\n\n Returns\n -------\n Expr that evaluates to a Boolean Series.\n\n Examples\n --------\n >>> df = pl.DataFrame(\n ... {\"sets\": [[1, 2, 3], [1, 2], [9, 10]], \"optional_members\": [1, 2, 3]}\n ... )\n >>> df.select([pl.col(\"optional_members\").is_in(\"sets\").alias(\"contains\")])\n shape: (3, 1)\n ┌──────────┐\n │ contains │\n │ --- │\n │ bool │\n ╞══════════╡\n │ true │\n ├╌╌╌╌╌╌╌╌╌╌┤\n │ true │\n ├╌╌╌╌╌╌╌╌╌╌┤\n │ false │\n └──────────┘\n\n \"\"\"\n if isinstance(other, list):\n other = pli.lit(pli.Series(other))\n else:\n other = expr_to_lit_or_expr(other, str_to_lit=False)\n return wrap_expr(self._pyexpr.is_in(other._pyexpr))\n\n def repeat_by(self, by: Union[\"Expr\", str]) -> \"Expr\":\n \"\"\"\n Repeat the elements in this Series `n` times by dictated by the number given by `by`.\n The elements are expanded into a `List`\n\n Parameters\n ----------\n by\n Numeric column that determines how often the values will be repeated.\n The column will be coerced to UInt32. 
Give this dtype to make the coercion a no-op.\n\n Returns\n -------\n Series of type List\n \"\"\"\n by = expr_to_lit_or_expr(by, False)\n return wrap_expr(self._pyexpr.repeat_by(by._pyexpr))\n\n def is_between(\n self,\n start: Union[\"Expr\", datetime],\n end: Union[\"Expr\", datetime],\n include_bounds: Union[bool, Sequence[bool]] = False,\n ) -> \"Expr\":\n \"\"\"\n Check if this expression is between start and end.\n\n Parameters\n ----------\n start\n Lower bound as primitive type or datetime.\n end\n Upper bound as primitive type or datetime.\n include_bounds\n False: Exclude both start and end (default).\n True: Include both start and end.\n [False, False]: Exclude start and exclude end.\n [True, True]: Include start and include end.\n [False, True]: Exclude start and include end.\n [True, False]: Include start and exclude end.\n\n Returns\n -------\n Expr that evaluates to a Boolean Series.\n \"\"\"\n cast_to_datetime = False\n if isinstance(start, datetime):\n start = pli.lit(start)\n cast_to_datetime = True\n if isinstance(end, datetime):\n end = pli.lit(end)\n cast_to_datetime = True\n if cast_to_datetime:\n expr = self.cast(Datetime)\n else:\n expr = self\n if include_bounds is False or include_bounds == [False, False]:\n return ((expr > start) & (expr < end)).alias(\"is_between\")\n elif include_bounds is True or include_bounds == [True, True]:\n return ((expr >= start) & (expr <= end)).alias(\"is_between\")\n elif include_bounds == [False, True]:\n return ((expr > start) & (expr <= end)).alias(\"is_between\")\n elif include_bounds == [True, False]:\n return ((expr >= start) & (expr < end)).alias(\"is_between\")\n else:\n raise ValueError(\n \"include_bounds should be a boolean or [boolean, boolean].\"\n )\n\n def hash(self, k0: int = 0, k1: int = 1, k2: int = 2, k3: int = 3) -> \"Expr\":\n \"\"\"\n Hash the Series.\n\n The hash value is of type `Datetime`\n\n Parameters\n ----------\n k0\n seed parameter\n k1\n seed parameter\n k2\n seed parameter\n k3\n seed parameter\n \"\"\"\n return wrap_expr(self._pyexpr.hash(k0, k1, k2, k3))\n\n def reinterpret(self, signed: bool) -> \"Expr\":\n \"\"\"\n Reinterpret the underlying bits as a signed/unsigned integer.\n This operation is only allowed for 64bit integers. For lower bits integers,\n you can safely use that cast operation.\n\n Parameters\n ----------\n signed\n True -> pl.Int64\n False -> pl.UInt64\n \"\"\"\n return wrap_expr(self._pyexpr.reinterpret(signed))\n\n def inspect(self, fmt: str = \"{}\") -> \"Expr\":\n \"\"\"\n Prints the value that this expression evaluates to and passes on the value.\n\n >>> df = pl.DataFrame({\"foo\": [1, 1, 2]})\n >>> df.select(pl.col(\"foo\").cumsum().inspect(\"value is: {}\").alias(\"bar\"))\n value is: shape: (3,)\n Series: 'foo' [i64]\n [\n 1\n 2\n 4\n ]\n shape: (3, 1)\n ┌─────┐\n │ bar │\n │ --- │\n │ i64 │\n ╞═════╡\n │ 1 │\n ├╌╌╌╌╌┤\n │ 2 │\n ├╌╌╌╌╌┤\n │ 4 │\n └─────┘\n\n \"\"\"\n\n def inspect(s: \"pli.Series\") -> \"pli.Series\": # pragma: no cover\n print(fmt.format(s))\n return s\n\n return self.map(inspect, return_dtype=None, agg_list=True)\n\n def interpolate(self) -> \"Expr\":\n \"\"\"\n Interpolate intermediate values. 
The interpolation method is linear.\n \"\"\"\n return wrap_expr(self._pyexpr.interpolate())\n\n def rolling_min(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n apply a rolling min (moving min) over the values in this array.\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resulting\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_expr(\n self._pyexpr.rolling_min(window_size, weights, min_periods, center)\n )\n\n def rolling_max(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Apply a rolling max (moving max) over the values in this array.\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resulting\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_expr(\n self._pyexpr.rolling_max(window_size, weights, min_periods, center)\n )\n\n def rolling_mean(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Apply a rolling mean (moving mean) over the values in this array.\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resulting\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"A\": [1.0, 8.0, 6.0, 2.0, 16.0, 10.0]})\n >>> df.select(\n ... [\n ... pl.col(\"A\").rolling_mean(window_size=2),\n ... ]\n ... 
)\n shape: (6, 1)\n ┌──────┐\n │ A │\n │ --- │\n │ f64 │\n ╞══════╡\n │ null │\n ├╌╌╌╌╌╌┤\n │ 4.5 │\n ├╌╌╌╌╌╌┤\n │ 7 │\n ├╌╌╌╌╌╌┤\n │ 4 │\n ├╌╌╌╌╌╌┤\n │ 9 │\n ├╌╌╌╌╌╌┤\n │ 13 │\n └──────┘\n\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_expr(\n self._pyexpr.rolling_mean(window_size, weights, min_periods, center)\n )\n\n def rolling_sum(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Apply a rolling sum (moving sum) over the values in this array.\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resulting\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length of the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_expr(\n self._pyexpr.rolling_sum(window_size, weights, min_periods, center)\n )\n\n def rolling_std(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Compute a rolling std dev\n\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. The resulting\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_expr(\n self._pyexpr.rolling_std(window_size, weights, min_periods, center)\n )\n\n def rolling_var(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Compute a rolling variance.\n\n A window of length `window_size` will traverse the array. The values that fill this window\n will (optionally) be multiplied with the weights given by the `weight` vector. 
The resulting\n values will be aggregated to their sum.\n\n Parameters\n ----------\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_expr(\n self._pyexpr.rolling_var(window_size, weights, min_periods, center)\n )\n\n def rolling_apply(\n self,\n function: Callable[[\"pli.Series\"], Any],\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Allows a custom rolling window function.\n Prefer the specific rolling window functions over this one, as they are faster.\n\n Prefer:\n\n * rolling_min\n * rolling_max\n * rolling_mean\n * rolling_sum\n\n Parameters\n ----------\n function\n Aggregation function\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"A\": [1.0, 2.0, 9.0, 2.0, 13.0],\n ... }\n ... )\n >>> df.select(\n ... [\n ... pl.col(\"A\").rolling_apply(lambda s: s.std(), window_size=3),\n ... ]\n ... )\n shape: (5, 1)\n ┌────────────────────┐\n │ A │\n │ --- │\n │ f64 │\n ╞════════════════════╡\n │ null │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ null │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 4.358898943540674 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 4.041451884327381 │\n ├╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┤\n │ 5.5677643628300215 │\n └────────────────────┘\n\n \"\"\"\n if min_periods is None:\n min_periods = window_size\n return wrap_expr(\n self._pyexpr.rolling_apply(\n function, window_size, weights, min_periods, center\n )\n )\n\n def rolling_median(\n self,\n window_size: int,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Compute a rolling median\n\n Parameters\n ----------\n window_size\n Size of the rolling window\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n \"\"\"\n return wrap_expr(\n self._pyexpr.rolling_median(window_size, weights, min_periods, center)\n )\n\n def rolling_quantile(\n self,\n quantile: float,\n interpolation: str = \"nearest\",\n window_size: int = 2,\n weights: Optional[List[float]] = None,\n min_periods: Optional[int] = None,\n center: bool = False,\n ) -> \"Expr\":\n \"\"\"\n Compute a rolling quantile\n\n Parameters\n ----------\n quantile\n quantile to compute\n interpolation\n interpolation type, options: ['nearest', 'higher', 'lower', 'midpoint', 'linear']\n window_size\n The length of the window.\n weights\n An optional slice with the same length as the window that will be multiplied\n elementwise with the values in the window.\n min_periods\n 
The number of values in the window that should be non-null before computing a result.\n If None, it will be set equal to window size.\n center\n Set the labels at the center of the window\n \"\"\"\n return wrap_expr(\n self._pyexpr.rolling_quantile(\n quantile, interpolation, window_size, weights, min_periods, center\n )\n )\n\n def rolling_skew(self, window_size: int, bias: bool = True) -> \"Expr\":\n \"\"\"\n Compute a rolling skew\n\n Parameters\n ----------\n window_size\n Size of the rolling window\n bias\n If False, then the calculations are corrected for statistical bias.\n \"\"\"\n return wrap_expr(self._pyexpr.rolling_skew(window_size, bias))\n\n def abs(self) -> \"Expr\":\n \"\"\"\n Take absolute values\n \"\"\"\n return wrap_expr(self._pyexpr.abs())\n\n def argsort(self, reverse: bool = False) -> \"Expr\":\n \"\"\"\n Index location of the sorted variant of this Series.\n\n Parameters\n ----------\n reverse\n Reverse the ordering. Default is from low to high.\n \"\"\"\n return pli.argsort_by([self], [reverse])\n\n def rank(self, method: str = \"average\", reverse: bool = False) -> \"Expr\":\n \"\"\"\n Assign ranks to data, dealing with ties appropriately.\n\n Parameters\n ----------\n method\n {'average', 'min', 'max', 'dense', 'ordinal', 'random'}, optional\n The method used to assign ranks to tied elements.\n The following methods are available (default is 'average'):\n - 'average': The average of the ranks that would have been assigned to\n all the tied values is assigned to each value.\n - 'min': The minimum of the ranks that would have been assigned to all\n the tied values is assigned to each value. (This is also\n referred to as \"competition\" ranking.)\n - 'max': The maximum of the ranks that would have been assigned to all\n the tied values is assigned to each value.\n - 'dense': Like 'min', but the rank of the next highest element is\n assigned the rank immediately after those assigned to the tied\n elements.\n - 'ordinal': All values are given a distinct rank, corresponding to\n the order that the values occur in `a`.\n - 'random': Like 'ordinal', but the rank for ties is not dependent\n on the order that the values occur in `a`.\n reverse\n reverse the operation\n \"\"\"\n return wrap_expr(self._pyexpr.rank(method, reverse))\n\n def diff(self, n: int = 1, null_behavior: str = \"ignore\") -> \"Expr\":\n \"\"\"\n Calculate the n-th discrete difference.\n\n Parameters\n ----------\n n\n number of slots to shift\n null_behavior\n {'ignore', 'drop'}\n \"\"\"\n return wrap_expr(self._pyexpr.diff(n, null_behavior))\n\n def pct_change(self, n: int = 1) -> \"Expr\":\n \"\"\"\n Percentage change (as fraction) between current element and most-recent\n non-null element at least n period(s) before the current element.\n\n Computes the change from the previous row by default.\n\n Parameters\n ----------\n n\n periods to shift for forming percent change.\n \"\"\"\n return wrap_expr(self._pyexpr.pct_change(n))\n\n def skew(self, bias: bool = True) -> \"Expr\":\n r\"\"\"Compute the sample skewness of a data set.\n For normally distributed data, the skewness should be about zero. For\n unimodal continuous distributions, a skewness value greater than zero means\n that there is more weight in the right tail of the distribution. 
The\n function `skewtest` can be used to determine if the skewness value\n is close enough to zero, statistically speaking.\n\n\n See scipy.stats for more information.\n\n Parameters\n ----------\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n\n Notes\n -----\n The sample skewness is computed as the Fisher-Pearson coefficient\n of skewness, i.e.\n\n .. math:: g_1=\\frac{m_3}{m_2^{3/2}}\n\n where\n\n .. math:: m_i=\\frac{1}{N}\\sum_{n=1}^N(x[n]-\\bar{x})^i\n\n is the biased sample :math:`i\\texttt{th}` central moment, and\n :math:`\\bar{x}` is\n the sample mean. If ``bias`` is False, the calculations are\n corrected for bias and the value computed is the adjusted\n Fisher-Pearson standardized moment coefficient, i.e.\n\n .. math:: G_1=\\frac{k_3}{k_2^{3/2}}= \\frac{\\sqrt{N(N-1)}}{N-2}\\frac{m_3}{m_2^{3/2}}\n\n \"\"\"\n return wrap_expr(self._pyexpr.skew(bias))\n\n def kurtosis(self, fisher: bool = True, bias: bool = True) -> \"Expr\":\n \"\"\"Compute the kurtosis (Fisher or Pearson) of a dataset.\n Kurtosis is the fourth central moment divided by the square of the\n variance. If Fisher's definition is used, then 3.0 is subtracted from\n the result to give 0.0 for a normal distribution.\n If bias is False then the kurtosis is calculated using k statistics to\n eliminate bias coming from biased moment estimators\n\n See scipy.stats for more information\n\n Parameters\n ----------\n fisher : bool, optional\n If True, Fisher's definition is used (normal ==> 0.0). If False,\n Pearson's definition is used (normal ==> 3.0).\n bias : bool, optional\n If False, then the calculations are corrected for statistical bias.\n \"\"\"\n return wrap_expr(self._pyexpr.kurtosis(fisher, bias))\n\n def clip(self, min_val: Union[int, float], max_val: Union[int, float]) -> \"Expr\":\n \"\"\"\n Clip (limit) the values in an array to any value that fits in 64 floating poitns range.\n\n Only works for the following dtypes: {Int32, Int64, Float32, Float64, UInt32}.\n\n If you want to clip other dtypes, consider writing a when -> then -> otherwise expression\n\n Parameters\n ----------\n min_val\n Minimum value.\n max_val\n Maximum value.\n \"\"\"\n return wrap_expr(self._pyexpr.clip(min_val, max_val))\n\n def lower_bound(self) -> \"Expr\":\n \"\"\"\n Returns a unit Series with the lowest value possible for the dtype of this expression.\n \"\"\"\n return wrap_expr(self._pyexpr.lower_bound())\n\n def upper_bound(self) -> \"Expr\":\n \"\"\"\n Returns a unit Series with the highest value possible for the dtype of this expression.\n \"\"\"\n return wrap_expr(self._pyexpr.upper_bound())\n\n def sin(self) -> \"Expr\":\n \"\"\"\n Compute the element-wise value for Trigonometric sine on an array\n\n Returns\n -------\n Series of dtype Float64\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"a\": [0.0]})\n >>> df.select(pl.col(\"a\").sin())\n shape: (1, 1)\n ┌─────┐\n │ a │\n │ --- │\n │ f64 │\n ╞═════╡\n │ 0.0 │\n └─────┘\n\n \"\"\"\n return np.sin(self) # type: ignore\n\n def cos(self) -> \"Expr\":\n \"\"\"\n Compute the element-wise value for Trigonometric cosine on an array\n\n Returns\n -------\n Series of dtype Float64\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"a\": [0.0]})\n >>> df.select(pl.col(\"a\").cos())\n shape: (1, 1)\n ┌─────┐\n │ a │\n │ --- │\n │ f64 │\n ╞═════╡\n │ 1 │\n └─────┘\n\n \"\"\"\n return np.cos(self) # type: ignore\n\n def tan(self) -> \"Expr\":\n \"\"\"\n Compute the element-wise value for Trigonometric tangent on an array\n\n Returns\n 
-------\n Series of dtype Float64\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"a\": [1.0]})\n >>> df.select(pl.col(\"a\").tan().round(2))\n shape: (1, 1)\n ┌──────┐\n │ a │\n │ --- │\n │ f64 │\n ╞══════╡\n │ 1.56 │\n └──────┘\n\n \"\"\"\n return np.tan(self) # type: ignore\n\n def arcsin(self) -> \"Expr\":\n \"\"\"\n Compute the element-wise value for Trigonometric sine on an array\n\n Returns\n -------\n Series of dtype Float64\n\n Examples\n --------\n >>> df = pl.DataFrame({\"a\": [1.0]})\n >>> df.select(pl.col(\"a\").arcsin())\n shape: (1, 1)\n ┌────────────────────┐\n │ a │\n │ --- │\n │ f64 │\n ╞════════════════════╡\n │ 1.5707963267948966 │\n └────────────────────┘\n\n \"\"\"\n return np.arcsin(self) # type: ignore\n\n def arccos(self) -> \"Expr\":\n \"\"\"\n Compute the element-wise value for Trigonometric cosine on an array\n\n Returns\n -------\n Series of dtype Float64\n\n Examples\n --------\n >>> df = pl.DataFrame({\"a\": [0.0]})\n >>> df.select(pl.col(\"a\").arccos())\n shape: (1, 1)\n ┌────────────────────┐\n │ a │\n │ --- │\n │ f64 │\n ╞════════════════════╡\n │ 1.5707963267948966 │\n └────────────────────┘\n\n \"\"\"\n return np.arccos(self) # type: ignore\n\n def arctan(self) -> \"Expr\":\n \"\"\"\n Compute the element-wise value for Trigonometric tangent on an array\n\n Returns\n -------\n Series of dtype Float64\n\n Examples\n --------\n >>> df = pl.DataFrame({\"a\": [1.0]})\n >>> df.select(pl.col(\"a\").arctan())\n shape: (1, 1)\n ┌────────────────────┐\n │ a │\n │ --- │\n │ f64 │\n ╞════════════════════╡\n │ 0.7853981633974483 │\n └────────────────────┘\n\n \"\"\"\n return np.arctan(self) # type: ignore\n\n def reshape(self, dims: Tuple[int, ...]) -> \"Expr\":\n \"\"\"\n Reshape this Expr to a flat series, shape: (len,)\n or a List series, shape: (rows, cols)\n\n if a -1 is used in any of the dimensions, that dimension is inferred.\n\n Parameters\n ----------\n dims\n Tuple of the dimension sizes\n\n Returns\n -------\n Expr\n \"\"\"\n return wrap_expr(self._pyexpr.reshape(dims))\n\n def shuffle(self, seed: Optional[int] = None) -> \"Expr\":\n \"\"\"\n Shuffle the contents of this expr.\n\n Parameters\n ----------\n seed\n Seed initialization. If None given numpy is used.\n \"\"\"\n if seed is None:\n seed = int(np.random.randint(0, 10000))\n return wrap_expr(self._pyexpr.shuffle(seed))\n\n def sample(\n self,\n fraction: float = 1.0,\n with_replacement: bool = True,\n seed: Optional[int] = 0,\n ) -> \"Expr\":\n \"\"\"\n Sample a fraction of the `Series`.\n\n Parameters\n ----------\n fraction\n Fraction 0.0 <= value <= 1.0\n with_replacement\n Allow values to be sampled more than once.\n seed\n Seed initialization. 
If None given numpy is used.\n \"\"\"\n if seed is None:\n seed = int(np.random.randint(0, 10000))\n return wrap_expr(self._pyexpr.sample_frac(fraction, with_replacement, seed))\n\n def ewm_mean(\n self,\n com: Optional[float] = None,\n span: Optional[float] = None,\n half_life: Optional[float] = None,\n alpha: Optional[float] = None,\n adjust: bool = True,\n min_periods: int = 1,\n ) -> \"Expr\":\n r\"\"\"\n Exponential moving average.\n\n Parameters\n ----------\n com\n Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \\;for\\; com >= 0`.\n span\n Specify decay in terms of span, :math:`alpha = 2/(span + 1) \\;for\\; span >= 1`\n half_life\n Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \\;for\\; halflife > 0`\n alpha\n Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.\n adjust\n Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings\n\n - When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`\n - When adjust = False the EW function is calculated recursively.\n min_periods\n Minimum number of observations in window required to have a value (otherwise result is Null).\n\n \"\"\"\n alpha = _prepare_alpha(com, span, half_life, alpha)\n return wrap_expr(self._pyexpr.ewm_mean(alpha, adjust, min_periods))\n\n def ewm_std(\n self,\n com: Optional[float] = None,\n span: Optional[float] = None,\n half_life: Optional[float] = None,\n alpha: Optional[float] = None,\n adjust: bool = True,\n min_periods: int = 1,\n ) -> \"Expr\":\n r\"\"\"\n Exponential moving standard deviation.\n\n Parameters\n ----------\n com\n Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \\;for\\; com >= 0`.\n span\n Specify decay in terms of span, :math:`alpha = 2/(span + 1) \\;for\\; span >= 1`\n half_life\n Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \\;for\\; halflife > 0`\n alpha\n Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.\n adjust\n Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings\n\n - When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`\n - When adjust = False the EW function is calculated recursively.\n min_periods\n Minimum number of observations in window required to have a value (otherwise result is Null).\n\n \"\"\"\n alpha = _prepare_alpha(com, span, half_life, alpha)\n return wrap_expr(self._pyexpr.ewm_std(alpha, adjust, min_periods))\n\n def ewm_var(\n self,\n com: Optional[float] = None,\n span: Optional[float] = None,\n half_life: Optional[float] = None,\n alpha: Optional[float] = None,\n adjust: bool = True,\n min_periods: int = 1,\n ) -> \"Expr\":\n r\"\"\"\n Exponential moving standard deviation.\n\n Parameters\n ----------\n com\n Specify decay in terms of center of mass, :math:`alpha = 1/(1 + com) \\;for\\; com >= 0`.\n span\n Specify decay in terms of span, :math:`alpha = 2/(span + 1) \\;for\\; span >= 1`\n half_life\n Specify decay in terms of half-life, :math:`alpha = 1 - exp(-ln(2) / halflife) \\;for\\; halflife > 0`\n alpha\n Specify smoothing factor alpha directly, :math:`0 < alpha < 1`.\n adjust\n Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings\n\n - When adjust = True the EW function is calculated using weights :math:`w_i = (1 - alpha)^i`\n - When adjust = False the EW function is calculated recursively.\n 
min_periods\n Minimum number of observations in window required to have a value (otherwise result is Null).\n\n \"\"\"\n alpha = _prepare_alpha(com, span, half_life, alpha)\n return wrap_expr(self._pyexpr.ewm_var(alpha, adjust, min_periods))\n\n def extend_constant(\n self, value: Optional[Union[int, float, str, bool]], n: int\n ) -> \"Expr\":\n \"\"\"\n Extend the Series with given number of values.\n\n Parameters\n ----------\n value\n The value to extend the Series with. This value may be None to fill with nulls.\n n\n The number of values to extend.\n \"\"\"\n return wrap_expr(self._pyexpr.extend_constant(value, n))\n\n # Below are the namespaces defined. Keep these at the end of the definition of Expr, as to not confuse mypy with\n # the type annotation `str` with the namespace \"str\"\n\n @property\n def dt(self) -> \"ExprDateTimeNameSpace\":\n \"\"\"\n Create an object namespace of all datetime related methods.\n \"\"\"\n return ExprDateTimeNameSpace(self)\n\n @property\n def str(self) -> \"ExprStringNameSpace\":\n \"\"\"\n Create an object namespace of all string related methods.\n \"\"\"\n return ExprStringNameSpace(self)\n\n @property\n def arr(self) -> \"ExprListNameSpace\":\n \"\"\"\n Create an object namespace of all datetime related methods.\n \"\"\"\n return ExprListNameSpace(self)\n\n\nclass ExprListNameSpace:\n \"\"\"\n Namespace for list related expressions\n \"\"\"\n\n def __init__(self, expr: Expr):\n self._pyexpr = expr._pyexpr\n\n def lengths(self) -> Expr:\n \"\"\"\n Get the length of the arrays as UInt32.\n \"\"\"\n return wrap_expr(self._pyexpr.arr_lengths())\n\n def sum(self) -> \"Expr\":\n \"\"\"\n Sum all the arrays in the list\n \"\"\"\n return wrap_expr(self._pyexpr.lst_sum())\n\n def max(self) -> \"Expr\":\n \"\"\"\n Compute the max value of the arrays in the list\n \"\"\"\n return wrap_expr(self._pyexpr.lst_max())\n\n def min(self) -> \"Expr\":\n \"\"\"\n Compute the min value of the arrays in the list\n \"\"\"\n return wrap_expr(self._pyexpr.lst_min())\n\n def mean(self) -> \"Expr\":\n \"\"\"\n Compute the mean value of the arrays in the list\n \"\"\"\n return wrap_expr(self._pyexpr.lst_mean())\n\n def sort(self, reverse: bool = False) -> \"Expr\":\n \"\"\"\n Sort the arrays in the list\n \"\"\"\n return wrap_expr(self._pyexpr.lst_sort(reverse))\n\n def reverse(self) -> \"Expr\":\n \"\"\"\n Reverse the arrays in the list\n \"\"\"\n return wrap_expr(self._pyexpr.lst_reverse())\n\n def unique(self) -> \"Expr\":\n \"\"\"\n Get the unique/distinct values in the list\n \"\"\"\n return wrap_expr(self._pyexpr.lst_unique())\n\n def concat(\n self, other: Union[List[Union[Expr, str]], Expr, str, \"pli.Series\", List[Any]]\n ) -> \"Expr\":\n \"\"\"\n Concat the arrays in a Series dtype List in linear time.\n\n Parameters\n ----------\n other\n Columns to concat into a List Series\n \"\"\"\n if isinstance(other, list) and (\n not isinstance(other[0], (Expr, str, pli.Series))\n ):\n return self.concat(pli.Series([other]))\n\n other_list: List[Union[Expr, str, \"pli.Series\"]]\n if not isinstance(other, list):\n other_list = [other]\n else:\n other_list = copy.copy(other) # type: ignore\n\n other_list.insert(0, wrap_expr(self._pyexpr))\n return pli.concat_list(other_list)\n\n def get(self, index: int) -> \"Expr\":\n \"\"\"\n Get the value by index in the sublists.\n So index `0` would return the first item of every sublist\n and index `-1` would return the last item of every sublist\n if an index is out of bounds, it will return a `None`.\n\n Parameters\n ----------\n 
index\n Index to return per sublist\n \"\"\"\n return wrap_expr(self._pyexpr.lst_get(index))\n\n def first(self) -> \"Expr\":\n \"\"\"\n Get the first value of the sublists.\n \"\"\"\n return self.get(0)\n\n def last(self) -> \"Expr\":\n \"\"\"\n Get the last value of the sublists.\n \"\"\"\n return self.get(-1)\n\n def contains(self, item: Union[float, str, bool, int, date, datetime]) -> \"Expr\":\n \"\"\"\n Check if sublists contain the given item.\n\n Parameters\n ----------\n item\n Item that will be checked for membership\n\n Returns\n -------\n Boolean mask\n \"\"\"\n return wrap_expr(self._pyexpr).map(lambda s: s.arr.contains(item))\n\n def join(self, separator: str) -> \"Expr\":\n \"\"\"\n Join all string items in a sublist and place a separator between them.\n This errors if inner type of list `!= Utf8`.\n\n Parameters\n ----------\n separator\n string to separate the items with\n\n Returns\n -------\n Series of dtype Utf8\n \"\"\"\n\n return wrap_expr(self._pyexpr.lst_join(separator))\n\n\nclass ExprStringNameSpace:\n \"\"\"\n Namespace for string related expressions\n \"\"\"\n\n def __init__(self, expr: Expr):\n self._pyexpr = expr._pyexpr\n\n def strptime(\n self,\n datatype: Union[Type[Date], Type[Datetime]],\n fmt: Optional[str] = None,\n strict: bool = True,\n exact: bool = True,\n ) -> Expr:\n \"\"\"\n Parse utf8 expression as a Date/Datetimetype.\n\n Parameters\n ----------\n datatype\n Date | Datetime.\n fmt\n format to use, see the following link for examples:\n https://docs.rs/chrono/latest/chrono/format/strftime/index.html\n\n example: \"%y-%m-%d\".\n strict\n raise an error if any conversion fails\n exact\n - If True, require an exact format match.\n - If False, allow the format to match anywhere in the target string.\n \"\"\"\n if not issubclass(datatype, DataType):\n raise ValueError(\n f\"expected: {DataType} got: {datatype}\"\n ) # pragma: no cover\n if datatype == Date:\n return wrap_expr(self._pyexpr.str_parse_date(fmt, strict, exact))\n elif datatype == Datetime:\n return wrap_expr(self._pyexpr.str_parse_datetime(fmt, strict, exact))\n else:\n raise ValueError(\n \"dtype should be of type {Date, Datetime}\"\n ) # pragma: no cover\n\n def lengths(self) -> Expr:\n \"\"\"\n Get the length of the Strings as UInt32.\n \"\"\"\n return wrap_expr(self._pyexpr.str_lengths())\n\n def concat(self, delimiter: str = \"-\") -> \"Expr\":\n \"\"\"\n Vertically concat the values in the Series to a single string value.\n\n Returns\n -------\n Series of dtype Utf8\n\n Examples\n --------\n\n >>> df = pl.DataFrame({\"foo\": [1, None, 2]})\n >>> df = df.select(pl.col(\"foo\").str.concat(\"-\"))\n >>> df\n shape: (1, 1)\n ┌──────────┐\n │ foo │\n │ --- │\n │ str │\n ╞══════════╡\n │ 1-null-2 │\n └──────────┘\n\n \"\"\"\n return wrap_expr(self._pyexpr.str_concat(delimiter))\n\n def to_uppercase(self) -> Expr:\n \"\"\"\n Transform to uppercase variant.\n \"\"\"\n return wrap_expr(self._pyexpr.str_to_uppercase())\n\n def to_lowercase(self) -> Expr:\n \"\"\"\n Transform to lowercase variant.\n \"\"\"\n return wrap_expr(self._pyexpr.str_to_lowercase())\n\n def strip(self) -> Expr:\n \"\"\"\n Remove leading and trailing whitespace.\n \"\"\"\n return wrap_expr(self._pyexpr.str_strip())\n\n def lstrip(self) -> Expr:\n \"\"\"\n Remove leading whitespace.\n \"\"\"\n return wrap_expr(self._pyexpr.str_lstrip())\n\n def rstrip(self) -> Expr:\n \"\"\"\n Remove trailing whitespace.\n \"\"\"\n return wrap_expr(self._pyexpr.str_rstrip())\n\n def contains(self, pattern: str) -> Expr:\n \"\"\"\n Check 
if string contains regex.\n\n Parameters\n ----------\n pattern\n Regex pattern.\n \"\"\"\n return wrap_expr(self._pyexpr.str_contains(pattern))\n\n def json_path_match(self, json_path: str) -> Expr:\n \"\"\"\n Extract the first match of json string with provided JSONPath expression.\n Throw errors if encounter invalid json strings.\n All return value will be casted to Utf8 regardless of the original value.\n Documentation on JSONPath standard: https://goessner.net/articles/JsonPath/\n\n Parameters\n ----------\n json_path\n A valid JSON path query string\n\n Returns\n -------\n Utf8 array. Contain null if original value is null or the json_path return nothing.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\"json_val\": ['{\"a\":\"1\"}', None, '{\"a\":2}', '{\"a\":2.1}', '{\"a\":true}']}\n ... )\n >>> df.select(pl.col(\"json_val\").str.json_path_match(\"$.a\"))\n shape: (5, 1)\n ┌──────────┐\n │ json_val │\n │ --- │\n │ str │\n ╞══════════╡\n │ 1 │\n ├╌╌╌╌╌╌╌╌╌╌┤\n │ null │\n ├╌╌╌╌╌╌╌╌╌╌┤\n │ 2 │\n ├╌╌╌╌╌╌╌╌╌╌┤\n │ 2.1 │\n ├╌╌╌╌╌╌╌╌╌╌┤\n │ true │\n └──────────┘\n\n \"\"\"\n return wrap_expr(self._pyexpr.str_json_path_match(json_path))\n\n def decode(self, encoding: str, strict: bool = False) -> Expr:\n \"\"\"\n Decodes a value using the provided encoding\n\n Parameters\n ----------\n encoding\n 'hex' or 'base64'\n strict\n how to handle invalid inputs\n - True: method will throw error if unable to decode a value\n - False: unhandled values will be replaced with `None`\n\n Examples\n --------\n >>> df = pl.DataFrame({\"encoded\": [\"666f6f\", \"626172\", None]})\n >>> df.select(pl.col(\"encoded\").str.decode(\"hex\"))\n shape: (3, 1)\n ┌─────────┐\n │ encoded │\n │ --- │\n │ str │\n ╞═════════╡\n │ foo │\n ├╌╌╌╌╌╌╌╌╌┤\n │ bar │\n ├╌╌╌╌╌╌╌╌╌┤\n │ null │\n └─────────┘\n \"\"\"\n if encoding == \"hex\":\n return wrap_expr(self._pyexpr.str_hex_decode(strict))\n elif encoding == \"base64\":\n return wrap_expr(self._pyexpr.str_base64_decode(strict))\n else:\n raise ValueError(\"supported encodings are 'hex' and 'base64'\")\n\n def encode(self, encoding: str) -> Expr:\n \"\"\"\n Encodes a value using the provided encoding\n\n Parameters\n ----------\n encoding\n 'hex' or 'base64'\n\n Returns\n -------\n Utf8 array with values encoded using provided encoding\n\n Examples\n --------\n >>> df = pl.DataFrame({\"strings\": [\"foo\", \"bar\", None]})\n >>> df.select(pl.col(\"strings\").str.encode(\"hex\"))\n shape: (3, 1)\n ┌─────────┐\n │ strings │\n │ --- │\n │ str │\n ╞═════════╡\n │ 666f6f │\n ├╌╌╌╌╌╌╌╌╌┤\n │ 626172 │\n ├╌╌╌╌╌╌╌╌╌┤\n │ null │\n └─────────┘\n \"\"\"\n if encoding == \"hex\":\n return wrap_expr(self._pyexpr.str_hex_encode())\n elif encoding == \"base64\":\n return wrap_expr(self._pyexpr.str_base64_encode())\n else:\n raise ValueError(\"supported encodings are 'hex' and 'base64'\")\n\n def extract(self, pattern: str, group_index: int = 1) -> Expr:\n r\"\"\"\n Extract the target capture group from provided patterns.\n\n Parameters\n ----------\n pattern\n A valid regex pattern\n group_index\n Index of the targeted capture group.\n Group 0 mean the whole pattern, first group begin at index 1\n Default to the first capture group\n\n Returns\n -------\n Utf8 array. Contain null if original value is null or regex capture nothing.\n\n Examples\n --------\n\n >>> df = pl.DataFrame(\n ... {\n ... \"a\": [\n ... \"http://vote.com/ballon_dor?candidate=messi&ref=polars\",\n ... \"http://vote.com/ballon_dor?candidat=jorginho&ref=polars\",\n ... 
\"http://vote.com/ballon_dor?candidate=ronaldo&ref=polars\",\n ... ]\n ... }\n ... )\n >>> df.select(\n ... [\n ... pl.col(\"a\").str.extract(r\"candidate=(\\w+)\", 1),\n ... ]\n ... )\n shape: (3, 1)\n ┌─────────┐\n │ a │\n │ --- │\n │ str │\n ╞═════════╡\n │ messi │\n ├╌╌╌╌╌╌╌╌╌┤\n │ null │\n ├╌╌╌╌╌╌╌╌╌┤\n │ ronaldo │\n └─────────┘\n\n \"\"\"\n return wrap_expr(self._pyexpr.str_extract(pattern, group_index))\n\n def split(self, by: str, inclusive: bool = False) -> Expr:\n \"\"\"\n Split the string by a substring.\n The return type will by of type List<Utf8>\n\n Parameters\n ----------\n by\n substring\n inclusive\n Include the split character/string in the results\n \"\"\"\n if inclusive:\n return wrap_expr(self._pyexpr.str_split_inclusive(by))\n return wrap_expr(self._pyexpr.str_split(by))\n\n def replace(self, pattern: str, value: str) -> Expr:\n \"\"\"\n Replace first regex match with a string value.\n\n Parameters\n ----------\n pattern\n Regex pattern.\n value\n Replacement string.\n \"\"\"\n return wrap_expr(self._pyexpr.str_replace(pattern, value))\n\n def replace_all(self, pattern: str, value: str) -> Expr:\n \"\"\"\n Replace substring on all regex pattern matches.\n\n Parameters\n ----------\n pattern\n Regex pattern.\n value\n Replacement string.\n \"\"\"\n return wrap_expr(self._pyexpr.str_replace_all(pattern, value))\n\n def slice(self, start: int, length: Optional[int] = None) -> Expr:\n \"\"\"\n Create subslices of the string values of a Utf8 Series.\n\n Parameters\n ----------\n start\n Start of the slice (negative indexing may be used).\n length\n Optional length of the slice.\n\n Returns\n -------\n Series of Utf8 type\n \"\"\"\n return wrap_expr(self._pyexpr.str_slice(start, length))\n\n\nclass ExprDateTimeNameSpace:\n \"\"\"\n Namespace for datetime related expressions.\n \"\"\"\n\n def __init__(self, expr: Expr):\n self._pyexpr = expr._pyexpr\n\n def truncate(\n self,\n every: Union[str, timedelta],\n offset: Optional[Union[str, timedelta]] = None,\n ) -> Expr:\n \"\"\"\n .. 
warning::\n This API is experimental and may change without it being considered a breaking change.\n\n Divide the date/ datetime range into buckets.\n Data must be sorted, if not the output does not make sense.\n\n The `every` and `offset` arguments are created with\n the following string language:\n\n 1ns # 1 nanosecond\n 1us # 1 microsecond\n 1ms # 1 millisecond\n 1s # 1 second\n 1m # 1 minute\n 1h # 1 hour\n 1d # 1 day\n 1w # 1 week\n 1mo # 1 calendar month\n 1y # 1 calendar year\n\n 3d12h4m25s # 3 days, 12 hours, 4 minutes, and 25 seconds\n\n Parameters\n ----------\n every\n Every interval start and period length\n offset\n Offset the window\n\n Returns\n -------\n Date/Datetime series\n\n Examples\n --------\n\n >>> from datetime import timedelta, datetime\n >>> start = datetime(2001, 1, 1)\n >>> stop = datetime(2001, 1, 2)\n >>> s = pl.date_range(start, stop, timedelta(minutes=30), name=\"dates\")\n >>> s\n shape: (49,)\n Series: 'dates' [datetime[ns]]\n [\n 2001-01-01 00:00:00\n 2001-01-01 00:30:00\n 2001-01-01 01:00:00\n 2001-01-01 01:30:00\n 2001-01-01 02:00:00\n 2001-01-01 02:30:00\n 2001-01-01 03:00:00\n 2001-01-01 03:30:00\n 2001-01-01 04:00:00\n 2001-01-01 04:30:00\n 2001-01-01 05:00:00\n 2001-01-01 05:30:00\n ...\n 2001-01-01 18:30:00\n 2001-01-01 19:00:00\n 2001-01-01 19:30:00\n 2001-01-01 20:00:00\n 2001-01-01 20:30:00\n 2001-01-01 21:00:00\n 2001-01-01 21:30:00\n 2001-01-01 22:00:00\n 2001-01-01 22:30:00\n 2001-01-01 23:00:00\n 2001-01-01 23:30:00\n 2001-01-02 00:00:00\n ]\n >>> s.dt.truncate(\"1h\")\n shape: (49,)\n Series: 'dates' [datetime[ns]]\n [\n 2001-01-01 00:00:00\n 2001-01-01 00:00:00\n 2001-01-01 01:00:00\n 2001-01-01 01:00:00\n 2001-01-01 02:00:00\n 2001-01-01 02:00:00\n 2001-01-01 03:00:00\n 2001-01-01 03:00:00\n 2001-01-01 04:00:00\n 2001-01-01 04:00:00\n 2001-01-01 05:00:00\n 2001-01-01 05:00:00\n ...\n 2001-01-01 18:00:00\n 2001-01-01 19:00:00\n 2001-01-01 19:00:00\n 2001-01-01 20:00:00\n 2001-01-01 20:00:00\n 2001-01-01 21:00:00\n 2001-01-01 21:00:00\n 2001-01-01 22:00:00\n 2001-01-01 22:00:00\n 2001-01-01 23:00:00\n 2001-01-01 23:00:00\n 2001-01-02 00:00:00\n ]\n >>> assert s.dt.truncate(\"1h\") == s.dt.truncate(timedelta(hours=1))\n\n \"\"\"\n if offset is None:\n offset = \"0ns\"\n if isinstance(every, timedelta):\n every = _timedelta_to_pl_duration(every)\n if isinstance(offset, timedelta):\n offset = _timedelta_to_pl_duration(offset)\n return wrap_expr(self._pyexpr.date_truncate(every, offset))\n\n def strftime(self, fmt: str) -> Expr:\n \"\"\"\n Format Date/datetime with a formatting rule: See [chrono strftime/strptime](https://docs.rs/chrono/0.4.19/chrono/format/strftime/index.html).\n \"\"\"\n return wrap_expr(self._pyexpr.strftime(fmt))\n\n def year(self) -> Expr:\n \"\"\"\n Extract year from underlying Date representation.\n Can be performed on Date and Datetime.\n\n Returns the year number in the calendar date.\n\n Returns\n -------\n Year as Int32\n \"\"\"\n return wrap_expr(self._pyexpr.year())\n\n def month(self) -> Expr:\n \"\"\"\n Extract month from underlying Date representation.\n Can be performed on Date and Datetime.\n\n Returns the month number starting from 1.\n The return value ranges from 1 to 12.\n\n Returns\n -------\n Month as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.month())\n\n def week(self) -> Expr:\n \"\"\"\n Extract the week from the underlying Date representation.\n Can be performed on Date and Datetime\n\n Returns the ISO week number starting from 1.\n The return value ranges from 1 to 53. 
(The last week of year differs by years.)\n\n Returns\n -------\n Week number as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.week())\n\n def weekday(self) -> Expr:\n \"\"\"\n Extract the week day from the underlying Date representation.\n Can be performed on Date and Datetime.\n\n Returns the weekday number where monday = 0 and sunday = 6\n\n Returns\n -------\n Week day as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.weekday())\n\n def day(self) -> Expr:\n \"\"\"\n Extract day from underlying Date representation.\n Can be performed on Date and Datetime.\n\n Returns the day of month starting from 1.\n The return value ranges from 1 to 31. (The last day of month differs by months.)\n\n Returns\n -------\n Day as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.day())\n\n def ordinal_day(self) -> Expr:\n \"\"\"\n Extract ordinal day from underlying Date representation.\n Can be performed on Date and Datetime.\n\n Returns the day of year starting from 1.\n The return value ranges from 1 to 366. (The last day of year differs by years.)\n\n Returns\n -------\n Day as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.ordinal_day())\n\n def hour(self) -> Expr:\n \"\"\"\n Extract hour from underlying DateTime representation.\n Can be performed on Datetime.\n\n Returns the hour number from 0 to 23.\n\n Returns\n -------\n Hour as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.hour())\n\n def minute(self) -> Expr:\n \"\"\"\n Extract minutes from underlying DateTime representation.\n Can be performed on Datetime.\n\n Returns the minute number from 0 to 59.\n\n Returns\n -------\n Minute as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.minute())\n\n def second(self) -> Expr:\n \"\"\"\n Extract seconds from underlying DateTime representation.\n Can be performed on Datetime.\n\n Returns the second number from 0 to 59.\n\n Returns\n -------\n Second as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.second())\n\n def nanosecond(self) -> Expr:\n \"\"\"\n Extract seconds from underlying DateTime representation.\n Can be performed on Datetime.\n\n Returns the number of nanoseconds since the whole non-leap second.\n The range from 1,000,000,000 to 1,999,999,999 represents the leap second.\n\n Returns\n -------\n Nanosecond as UInt32\n \"\"\"\n return wrap_expr(self._pyexpr.nanosecond())\n\n def to_python_datetime(self) -> Expr:\n \"\"\"\n Go from Date/Datetime to python DateTime objects\n \"\"\"\n return wrap_expr(self._pyexpr).map(\n lambda s: s.dt.to_python_datetime(), return_dtype=Object\n )\n\n def epoch_days(self) -> Expr:\n \"\"\"\n Get the number of days since the unix EPOCH.\n If the date is before the unix EPOCH, the number of days will be negative.\n\n Returns\n -------\n Days as Int32\n \"\"\"\n return wrap_expr(self._pyexpr).cast(Date).cast(Int32)\n\n def epoch_milliseconds(self) -> Expr:\n \"\"\"\n Get the number of milliseconds since the unix EPOCH\n If the date is before the unix EPOCH, the number of milliseconds will be negative.\n\n Returns\n -------\n Milliseconds as Int64\n \"\"\"\n return self.timestamp()\n\n def epoch_seconds(self) -> Expr:\n \"\"\"\n Get the number of seconds since the unix EPOCH\n If the date is before the unix EPOCH, the number of seconds will be negative.\n\n Returns\n -------\n Milliseconds as Int64\n \"\"\"\n return wrap_expr(self._pyexpr.dt_epoch_seconds())\n\n def timestamp(self) -> Expr:\n \"\"\"Return timestamp in milliseconds as Int64 type.\"\"\"\n return wrap_expr(self._pyexpr.timestamp())\n\n def with_time_unit(self, tu: str) -> Expr:\n \"\"\"\n Set 
time unit a Series of dtype Datetime or Duration. This does not modify underlying data,\n and should be used to fix an incorrect time unit.\n\n Parameters\n ----------\n tu\n Time unit for the `Datetime` Series: any of {\"ns\", \"us\", \"ms\"}\n \"\"\"\n return wrap_expr(self._pyexpr.dt_with_time_unit(tu))\n\n def cast_time_unit(self, tu: str) -> Expr:\n \"\"\"\n Cast the underlying data to another time unit. This may lose precision.\n\n Parameters\n ----------\n tu\n Time unit for the `Datetime` Series: any of {\"ns\", \"us\", \"ms\"}\n \"\"\"\n return wrap_expr(self._pyexpr.dt_cast_time_unit(tu))\n\n def and_time_unit(self, tu: str, dtype: Type[DataType] = Datetime) -> Expr:\n \"\"\"\n Set time unit a Series of type Datetime. This does not modify underlying data,\n and should be used to fix an incorrect time unit.\n\n ..deprecated::\n Use `with_time_unit`\n\n\n Parameters\n ----------\n tu\n Time unit for the `Datetime` Series: any of {\"ns\", \"us\", \"ms\"}\n dtype\n Output data type.\n \"\"\"\n return self.with_time_unit(tu)\n\n def and_time_zone(self, tz: Optional[str]) -> Expr:\n \"\"\"\n Set time zone for a Series of type Datetime.\n\n ..deprecated::\n Use `with_time_zone`\n\n Parameters\n ----------\n tz\n Time zone for the `Datetime` Series\n\n \"\"\"\n return wrap_expr(self._pyexpr).map(\n lambda s: s.dt.with_time_zone(tz), return_dtype=Datetime\n )\n\n def with_time_zone(self, tz: Optional[str]) -> Expr:\n \"\"\"\n Set time zone for a Series of type Datetime.\n\n Parameters\n ----------\n tz\n Time zone for the `Datetime` Series\n\n \"\"\"\n return wrap_expr(self._pyexpr).map(\n lambda s: s.dt.with_time_zone(tz), return_dtype=Datetime\n )\n\n def days(self) -> Expr:\n \"\"\"\n Extract the days from a Duration type.\n\n Returns\n -------\n A series of dtype Int64\n \"\"\"\n return wrap_expr(self._pyexpr.duration_days())\n\n def hours(self) -> Expr:\n \"\"\"\n Extract the hours from a Duration type.\n\n Returns\n -------\n A series of dtype Int64\n \"\"\"\n return wrap_expr(self._pyexpr.duration_hours())\n\n def seconds(self) -> Expr:\n \"\"\"\n Extract the seconds from a Duration type.\n\n Returns\n -------\n A series of dtype Int64\n \"\"\"\n return wrap_expr(self._pyexpr.duration_seconds())\n\n def milliseconds(self) -> Expr:\n \"\"\"\n Extract the milliseconds from a Duration type.\n\n Returns\n -------\n A series of dtype Int64\n \"\"\"\n return wrap_expr(self._pyexpr.duration_milliseconds())\n\n def nanoseconds(self) -> Expr:\n \"\"\"\n Extract the nanoseconds from a Duration type.\n\n Returns\n -------\n A series of dtype Int64\n \"\"\"\n return wrap_expr(self._pyexpr.duration_nanoseconds())\n\n\ndef expr_to_lit_or_expr(\n expr: Union[Expr, bool, int, float, str, \"pli.Series\"],\n str_to_lit: bool = True,\n) -> Expr:\n \"\"\"\n Helper function that converts args to expressions.\n\n Parameters\n ----------\n expr\n Any argument.\n str_to_lit\n If True string argument `\"foo\"` will be converted to `lit(\"foo\")`,\n If False it will be converted to `col(\"foo\")`\n\n Returns\n -------\n\n \"\"\"\n if isinstance(expr, str) and not str_to_lit:\n return pli.col(expr)\n elif (\n isinstance(expr, (int, float, str, pli.Series, datetime, date)) or expr is None\n ):\n return pli.lit(expr)\n elif isinstance(expr, Expr):\n return expr\n else:\n raise ValueError(\n f\"did not expect value {expr} of type {type(expr)}, maybe disambiguate with pl.lit or pl.col\"\n )\n\n\ndef _prepare_alpha(\n com: Optional[float] = None,\n span: Optional[float] = None,\n half_life: Optional[float] = 
None,\n alpha: Optional[float] = None,\n) -> float:\n if com is not None and alpha is None:\n assert com >= 0.0\n alpha = 1.0 / (1.0 + com)\n if span is not None and alpha is None:\n assert span >= 1.0\n alpha = 2.0 / (span + 1.0)\n if half_life is not None and alpha is None:\n assert half_life > 0.0\n alpha = 1.0 - np.exp(-np.log(2.0) / half_life)\n if alpha is None:\n raise ValueError(\"at least one of {com, span, half_life, alpha} should be set\")\n return alpha\n" ]
[ [ "numpy.arcsin", "numpy.arctan", "numpy.arccos", "numpy.cos", "numpy.exp", "numpy.log", "numpy.log10", "numpy.tan", "numpy.array", "numpy.sin", "numpy.random.randint" ] ]
lunarnautics/Stocksera
[ "09c114f588e95be28068af88c525565fdb98f92b" ]
[ "scheduled_tasks/government/get_retail_sales.py" ]
[ "import sqlite3\nimport pandas as pd\n\nconn = sqlite3.connect(r\"database/database.db\", check_same_thread=False)\ndb = conn.cursor()\n\n\ndef retail_sales():\n \"\"\"\n Get retail sales and compare it with avg monthly covid cases\n \"\"\"\n df = pd.read_html(\"https://ycharts.com/indicators/us_retail_and_food_services_sales\")\n # print(df[3]) next date\n combined_df = df[6][::-1].append(df[5][::-1])\n\n combined_df[\"Value\"] = combined_df[\"Value\"].str.replace(\"B\", \"\")\n combined_df[\"Value\"] = combined_df[\"Value\"].astype(float)\n\n combined_df[\"Percent Change\"] = combined_df[\"Value\"].shift(1)\n combined_df[\"Percent Change\"] = combined_df[\"Percent Change\"].astype(float)\n combined_df[\"Percent Change\"] = 100 * (combined_df[\"Value\"] - combined_df[\"Percent Change\"]) / combined_df[\"Percent Change\"]\n combined_df[\"Percent Change\"] = combined_df[\"Percent Change\"].round(2)\n\n combined_df[\"Date\"] = combined_df[\"Date\"].astype('datetime64[ns]').astype(str)\n\n covid_df = pd.read_csv(r\"C:\\Users\\Acer\\PycharmProjects\\StocksAnalysis\\database\\owid-covid-data.csv\")\n usa_df = covid_df[covid_df[\"iso_code\"] == \"USA\"]\n usa_df.index = pd.to_datetime(usa_df[\"date\"])\n usa_df = usa_df.groupby(pd.Grouper(freq=\"M\"))\n usa_df = usa_df.mean()[\"new_cases\"]\n usa_df = pd.DataFrame(usa_df)\n usa_df[\"new_cases\"] = usa_df[\"new_cases\"].round(2)\n usa_df.reset_index(inplace=True)\n usa_df[\"date\"] = usa_df[\"date\"].astype(str)\n usa_df.rename(columns={\"date\": \"Date\"}, inplace=True)\n combined_df = pd.merge(combined_df, usa_df, how='left', on='Date')\n combined_df.fillna(0, inplace=True)\n\n print(combined_df)\n for index, row in combined_df.iterrows():\n db.execute(\"INSERT OR IGNORE INTO retail_sales VALUES (?, ?, ?, ?)\", (row[0], row[1], row[2], row[3]))\n conn.commit()\n\n\nif __name__ == '__main__':\n retail_sales()\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.read_html", "pandas.to_datetime", "pandas.merge", "pandas.Grouper" ] ]
PyEst/PyEst
[ "4b7be6e29396605eb11fdfec7b409898ac49b9b9" ]
[ "source/PyEst.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nfrom tkinter import *\nimport tkinter\nimport tkinter.scrolledtext as tkst\nfrom tkinter.ttk import *\nfrom tkinter import ttk\nfrom tkinter.constants import END,HORIZONTAL, VERTICAL, NW, N, E, W, S, SUNKEN, LEFT, RIGHT, TOP, BOTH, YES, NE, X, RAISED, SUNKEN, DISABLED, NORMAL, CENTER, WORD\nimport tkinter.filedialog as fdlg\nfrom tkinter import messagebox as msb\nimport pandas as pd\nfrom pandas import ols\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n\n## Default\n#matplotlib.style.use('ggplot')\n\n###janela Principal\njanela = Tk()\n\n\"\"\"\n------------------------------------\nInicializando menu (cascata)\n------------------------------------\n\"\"\"\nmenubar = Menu(janela)\n\njanela.config(menu=menubar)\n\n##Menus - Quantidade\nfilemenu = Menu(menubar)\nfilemenu2 = Menu(menubar)\nfilemenu3 = Menu(menubar)\nfilemenu4 = Menu(menubar)\nfilemenu5 = Menu(menubar)\n\n###Autoria\nautoria = '\\n'+('==='*21)+\"\"\"\nAnálise realizada com PyEst\nSoftware desenvolvido por Jackson Osvaldo da Silva Braga\nGraduando em Engenharia Ambiental e Energias Renováveis\nmail: [email protected]\nfone: +5591991949964\n\"\"\"\n\n#####################\n# Definindo funções #\n#####################\ndef sair():\n sairQuestion = msb.askquestion(title='Sair', message='Você deseja sair?')\n if sairQuestion == \"yes\":\n janela.destroy()\n\n\ndef erro():\n msb.showerror(title='Erro', message='Não foi possível executar esta ação. Verique se o seu arquivo foi carregado corretamente.')\n\n\ndef exibir():\n exibir = msb.askquestion(title=\"Operação concluida\", message='Operação realizada com sucesso. Deseja exibir os dados na área de saída?')\n return exibir\n\n\ndef carregarArquivo():\n\n try:\n arquivoTratamento = str(fdlg.askopenfilename(defaultextension='.csv'))\n global arquivoTratamento\n\n def teste():\n with open(arquivoTratamento) as t:\n a = t.readline()\n return a\n \n if teste() == None:\n msb.showwarning('Aviso','Arquivo não carregado. Para executar as operações estatísticas você precisa carregar um arquivo csv.')\n \n else:\n \n if msb.askquestion(title='Sucesso', message='Seu arquivo foi carregado com sucesso. Deseja exibi-lo?') == \"yes\":\n exibir = pd.read_csv(arquivoTratamento)\n saida.insert(END, str(exibir)+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n \n except NameError:\n msb.showwarning('Aviso','Arquivo não carregado. Para executar as operações estatísticas você precisa carregar um arquivo csv.')\n except OSError:\n msb.showwarning('Aviso','Arquivo não carregado. Para executar as operações estatísticas você precisa carregar um arquivo csv.')\n except FileNotFoundError:\n msb.showwarning('Aviso','Arquivo não carregado. Para executar as operações estatísticas você precisa carregar um arquivo csv.')\n except AttributeError:\n msb.showwarning('Aviso','Arquivo não carregado. 
Para executar as operações estatísticas você precisa carregar um arquivo csv.')\n\ndef exibir():\n exibir = pd.read_csv(arquivoTratamento)\n saida.insert(END, str(exibir)+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n\ndef salvar():\n\n try:\n f = fdlg.asksaveasfile(mode='w', defaultextension = \".txt\")\n if f is None:\n return\n tex2Save = str(saida.get(1.0, END)+autoria)\n f.write(tex2Save)\n msb.showinfo(title='Salvar', message='Dados salvos com sucesso!')\n f.close()\n except NameError:\n erro()\n\n###Operações estatíticas - Medidas de Tendência Central\ndef resumo():\n try:\n resumo = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\t\\tANÁLISE CONCLUÍDA - RESUMO\\n'+('==='*15)+'\\n'+str(resumo.describe())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\ndef media():\n try:\n tratarMedia = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\t\\tANÁLISE CONCLUÍDA - MEDIAS\\n'+('==='*15)+'\\n'+str(tratarMedia.mean())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\ndef mediana():\n try:\n tratarMediana = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\t\\tANÁLISE CONCLUÍDA - MEDIANAS\\n'+('==='*15)+'\\n'+str(tratarMediana.median())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\ndef quantil():\n try:\n tratarQuantil = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\t\\tANÁLISE CONCLUÍDA - QUANTIS\\n'+('==='*15)+'\\n'+str(tratarQuantil.quantile())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\ndef moda():\n try:\n tratarModa = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\t\\tANÁLISE CONCLUÍDA - MODAS\\n'+('==='*15)+'\\n'+str(tratarModa.mode())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\n\n\n###Operações estatíticas - Medidas de dispersão\ndef amplitude():\n try:\n tratarAmplitude = pd.read_csv(arquivoTratamento)\n saida.insert(END,(('==='*15)+'\\n\\t\\tANÁLISE CONCLUÍDA - AMPLITUDE\\n'+('==='*15)+'\\n'+('==='*15)+'\\n\\t\\t\\tMÁXIMOS\\n'+('==='*15)+'\\n'+str(tratarAmplitude.max())+'\\n'+('==='*15)+'\\n\\t\\t\\tMÍNIMOS\\n'+('==='*15)+'\\n'+str(tratarAmplitude.min())+'\\n'))\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\ndef variancia():\n try:\n tratarVar = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\t\\tANÁLISE CONCLUÍDA - VARIÂNCIAS\\n'+('==='*15)+'\\n'+str(tratarVar.var())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\ndef desvioPadrao():\n try:\n tratarDesvPad = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\tANÁLISE CONCLUÍDA - DESVIO PADRÃO\\n'+('==='*15)+'\\n'+str(tratarDesvPad.std())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\ndef desvAbsoluto():\n try:\n tratarDesvAbs = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\tANÁLISE CONCLUÍDA - 
DESVIO ABSOLUTO\\n'+('==='*15)+'\\n'+str(tratarDesvAbs.mad())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\ndef covar():\n try:\n tratarCov = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\t\\tANÁLISE CONCLUÍDA - COVARIÂNCIA\\n'+('==='*15)+'\\n'+str(tratarCov.cov())+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\ndef Corr():\n try:\n tratarCorr = pd.read_csv(arquivoTratamento)\n saida.insert(END, ('==='*15)+'\\n\\tANÁLISE CONCLUÍDA - CORRELAÇÃO (pearson)\\n'+('==='*15)+'\\n'+str(tratarCorr.corr(method='pearson'))+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n except NameError:\n erro()\n except OSError:\n erro()\n\ndef Regressão():\n\n try:\n\n arquivoTratamento\n\n try:\n\n reg = Tk()\n\n def gerarRegressao():\n\n \"\"\"\n --- Carregar arquivo/DB e, após isso, converter o DB em um Data Frame.\n \n db = pd.read_csv()\n dbFrame = pd.DataFrame()\n\n \"\"\"\n\n db = pd.read_csv(arquivoTratamento)\n dbFrame = pd.DataFrame(db)\n\n model = model = ols(y=dbFrame['{}'.format(eixoY.get())], x=dbFrame['{}'.format(eixoX.get())])\n \n saida.insert(END, ('==='*15)+'\\n\\tANÁLISE CONCLUÍDA - REGRESSÃO LINEAR\\n'+('==='*15)+'\\n'+str(model)+'\\n')\n msb.showinfo(title='Concluido', message='Operação realizada com sucesso!')\n\n \n ##Box variáveis\n colunas = ttk.Combobox(reg)\n colunas['font'] = ('12')\n\n #modelo = Label(reg, text='Modelo: {}'.format(), font='12')\n\n ## Eixo x e Label Eixo x\n eixoX = ttk.Combobox(reg)\n eixoX['font'] = ('12')\n lEixoX = Label(reg, text='Eixo X', font='12')\n\n ## Eixo Y e Label Eixo Y\n eixoY = ttk.Combobox(reg)\n eixoY['font'] = ('12')\n lEixoY = Label(reg, text='Eixo Y', font='12')\n\n ## Gerar Análise\n gerarAnalise = Button(reg, text='Gerar Análise', comman = gerarRegressao)\n\n ## Rodando wid's\n lEixoX.pack()\n eixoX.pack()\n lEixoY.pack()\n eixoY.pack()\n gerarAnalise.pack()\n\n # Lendo linhas do arquivo e selecionando variáveis\n a = open(str(arquivoTratamento),'r')\n i = 0\n for linha in a:\n linha = linha.strip()\n i = i + 1\n b = []\n col = []\n col = linha.split(sep=',')\n if i == 1:\n colunas['values'] = ['Todas']+col\n eixoX['values'] = col\n eixoY['values'] = col\n break\n \n reg.resizable(0,0)\n reg.title('Regressão Linear')\n reg.mainloop()\n\n except NameError:\n erro()\n except OSError:\n erro()\n \n except NameError:\n erro()\n except OSError:\n erro()\n\n\n\n###Configurando exibição dos gráficos \ndef graph():\n\n \n try:\n arquivoTratamento\n \n try:\n \n graficos = Tk()\n\n ## Definindo função para criação dos gráficos\n def gerar():\n\n plotGraph = pd.read_csv(arquivoTratamento) \n\n try:\n if tiposGra.get() == 'cumsum' and colunas.get() == 'Todas':\n plotGraph.cumsum()\n plotGraph.plot()\n plt.show()\n else:\n if tiposGra.get() == 'cumsum' and colunas.get() != 'Todas':\n msb.showerror(title='Erro', message='Para exibir este tipo de gráfico, você deve selecionar a opção todas, em variáveis.')\n \n if tiposGra.get() == 'bar' and colunas.get() != None and colunas.get() != 'Todas':\n plotGraph[colunas.get()].plot(kind='bar', title=str(colunas.get()))\n plt.show()\n else:\n if tiposGra.get() == 'bar' and colunas.get() == 'Todas':\n plotGraph.plot(kind='bar')\n # plotGraph.plot(kind='bar', stacked=True)\n \n plt.show()\n if tiposGra.get() == 'bar-stacked' and colunas.get() != None and colunas.get() == 
'Todas':\n plotGraph.plot(kind='bar', stacked=True)\n plt.show()\n\n else:\n if tiposGra.get() == 'bar-stacked' and colunas.get() != None and colunas.get() != 'Todas':\n msb.showerror(title='Erro', message='Para exibir este tipo de gráfico, você deve selecionar a opção todas, em variáveis.')\n \n \n if tiposGra.get() == 'barh-stacked' and colunas.get() != None and colunas.get() == 'Todas':\n plotGraph.plot(kind='barh', stacked=True)\n plt.show()\n\n else:\n if tiposGra.get() == 'barh-stacked' and colunas.get() != None and colunas.get() != 'Todas':\n msb.showerror(title='Erro', message='Para exibir este tipo de gráfico, você deve selecionar a opção todas, em variáveis.')\n\n if tiposGra.get() == 'hist' and colunas.get() != None and colunas.get() == 'Todas':\n plotGraph.plot(kind='hist')\n plotGraph.diff().hist(color='k',alpha=0.5)\n plt.show()\n else:\n if tiposGra.get() == 'hist' and colunas.get() != None and colunas.get() != 'Todas':\n plotGraph[colunas.get()].plot(kind='hist', title=colunas.get())\n plt.show()\n \n if tiposGra.get() == 'box' and colunas.get() != None and colunas.get() != 'Todas':\n plotGraph[colunas.get()].plot(kind='box', title=colunas.get())\n plt.show()\n else:\n if tiposGra.get() == 'box' and colunas.get() != None and colunas.get() == 'Todas':\n plotGraph.plot(kind='box')\n plt.show()\n \n if tiposGra.get() == 'scatter' and eixoX.get() != None and eixoY.get() != None:\n plotGraph.plot(kind='scatter', x = eixoX.get(), y = eixoY.get())\n plt.show()\n\n else:\n if tiposGra.get() == 'scatter' and eixoX.get() == None and eixoY.get() == None:\n msb.showerror(title='Erro', message='Para exibir este tipo de gráfico, você deve uma opção para o Eixo X e outra para o Eixo Y.')\n\n if tiposGra.get() == 'kde' and colunas.get() != None and colunas.get() == 'Todas':\n from pandas.tools.plotting import scatter_matrix\n scatter_matrix(plotGraph, alpha=0.2, figsize=(6, 6), diagonal='kde')\n plt.show()\n else:\n if tiposGra.get() == 'kde' and colunas.get() != None and colunas.get() != 'Todas':\n plotGraph[colunas.get()].plot(kind='kde',title=colunas.get())\n plt.show()\n except KeyError:\n msb.showerror(title='Erro', message='Selecione os valores correspondentes ao tipo do gráfico. Apenas assim poderá exibi-lo.')\n except NameError:\n msb.showerror(title='Erro', message='Selecione os valores correspondentes ao tipo do gráfico. 
Apenas assim poderá exibi-lo.')\n\n ##Box variáveis\n colunas = ttk.Combobox(graficos)\n colunas['font'] = ('12')\n\n ## Label Variáveis\n var = Label(graficos, text='Variáveis',font='12')\n\n ##Box tipo de gráfico\n tiposGra = ttk.Combobox(graficos)\n tiposGra['font'] = ('12')\n tiposGra['values'] = ('cumsum','bar','bar-stacked','barh-stacked', 'hist', 'box','scatter','kde')\n ## Label tipo de gráfico\n textTposGraf = Label(graficos, text='Tipo de Gráfico', font='12')\n\n \n ## Eixo x\n eixoX = ttk.Combobox(graficos)\n eixoX['font'] = ('12')\n ## Label Eixo X\n lEixoX = Label(graficos, text='Eixo X', font='12')\n \n ## Eixo y\n eixoY = ttk.Combobox(graficos)\n eixoY['font'] = ('12')\n ## Label Eixo X\n lEixoY = Label(graficos, text='Eixo Y', font='12')\n\n ## Botão para gerar gráfico\n plotar = Button(graficos, text='Plotar', comman = gerar)\n \n ## Rodando wid's\n var.pack()\n colunas.pack()\n textTposGraf.pack()\n tiposGra.pack()\n lEixoX.pack()\n eixoX.pack()\n lEixoY.pack()\n eixoY.pack()\n plotar.pack()\n\n a = open(str(arquivoTratamento),'r')\n i = 0\n for linha in a:\n linha = linha.strip()\n i = i + 1\n b = []\n col = []\n col = linha.split(sep=',')\n if i == 1:\n colunas['values'] = ['Todas']+col\n eixoX['values'] = col\n eixoY['values'] = col\n break\n \n\n graficos.resizable(0,0)\n graficos.title('Gráficos')\n graficos.mainloop()\n except NameError:\n erro()\n except FileNotFoundError:\n erro()\n except OSError:\n erro()\n except NameError:\n erro()\n except FileNotFoundError:\n \terro()\n except OSError:\n erro() \n\n###Configurando Aba Ajuda\ndef doc():\n msb._show(title='Documentação', message='Você pode ter acesso a documentação do PyEst no site da aplicação, o mesmo que você usou para fazer o download do arquivo executável.')\n\ndef formatoCSV():\n msb._show(title='Formato do Arquivo CSV para análise', message=\"\"\"\n O arquivo csv para análise não deve conter acentos gráficos, nem espaços entre os nomes dos parâmetros. A primeira linha deve conter apenas os nomes das colunas. Nas demais linhas, seguem-se os dados para análise.\"\"\"\n )\n\ndef sobre():\n msb._show(title='Sobre', message=\"\"\"\n O PyEst é uma aplicação desenvolvida em Python (3.5) por Jackson Osvaldo da Silva Braga. 
A proposta da aplicação é ser útil em diversas áreas, dinamizando o tratamento e análise de dados, sejam eles ambientais ou não.\"\"\"\n )\n\n## Menu 01 - Arquivo\nmenubar.add_cascade(label='Arquivo',font='12', menu=filemenu)\nmenubar.add_separator()\nfilemenu.add_command(label='Carregar Arquivo',font='12', command = carregarArquivo)\nfilemenu.add_command(label='Resumo do Banco de Dados',font='12', command = resumo)\nfilemenu.add_command(label='Gráficos',font='12',command=graph)\nfilemenu.add_command(label='Salvar',font='12', command=salvar)\nfilemenu.add_separator()\nfilemenu.add_command(label='Sair',font='12', command=sair)\n\n## Menu 02 - Medidas de Tendência Central\nmenubar.add_cascade(label = 'Medidas de Tendência Central',font='12', menu=filemenu2)\nfilemenu2.add_command(label='Média',font='12', command = media)\nfilemenu2.add_command(label='Mediana',font='12', command = mediana)\nfilemenu2.add_command(label='Quantil',font='12', command=quantil)\nfilemenu2.add_command(label='Moda',font='12', command=moda)\n\n\n##Menu 03 - Medidas de Dispersão\nmenubar.add_cascade(label = 'Medidas de Dispersão',font='12', menu=filemenu3)\nfilemenu3.add_command(label='Amplitude',font='12', command = amplitude)\nfilemenu3.add_command(label='Variância',font='12',command=variancia)\nfilemenu3.add_command(label='Desvio Padrão',font='12',command=desvioPadrao)\nfilemenu3.add_command(label='Desvio Absoluto',font='12',command=desvAbsoluto)\nfilemenu3.add_command(label='Covariância',font='12',command=covar)\nfilemenu3.add_command(label='Correlação',font='12',command=Corr)\n\n\n##Menu 04 - Ajuda e Documentação\nmenubar.add_cascade(label = 'Inferência Estatística',font='12', menu=filemenu5)\nfilemenu5.add_command(label='Regressão Linear',font='12', command=Regressão)\n\n##Menu 05 - Ajuda e Documentação\nmenubar.add_cascade(label = 'Ajuda',font='12', menu=filemenu4)\nfilemenu4.add_command(label='Documentação',font='12', command=doc)\nfilemenu4.add_command(label='Formatação do arquivo csv para análise',font='12',command=formatoCSV)\nfilemenu4.add_command(label='Sobre',font='12', command=sobre)\n\n\"-----------------------------------\"\n\n### Configurando janela de saida das análises\nsaida = tkst.ScrolledText(master = janela,wrap= WORD,width = 20,height = 10)\nsaida.pack(padx=10, pady=10, fill=BOTH, expand=True)\n\n\njanela.title('PyEst - Desenvolvido por Jackson Osvaldo da Silva Braga')\n\n\njanela.geometry('{}x{}'.format(janela.winfo_screenwidth(),janela.winfo_screenheight()))\njanela.mainloop()\n" ]
[ [ "pandas.read_csv", "matplotlib.pyplot.show", "pandas.DataFrame", "pandas.tools.plotting.scatter_matrix" ] ]
microsoft/ai-edu
[ "2f59fa4d3cf19f14e0b291e907d89664bcdc8df3" ]
[ "实践案例/B15-基于深度学习的代码搜索案例/src/3Model/codenn.py" ]
[ "# Copyright (c) Microsoft. All rights reserved.\n# Licensed under the MIT license. See LICENSE file in the project root for full license information.\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.optim import Adam\nfrom tensorboardX import SummaryWriter\nimport spacy\nfrom typing import Any\n\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nfrom helper.file import load_json, load_pickle, save_pickle\nfrom joint_embedder import get_model, codenn_search\nfrom dataset import get_load_dataset\nfrom retrieval import eval_retrieval\nfrom data_parallel import get_data_parallel\nfrom args import get_codenn_argparser\nfrom helper.running_log import RunningLog\nfrom helper.helper import load_epoch\nfrom helper.web_search import run_web_search\nfrom helper.extractor import unescape\n\n\ndef normalize(data):\n \"\"\"normalize matrix by rows\"\"\"\n normalized_data = data / np.linalg.norm(data, axis=1) \\\n .reshape((data.shape[0], 1))\n return normalized_data\n\n\nif __name__ == '__main__':\n parser = get_codenn_argparser()\n args = parser.parse_args()\n running_log = RunningLog(args.model_path)\n running_log.set('parameters', vars(args))\n os.makedirs(args.model_path, exist_ok=True)\n assert args.dataset_path is not None or args.task in ['search'], \\\n '%s task requires dataset' % args.task\n assert args.load > 0 or args.task in ['train'], \\\n \"it's nonsense to %s on an untrained model\" % args.task\n dataset_statistics = load_json(\n os.path.join(args.dataset_path, 'statistics.json'))\n model = get_data_parallel(get_model(dataset_statistics, args), args.gpu)\n if args.gpu:\n if -1 in args.gpu:\n deviceids = 'cuda'\n else:\n deviceids = 'cuda:%d' % args.gpu[0]\n else:\n deviceids = 'cpu'\n device = torch.device(deviceids)\n load_dataset = get_load_dataset(args)\n optimizer_state_dict = None\n if args.load > 0:\n model_state_dict, optimizer_state_dict = \\\n load_epoch(args.model_path, args.load)\n model.load_state_dict(model_state_dict)\n model.to(device)\n running_log.set('state', 'interrupted')\n if args.task == 'train':\n train_data_loader = DataLoader(load_dataset('train'),\n batch_size=args.batch_size,\n shuffle=True, drop_last=True)\n valid_data_loader = None\n optimizer = Adam(model.parameters(), lr=args.learning_rate)\n if optimizer_state_dict is not None:\n optimizer.load_state_dict(optimizer_state_dict)\n writer = SummaryWriter(comment=args.comment or\n os.path.basename(args.model_path))\n step = 0\n for epoch in tqdm(range(args.load + 1, args.epoch + 1), desc='Epoch'):\n losses = []\n for iter, data in enumerate(tqdm(train_data_loader, desc='Iter'),\n 1):\n data = [x.to(device) for x in data]\n loss = model(*data).mean()\n losses.append(loss.item())\n writer.add_scalar('train/loss', loss.item(), step)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if iter % args.log_every_iter == 0 or iter == len(train_data_loader)-1:\n # noinspection PyStringFormat\n tqdm.write('epoch:[%d/%d] iter:[%d/%d] Loss=%.5f' %\n (epoch, args.epoch, iter, len(train_data_loader),\n np.mean(losses)))\n losses = []\n step += 1\n if args.valid_every_epoch and epoch % args.valid_every_epoch == 0:\n if valid_data_loader is None:\n valid_data_loader = DataLoader(\n load_dataset('valid'), batch_size=args.eval_pool_size,\n shuffle=True, drop_last=True)\n model.eval()\n acc, mrr, map, ndcg = eval_retrieval(model, 
valid_data_loader,\n device,\n args.eval_pool_size,\n args.eval_k)\n tqdm.write('ACC=%f, MRR=%f, MAP=%f, nDCG=%f' %\n (acc, mrr, map, ndcg))\n writer.add_scalar('eval/acc', acc, epoch)\n writer.add_scalar('eval/mrr', mrr, epoch)\n writer.add_scalar('eval/map', map, epoch)\n writer.add_scalar('eval/ndcg', ndcg, epoch)\n model.train()\n if args.save_every_epoch and epoch % args.save_every_epoch == 0:\n tqdm.write('saving to epoch.%04d.pth' % epoch)\n torch.save((model.state_dict(), optimizer.state_dict()),\n os.path.join(args.model_path,\n 'epoch.%04d.pth' % epoch))\n elif args.task in ['valid', 'test']:\n model.eval()\n eval_data_loader = DataLoader(load_dataset(args.task),\n batch_size=args.eval_pool_size,\n shuffle=True, drop_last=True)\n print('ACC=%f, MRR=%f, MAP=%f, nDCG=%f' % eval_retrieval(\n model, eval_data_loader, device, args.eval_pool_size, args.eval_k))\n elif args.task == 'repr':\n model.eval()\n repr_data_loader = DataLoader(load_dataset('total'),\n batch_size=args.batch_size,\n shuffle=False, drop_last=False)\n vecs = None\n for data in tqdm(repr_data_loader, desc='Repr'):\n data = [x.to(device) for x in data]\n reprs = model.forward_code(*data).data.cpu().numpy()\n vecs = reprs if vecs is None else np.concatenate((vecs, reprs), 0)\n vecs = normalize(vecs)\n print('saving codes to use.codes.pkl')\n save_pickle(vecs, os.path.join(args.model_path, 'use.codes.pkl'))\n elif args.task == 'search':\n model.eval()\n reprs = load_pickle(os.path.join(args.model_path, 'use.codes.pkl'))\n code = pd.read_csv(os.path.join(args.dataset_path, 'use.codemap.csv'))\n assert reprs.shape[0] == code.shape[0], 'Broken data'\n word2code = load_pickle(os.path.join(args.dataset_path,\n 'word2code.desc.pkl'))\n nlp = spacy.load('en_core_web_lg')\n while True:\n try:\n query = input('> ')\n except EOFError:\n break\n idx = codenn_search(query, model, word2code, reprs, nlp, device,\n args.search_top_n)\n for i in idx:\n record = code.loc[i]\n if 'code' in record.index:\n print('========')\n print(unescape(record['code']))\n else:\n print('==== %s:%d ====' % (record['file'], record['start']))\n start = record['start'] - 1\n end = record['end'] if 'end' in record else start + 10\n with open(record['file']) as f:\n print(''.join(f.readlines()[start:end]).strip())\n elif args.task == 'serve':\n model.eval()\n reprs = load_pickle(os.path.join(args.model_path, 'use.codes.pkl'))\n code = pd.read_csv(os.path.join(args.dataset_path, 'use.codemap.csv'), na_filter=False)\n assert reprs.shape[0] == code.shape[0], 'Broken data'\n word2code = load_pickle(os.path.join(args.dataset_path,\n 'word2code.desc.pkl'))\n nlp = spacy.load('en_core_web_lg')\n\n def handler(query: str, count: int) -> Any:\n results = []\n idx = codenn_search(query, model, word2code, reprs, nlp, device,\n count)\n for i in idx:\n record = code.loc[i]\n if 'code' in record.index:\n if 'url' in record.index:\n results.append({\n 'code': unescape(record['code']),\n 'url': record['url']\n })\n else:\n results.append({\n 'code': unescape(record['code'])\n })\n else:\n start = record['start'] - 1\n end = record['end'] if 'end' in record else start + 10\n with open(record['file']) as f:\n results.append({\n 'code': ''.join(f.readlines()[start:end]).strip()\n })\n return results\n run_web_search(args.host, args.port, handler)\n running_log.set('state', 'succeeded')\n" ]
[ [ "numpy.concatenate", "numpy.linalg.norm", "torch.device", "numpy.mean" ] ]
NancyFulda/towards-neural-programming-interfaces
[ "21b467af56848c4fc8642fb0412f9f8d1b7718a2" ]
[ "src/npi/models/classifiers.py" ]
[ "import torch.nn as nn\n\nclass StyleClassifier(nn.Module): # classifies NPI outputs\n def __init__(self, n=200, m=768, k=1):\n \"\"\"\n input_activs_shape: tuple of (b, n, m, 1)\n b is the number of batches\n n x m x 1 slices contain the elements of the original activations, flattened into a 2D array\n target_label: tuple of (b, 1, m, 1)\n the desired label for the predicted activations, as passed into the NPI network\n \"\"\"\n super(StyleClassifier, self).__init__()\n\n print(\"Classifier INIT\", flush=True)\n self.n = n\n self.m = m\n self.k = k\n self.N = self.n * self.m\n\n fact1 = 2 ** 4\n fact2 = 2 ** 5\n fact3 = 2 ** 6\n\n print(\"Defining classifier model\", flush=True)\n\n self.model = nn.Sequential(\n nn.Linear(self.n * self.m * self.k, self.n // fact1),\n nn.ReLU(),\n nn.Linear(self.n // fact1, self.n // fact2),\n nn.ReLU(),\n nn.Linear(self.n // fact2, self.n // fact3),\n nn.ReLU(),\n nn.Linear(self.n // fact3, 1),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n return self.model(x.view(-1, self.n * self.m * self.k))\n\n\nclass Discriminator(nn.Module): # classifies NPI outputs\n def __init__(self, input_activs_shape, input_targ_shape):\n \"\"\"\n input_activs_shape: tuple of (n, m, 1)\n n x m x 1 slices contain the elements of the original activations, flattened into a 2D array\n target_label: tuple of (b, 1, m, 1)\n the desired label for the predicted activations, as passed into the NPI network\n \"\"\"\n super(Discriminator, self).__init__()\n\n print(\"GenerationClassifier INIT\")\n self.n = input_activs_shape[0]\n self.m = input_activs_shape[1]\n self.k = input_activs_shape[2]\n\n self.l = 1\n\n fact1 = 2 ** 3\n fact2 = 2 ** 4\n fact3 = 2 ** 5\n\n print(\"Defining GenerationClassifier model\")\n\n self.layer1 = nn.Sequential(nn.Linear(self.n * self.m * self.k, self.n // fact1),\n nn.ReLU())\n self.layer2 = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact1),\n nn.ReLU())\n self.layer3 = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact2),\n nn.ReLU())\n self.layer4 = nn.Sequential(nn.Linear(self.n // fact2, self.n // fact2),\n nn.ReLU())\n self.layer5 = nn.Sequential(nn.Linear(self.n // fact2, self.n // fact3),\n nn.ReLU())\n self.layer6 = nn.Sequential(nn.Linear(self.n // fact3, self.n // fact3),\n nn.ReLU())\n self.layer7 = nn.Sequential(nn.Linear(self.n // fact3, self.l * self.k),\n nn.Sigmoid())\n\n def forward(self, x):\n metadata = {'ordered_hidden_activations': [], 'final_out_preview': None, 'final_out_returned': None}\n\n out1 = self.layer1(x.view(-1, self.n * self.m * self.k))\n out2 = self.layer2(out1)\n out3 = self.layer3(out2)\n out4 = self.layer4(out3)\n out5 = self.layer5(out4)\n out6 = self.layer6(out5)\n final_out = self.layer7(out6)\n\n # metadata['ordered_hidden_activations'] = [out1.detach().data.cpu().numpy(),\n # out2.detach().data.cpu().numpy(), \n # out3.detach().data.cpu().numpy(), \n # out4.detach().data.cpu().numpy(), \n # out5.detach().data.cpu().numpy(), \n # out6.detach().data.cpu().numpy(), \n # ]\n # metadata['final_out_preview'] = final_out.detach().data.cpu().numpy()\n # metadata['final_out_returned'] = final_out.view(-1, 1, self.l, self.k).detach().data.cpu().numpy()\n return final_out.view(-1, 1, self.l, self.k) # , metadata\n\nclass ContentClassifier(nn.Module): # classifies NPI outputs\n def __init__(self, input_activs_shape, input_targ_shape):\n raise NotImplementedError(\"Content classifier should be pre-trained\")\n \"\"\"\n input_activs_shape: tuple of (b, n, m, 1)\n b is the number of batches\n n x m x 1 slices 
contain the elements of the original activations, flattened into a 2D array\n \"\"\"\n super(ContentClassifier, self).__init__()\n\n print(\"ContentClassifier INIT\")\n self.b = input_activs_shape[0]\n self.n = input_activs_shape[1]\n self.m = input_activs_shape[2]\n self.k = input_activs_shape[3]\n\n self.l = 1 # input_targ_shape[2]\n\n fact1 = 2 ** 3\n fact2 = 2 ** 3\n fact3 = 2 ** 3\n\n print(\"Defining ContentClassifier model\")\n self.linear1 = nn.Sequential(nn.Linear(self.n * self.m * self.k, self.n // fact1),\n nn.ReLU())\n self.linear1Post = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact1),\n nn.ReLU())\n self.linear2 = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact1),\n nn.ReLU())\n self.linear3 = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact2),\n nn.ReLU())\n self.linear4 = nn.Sequential(nn.Linear(self.n // fact2, self.n // fact2),\n nn.ReLU())\n self.linear5 = nn.Sequential(nn.Linear(self.n // fact2, self.n // fact3),\n nn.ReLU())\n self.linear6 = nn.Sequential(nn.Linear(self.n // fact3, self.n // fact3),\n nn.ReLU())\n self.linear7Pre = nn.Sequential(nn.Linear(self.n // fact3, self.n // fact3),\n nn.ReLU())\n self.linear7 = nn.Sequential(nn.Linear(self.n // fact3, 1 * self.l * self.k),\n nn.Sigmoid())\n\n def forward(self, x):\n metadata = {'ordered_hidden_activations': [], 'final_out_preview': None, 'final_out_returned': None}\n out1 = self.linear1(x.view(-1, self.n * self.m * self.k))\n out1Post = self.linear1Post(out1)\n out2 = self.linear2(out1Post)\n out3 = self.linear3(out2)\n out4 = self.linear4(out3)\n out5 = self.linear5(out4)\n out6 = self.linear6(out5)\n out7Pre = self.linear7Pre(out6)\n final_out = self.linear7(out6)\n\n metadata['ordered_hidden_activations'] = [out1.detach().data.cpu().numpy(),\n out1Post.detach().data.cpu().numpy(),\n out2.detach().data.cpu().numpy(),\n out3.detach().data.cpu().numpy(),\n out4.detach().data.cpu().numpy(),\n out5.detach().data.cpu().numpy(),\n out6.detach().data.cpu().numpy(),\n out7Pre.detach().data.cpu().numpy(),\n ]\n metadata['final_out_preview'] = final_out.detach().data.cpu().numpy()\n metadata['final_out_returned'] = final_out.view(-1, 1, self.l, self.k).detach().data.cpu().numpy()\n return final_out.view(-1, 1, self.l, self.k), metadata\n" ]
[ [ "torch.nn.ReLU", "torch.nn.Sigmoid", "torch.nn.Linear" ] ]
phamhuyhoang97/spark
[ "6a2452fb5cd776dc1f292704e6b86bbec0ff24e7" ]
[ "python/pyspark/pandas/typedef/typehints.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUtilities to deal with types. This is mostly focused on python3.\n\"\"\"\nimport datetime\nimport decimal\nimport sys\nimport typing\nfrom collections import Iterable\nfrom distutils.version import LooseVersion\nfrom inspect import getfullargspec, isclass\nfrom typing import ( # noqa: F401\n Any,\n Callable,\n Generic,\n List,\n Optional,\n Tuple,\n Union,\n Type,\n)\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype, pandas_dtype\nfrom pandas.api.extensions import ExtensionDtype\n\ntry:\n from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype\n\n extension_dtypes_available = True\n extension_dtypes = (Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype) # type: Tuple\n\n try:\n from pandas import BooleanDtype, StringDtype\n\n extension_object_dtypes_available = True\n extension_dtypes += (BooleanDtype, StringDtype)\n except ImportError:\n extension_object_dtypes_available = False\n\n try:\n from pandas import Float32Dtype, Float64Dtype\n\n extension_float_dtypes_available = True\n extension_dtypes += (Float32Dtype, Float64Dtype)\n except ImportError:\n extension_float_dtypes_available = False\n\nexcept ImportError:\n extension_dtypes_available = False\n extension_object_dtypes_available = False\n extension_float_dtypes_available = False\n extension_dtypes = ()\n\nimport pyarrow as pa\nimport pyspark.sql.types as types\nfrom pyspark.sql.pandas.types import to_arrow_type, from_arrow_type\n\nfrom pyspark import pandas as ps # For running doctests and reference resolution in PyCharm.\nfrom pyspark.pandas._typing import Dtype, T\nfrom pyspark.pandas.typedef.string_typehints import resolve_string_type_hint\n\nif typing.TYPE_CHECKING:\n from pyspark.pandas.internal import InternalField\n\n\n# A column of data, with the data type.\nclass SeriesType(Generic[T]):\n def __init__(self, dtype: Dtype, spark_type: types.DataType):\n self.dtype = dtype\n self.spark_type = spark_type\n\n def __repr__(self) -> str:\n return \"SeriesType[{}]\".format(self.spark_type)\n\n\nclass DataFrameType(object):\n def __init__(\n self,\n index_fields: List[\"InternalField\"],\n data_fields: List[\"InternalField\"],\n ):\n self.index_fields = index_fields\n self.data_fields = data_fields\n self.fields = index_fields + data_fields\n\n @property\n def dtypes(self) -> List[Dtype]:\n return [field.dtype for field in self.fields]\n\n @property\n def spark_type(self) -> types.StructType:\n return types.StructType([field.struct_field for field in self.fields])\n\n def __repr__(self) -> str:\n return \"DataFrameType[{}]\".format(self.spark_type)\n\n\n# The type is a scalar type that is furthermore understood by Spark.\nclass ScalarType(object):\n def __init__(self, dtype: Dtype, 
spark_type: types.DataType):\n self.dtype = dtype\n self.spark_type = spark_type\n\n def __repr__(self) -> str:\n return \"ScalarType[{}]\".format(self.spark_type)\n\n\n# The type is left unspecified or we do not know about this type.\nclass UnknownType(object):\n def __init__(self, tpe: Any):\n self.tpe = tpe\n\n def __repr__(self) -> str:\n return \"UnknownType[{}]\".format(self.tpe)\n\n\nclass IndexNameTypeHolder(object):\n name = None\n tpe = None\n short_name = \"IndexNameType\"\n\n\nclass NameTypeHolder(object):\n name = None\n tpe = None\n short_name = \"NameType\"\n\n\ndef as_spark_type(\n tpe: Union[str, type, Dtype], *, raise_error: bool = True, prefer_timestamp_ntz: bool = False\n) -> types.DataType:\n \"\"\"\n Given a Python type, returns the equivalent spark type.\n Accepts:\n - the built-in types in Python\n - the built-in types in numpy\n - list of pairs of (field_name, type)\n - dictionaries of field_name -> type\n - Python3's typing system\n \"\"\"\n # For NumPy typing, NumPy version should be 1.21+ and Python version should be 3.8+\n if sys.version_info >= (3, 8) and LooseVersion(np.__version__) >= LooseVersion(\"1.21\"):\n if (\n hasattr(tpe, \"__origin__\")\n and tpe.__origin__ is np.ndarray # type: ignore[union-attr]\n and hasattr(tpe, \"__args__\")\n and len(tpe.__args__) > 1 # type: ignore[union-attr]\n ):\n # numpy.typing.NDArray\n return types.ArrayType(\n as_spark_type(\n tpe.__args__[1].__args__[0], raise_error=raise_error # type: ignore[union-attr]\n )\n )\n\n if isinstance(tpe, np.dtype) and tpe == np.dtype(\"object\"):\n pass\n # ArrayType\n elif tpe in (np.ndarray,):\n return types.ArrayType(types.StringType())\n elif hasattr(tpe, \"__origin__\") and issubclass(\n tpe.__origin__, list # type: ignore[union-attr]\n ):\n element_type = as_spark_type(\n tpe.__args__[0], raise_error=raise_error # type: ignore[union-attr]\n )\n if element_type is None:\n return None\n return types.ArrayType(element_type)\n # BinaryType\n elif tpe in (bytes, np.character, np.bytes_, np.string_):\n return types.BinaryType()\n # BooleanType\n elif tpe in (bool, np.bool_, \"bool\", \"?\"):\n return types.BooleanType()\n # DateType\n elif tpe in (datetime.date,):\n return types.DateType()\n # NumericType\n elif tpe in (np.int8, np.byte, \"int8\", \"byte\", \"b\"):\n return types.ByteType()\n elif tpe in (decimal.Decimal,):\n # TODO: considering about the precision & scale for decimal type.\n return types.DecimalType(38, 18)\n elif tpe in (float, np.float_, np.float64, \"float\", \"float64\", \"double\"):\n return types.DoubleType()\n elif tpe in (np.float32, \"float32\", \"f\"):\n return types.FloatType()\n elif tpe in (np.int32, \"int32\", \"i\"):\n return types.IntegerType()\n elif tpe in (int, np.int64, \"int\", \"int64\", \"long\"):\n return types.LongType()\n elif tpe in (np.int16, \"int16\", \"short\"):\n return types.ShortType()\n # StringType\n elif tpe in (str, np.unicode_, \"str\", \"U\"):\n return types.StringType()\n # TimestampType or TimestampNTZType if timezone is not specified.\n elif tpe in (datetime.datetime, np.datetime64, \"datetime64[ns]\", \"M\"):\n return types.TimestampNTZType() if prefer_timestamp_ntz else types.TimestampType()\n\n # categorical types\n elif isinstance(tpe, CategoricalDtype) or (isinstance(tpe, str) and type == \"category\"):\n return types.LongType()\n\n # extension types\n elif extension_dtypes_available:\n # IntegralType\n if isinstance(tpe, Int8Dtype) or (isinstance(tpe, str) and tpe == \"Int8\"):\n return types.ByteType()\n elif 
isinstance(tpe, Int16Dtype) or (isinstance(tpe, str) and tpe == \"Int16\"):\n return types.ShortType()\n elif isinstance(tpe, Int32Dtype) or (isinstance(tpe, str) and tpe == \"Int32\"):\n return types.IntegerType()\n elif isinstance(tpe, Int64Dtype) or (isinstance(tpe, str) and tpe == \"Int64\"):\n return types.LongType()\n\n if extension_object_dtypes_available:\n # BooleanType\n if isinstance(tpe, BooleanDtype) or (isinstance(tpe, str) and tpe == \"boolean\"):\n return types.BooleanType()\n # StringType\n elif isinstance(tpe, StringDtype) or (isinstance(tpe, str) and tpe == \"string\"):\n return types.StringType()\n\n if extension_float_dtypes_available:\n # FractionalType\n if isinstance(tpe, Float32Dtype) or (isinstance(tpe, str) and tpe == \"Float32\"):\n return types.FloatType()\n elif isinstance(tpe, Float64Dtype) or (isinstance(tpe, str) and tpe == \"Float64\"):\n return types.DoubleType()\n\n if raise_error:\n raise TypeError(\"Type %s was not understood.\" % tpe)\n else:\n return None\n\n\ndef spark_type_to_pandas_dtype(\n spark_type: types.DataType, *, use_extension_dtypes: bool = False\n) -> Dtype:\n \"\"\"Return the given Spark DataType to pandas dtype.\"\"\"\n\n if use_extension_dtypes and extension_dtypes_available:\n # IntegralType\n if isinstance(spark_type, types.ByteType):\n return Int8Dtype()\n elif isinstance(spark_type, types.ShortType):\n return Int16Dtype()\n elif isinstance(spark_type, types.IntegerType):\n return Int32Dtype()\n elif isinstance(spark_type, types.LongType):\n return Int64Dtype()\n\n if extension_object_dtypes_available:\n # BooleanType\n if isinstance(spark_type, types.BooleanType):\n return BooleanDtype()\n # StringType\n elif isinstance(spark_type, types.StringType):\n return StringDtype()\n\n # FractionalType\n if extension_float_dtypes_available:\n if isinstance(spark_type, types.FloatType):\n return Float32Dtype()\n elif isinstance(spark_type, types.DoubleType):\n return Float64Dtype()\n\n if isinstance(\n spark_type,\n (\n types.DateType,\n types.NullType,\n types.ArrayType,\n types.MapType,\n types.StructType,\n types.UserDefinedType,\n ),\n ):\n return np.dtype(\"object\")\n elif isinstance(spark_type, types.TimestampType):\n return np.dtype(\"datetime64[ns]\")\n else:\n return np.dtype(to_arrow_type(spark_type).to_pandas_dtype())\n\n\ndef pandas_on_spark_type(tpe: Union[str, type, Dtype]) -> Tuple[Dtype, types.DataType]:\n \"\"\"\n Convert input into a pandas only dtype object or a numpy dtype object,\n and its corresponding Spark DataType.\n\n Parameters\n ----------\n tpe : object to be converted\n\n Returns\n -------\n tuple of np.dtype or a pandas dtype, and Spark DataType\n\n Raises\n ------\n TypeError if not a dtype\n\n Examples\n --------\n >>> pandas_on_spark_type(int)\n (dtype('int64'), LongType)\n >>> pandas_on_spark_type(str)\n (dtype('<U'), StringType)\n >>> pandas_on_spark_type(datetime.date)\n (dtype('O'), DateType)\n >>> pandas_on_spark_type(datetime.datetime)\n (dtype('<M8[ns]'), TimestampType)\n >>> pandas_on_spark_type(List[bool])\n (dtype('O'), ArrayType(BooleanType,true))\n \"\"\"\n try:\n dtype = pandas_dtype(tpe)\n spark_type = as_spark_type(dtype)\n except TypeError:\n spark_type = as_spark_type(tpe)\n dtype = spark_type_to_pandas_dtype(spark_type)\n return dtype, spark_type\n\n\ndef infer_pd_series_spark_type(\n pser: pd.Series, dtype: Dtype, prefer_timestamp_ntz: bool = False\n) -> types.DataType:\n \"\"\"Infer Spark DataType from pandas Series dtype.\n\n :param pser: :class:`pandas.Series` to be inferred\n :param 
dtype: the Series' dtype\n :param prefer_timestamp_ntz: if true, infers datetime without timezone as\n TimestampNTZType type. If false, infers it as TimestampType.\n :return: the inferred Spark data type\n \"\"\"\n if dtype == np.dtype(\"object\"):\n if len(pser) == 0 or pser.isnull().all():\n return types.NullType()\n elif hasattr(pser.iloc[0], \"__UDT__\"):\n return pser.iloc[0].__UDT__\n else:\n return from_arrow_type(pa.Array.from_pandas(pser).type, prefer_timestamp_ntz)\n elif isinstance(dtype, CategoricalDtype):\n if isinstance(pser.dtype, CategoricalDtype):\n return as_spark_type(pser.cat.codes.dtype, prefer_timestamp_ntz=prefer_timestamp_ntz)\n else:\n # `pser` must already be converted to codes.\n return as_spark_type(pser.dtype, prefer_timestamp_ntz=prefer_timestamp_ntz)\n else:\n return as_spark_type(dtype, prefer_timestamp_ntz=prefer_timestamp_ntz)\n\n\ndef infer_return_type(f: Callable) -> Union[SeriesType, DataFrameType, ScalarType, UnknownType]:\n \"\"\"\n Infer the return type from the return type annotation of the given function.\n\n The returned type class indicates both dtypes (a pandas only dtype object\n or a numpy dtype object) and its corresponding Spark DataType.\n\n >>> def func() -> int:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtype\n dtype('int64')\n >>> inferred.spark_type\n LongType\n\n >>> def func() -> ps.Series[int]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtype\n dtype('int64')\n >>> inferred.spark_type\n LongType\n\n >>> def func() -> ps.DataFrame[np.float, str]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('float64'), dtype('<U')]\n >>> inferred.spark_type\n StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))\n\n >>> def func() -> ps.DataFrame[np.float]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('float64')]\n >>> inferred.spark_type\n StructType(List(StructField(c0,DoubleType,true)))\n\n >>> def func() -> 'int':\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtype\n dtype('int64')\n >>> inferred.spark_type\n LongType\n\n >>> def func() -> 'ps.Series[int]':\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtype\n dtype('int64')\n >>> inferred.spark_type\n LongType\n\n >>> def func() -> 'ps.DataFrame[np.float, str]':\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('float64'), dtype('<U')]\n >>> inferred.spark_type\n StructType(List(StructField(c0,DoubleType,true),StructField(c1,StringType,true)))\n\n >>> def func() -> 'ps.DataFrame[np.float]':\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('float64')]\n >>> inferred.spark_type\n StructType(List(StructField(c0,DoubleType,true)))\n\n >>> def func() -> ps.DataFrame['a': np.float, 'b': int]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('float64'), dtype('int64')]\n >>> inferred.spark_type\n StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))\n\n >>> def func() -> \"ps.DataFrame['a': np.float, 'b': int]\":\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('float64'), dtype('int64')]\n >>> inferred.spark_type\n StructType(List(StructField(a,DoubleType,true),StructField(b,LongType,true)))\n\n >>> pdf = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]})\n >>> def func() -> ps.DataFrame[pdf.dtypes]:\n ... 
pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('int64'), dtype('int64')]\n >>> inferred.spark_type\n StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))\n\n >>> pdf = pd.DataFrame({\"a\": [1, 2, 3], \"b\": [3, 4, 5]})\n >>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('int64'), dtype('int64')]\n >>> inferred.spark_type\n StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))\n\n >>> pdf = pd.DataFrame({(\"x\", \"a\"): [1, 2, 3], (\"y\", \"b\"): [3, 4, 5]})\n >>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('int64'), dtype('int64')]\n >>> inferred.spark_type\n StructType(List(StructField((x, a),LongType,true),StructField((y, b),LongType,true)))\n\n >>> pdf = pd.DataFrame({\"a\": [1, 2, 3], \"b\": pd.Categorical([3, 4, 5])})\n >>> def func() -> ps.DataFrame[pdf.dtypes]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('int64'), CategoricalDtype(categories=[3, 4, 5], ordered=False)]\n >>> inferred.spark_type\n StructType(List(StructField(c0,LongType,true),StructField(c1,LongType,true)))\n\n >>> def func() -> ps.DataFrame[zip(pdf.columns, pdf.dtypes)]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('int64'), CategoricalDtype(categories=[3, 4, 5], ordered=False)]\n >>> inferred.spark_type\n StructType(List(StructField(a,LongType,true),StructField(b,LongType,true)))\n\n >>> def func() -> ps.Series[pdf.b.dtype]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtype\n CategoricalDtype(categories=[3, 4, 5], ordered=False)\n >>> inferred.spark_type\n LongType\n\n >>> def func() -> ps.DataFrame[int, [int, int]]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('int64'), dtype('int64'), dtype('int64')]\n >>> inferred.spark_type.simpleString()\n 'struct<__index_level_0__:bigint,c0:bigint,c1:bigint>'\n >>> inferred.index_fields\n [InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,true))]\n\n >>> def func() -> ps.DataFrame[pdf.index.dtype, pdf.dtypes]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('int64'), dtype('int64'), CategoricalDtype(categories=[3, 4, 5], ordered=False)]\n >>> inferred.spark_type.simpleString()\n 'struct<__index_level_0__:bigint,c0:bigint,c1:bigint>'\n >>> inferred.index_fields\n [InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,true))]\n\n >>> def func() -> ps.DataFrame[\n ... (\"index\", CategoricalDtype(categories=[3, 4, 5], ordered=False)),\n ... [(\"id\", int), (\"A\", int)]]:\n ... pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [CategoricalDtype(categories=[3, 4, 5], ordered=False), dtype('int64'), dtype('int64')]\n >>> inferred.spark_type.simpleString()\n 'struct<index:bigint,id:bigint,A:bigint>'\n >>> inferred.index_fields\n [InternalField(dtype=category,struct_field=StructField(index,LongType,true))]\n\n >>> def func() -> ps.DataFrame[\n ... (pdf.index.name, pdf.index.dtype), zip(pdf.columns, pdf.dtypes)]:\n ... 
pass\n >>> inferred = infer_return_type(func)\n >>> inferred.dtypes\n [dtype('int64'), dtype('int64'), CategoricalDtype(categories=[3, 4, 5], ordered=False)]\n >>> inferred.spark_type.simpleString()\n 'struct<__index_level_0__:bigint,a:bigint,b:bigint>'\n >>> inferred.index_fields\n [InternalField(dtype=int64,struct_field=StructField(__index_level_0__,LongType,true))]\n \"\"\"\n # We should re-import to make sure the class 'SeriesType' is not treated as a class\n # within this module locally. See Series.__class_getitem__ which imports this class\n # canonically.\n from pyspark.pandas.internal import InternalField, SPARK_INDEX_NAME_FORMAT\n from pyspark.pandas.typedef import SeriesType, NameTypeHolder, IndexNameTypeHolder\n from pyspark.pandas.utils import name_like_string\n\n spec = getfullargspec(f)\n tpe = spec.annotations.get(\"return\", None)\n if isinstance(tpe, str):\n # This type hint can happen when given hints are string to avoid forward reference.\n tpe = resolve_string_type_hint(tpe)\n\n if hasattr(tpe, \"__origin__\") and (\n tpe.__origin__ == ps.DataFrame or tpe.__origin__ == ps.Series\n ):\n # When Python version is lower then 3.7. Unwrap it to a Tuple/SeriesType type hints.\n tpe = tpe.__args__[0]\n\n if hasattr(tpe, \"__origin__\") and issubclass(tpe.__origin__, SeriesType):\n tpe = tpe.__args__[0]\n if issubclass(tpe, NameTypeHolder):\n tpe = tpe.tpe\n dtype, spark_type = pandas_on_spark_type(tpe)\n return SeriesType(dtype, spark_type)\n\n # Note that, DataFrame type hints will create a Tuple.\n # Python 3.6 has `__name__`. Python 3.7 and 3.8 have `_name`.\n # Check if the name is Tuple.\n name = getattr(tpe, \"_name\", getattr(tpe, \"__name__\", None))\n if name == \"Tuple\":\n tuple_type = tpe\n if hasattr(tuple_type, \"__tuple_params__\"):\n # Python 3.5.0 to 3.5.2 has '__tuple_params__' instead.\n # See https://github.com/python/cpython/blob/v3.5.2/Lib/typing.py\n parameters = getattr(tuple_type, \"__tuple_params__\")\n else:\n parameters = getattr(tuple_type, \"__args__\")\n\n index_parameters = [\n p for p in parameters if isclass(p) and issubclass(p, IndexNameTypeHolder)\n ]\n data_parameters = [p for p in parameters if p not in index_parameters]\n assert len(data_parameters) > 0, \"Type hints for data must not be empty.\"\n\n index_fields = []\n if len(index_parameters) >= 1:\n for level, index_parameter in enumerate(index_parameters):\n index_name = index_parameter.name\n index_dtype, index_spark_type = pandas_on_spark_type(index_parameter.tpe)\n index_fields.append(\n InternalField(\n dtype=index_dtype,\n struct_field=types.StructField(\n name=index_name\n if index_name is not None\n else SPARK_INDEX_NAME_FORMAT(level),\n dataType=index_spark_type,\n ),\n )\n )\n else:\n # No type hint for index.\n assert len(index_parameters) == 0\n\n data_dtypes, data_spark_types = zip(\n *(\n pandas_on_spark_type(p.tpe)\n if isclass(p) and issubclass(p, NameTypeHolder)\n else pandas_on_spark_type(p)\n for p in data_parameters\n )\n )\n data_names = [\n p.name if isclass(p) and issubclass(p, NameTypeHolder) else None\n for p in data_parameters\n ]\n data_fields = []\n for i, (data_name, data_dtype, data_spark_type) in enumerate(\n zip(data_names, data_dtypes, data_spark_types)\n ):\n data_fields.append(\n InternalField(\n dtype=data_dtype,\n struct_field=types.StructField(\n name=name_like_string(data_name) if data_name is not None else (\"c%s\" % i),\n dataType=data_spark_type,\n ),\n )\n )\n\n return DataFrameType(index_fields=index_fields, data_fields=data_fields)\n\n tpes = 
pandas_on_spark_type(tpe)\n if tpes is None:\n return UnknownType(tpe)\n else:\n return ScalarType(*tpes)\n\n\n# TODO: once pandas exposes a typing module like numpy.typing, we should deprecate\n# this logic and migrate to it with implementing the typing module in pandas API on Spark.\n\n\ndef create_type_for_series_type(param: Any) -> Type[SeriesType]:\n \"\"\"\n Supported syntax:\n\n >>> str(ps.Series[float]).endswith(\"SeriesType[float]\")\n True\n \"\"\"\n from pyspark.pandas.typedef import NameTypeHolder\n\n if isinstance(param, ExtensionDtype):\n new_class = type(\n NameTypeHolder.short_name, (NameTypeHolder,), {}\n ) # type: Type[NameTypeHolder]\n new_class.tpe = param\n else:\n new_class = param.type if isinstance(param, np.dtype) else param\n\n return SeriesType[new_class] # type: ignore[valid-type]\n\n\n# TODO: Remove this variadic-generic hack by tuple once ww drop Python up to 3.9.\n# See also PEP 646. One problem is that pandas doesn't inherits Generic[T]\n# so we might have to leave this hack only for monkey-patching pandas DataFrame.\ndef create_tuple_for_frame_type(params: Any) -> object:\n \"\"\"\n This is a workaround to support variadic generic in DataFrame.\n\n See https://github.com/python/typing/issues/193\n we always wraps the given type hints by a tuple to mimic the variadic generic.\n\n Supported syntax:\n\n >>> import pandas as pd\n >>> pdf = pd.DataFrame({'a': range(1)})\n\n Typing data columns only:\n\n >>> ps.DataFrame[float, float] # doctest: +ELLIPSIS\n typing.Tuple[...NameType, ...NameType]\n >>> ps.DataFrame[pdf.dtypes] # doctest: +ELLIPSIS\n typing.Tuple[...NameType]\n >>> ps.DataFrame[\"id\": int, \"A\": int] # doctest: +ELLIPSIS\n typing.Tuple[...NameType, ...NameType]\n >>> ps.DataFrame[zip(pdf.columns, pdf.dtypes)] # doctest: +ELLIPSIS\n typing.Tuple[...NameType]\n\n Typing data columns with an index:\n\n >>> ps.DataFrame[int, [int, int]] # doctest: +ELLIPSIS\n typing.Tuple[...IndexNameType, ...NameType, ...NameType]\n >>> ps.DataFrame[pdf.index.dtype, pdf.dtypes] # doctest: +ELLIPSIS\n typing.Tuple[...IndexNameType, ...NameType]\n >>> ps.DataFrame[(\"index\", int), [(\"id\", int), (\"A\", int)]] # doctest: +ELLIPSIS\n typing.Tuple[...IndexNameType, ...NameType, ...NameType]\n >>> ps.DataFrame[(pdf.index.name, pdf.index.dtype), zip(pdf.columns, pdf.dtypes)]\n ... # doctest: +ELLIPSIS\n typing.Tuple[...IndexNameType, ...NameType]\n\n Typing data columns with an Multi-index:\n >>> arrays = [[1, 1, 2], ['red', 'blue', 'red']]\n >>> idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))\n >>> pdf = pd.DataFrame({'a': range(3)}, index=idx)\n >>> ps.DataFrame[[int, int], [int, int]] # doctest: +ELLIPSIS\n typing.Tuple[...IndexNameType, ...IndexNameType, ...NameType, ...NameType]\n >>> ps.DataFrame[pdf.index.dtypes, pdf.dtypes] # doctest: +ELLIPSIS\n typing.Tuple[...IndexNameType, ...NameType]\n >>> ps.DataFrame[[(\"index-1\", int), (\"index-2\", int)], [(\"id\", int), (\"A\", int)]]\n ... # doctest: +ELLIPSIS\n typing.Tuple[...IndexNameType, ...IndexNameType, ...NameType, ...NameType]\n >>> ps.DataFrame[zip(pdf.index.names, pdf.index.dtypes), zip(pdf.columns, pdf.dtypes)]\n ... 
# doctest: +ELLIPSIS\n typing.Tuple[...IndexNameType, ...NameType]\n \"\"\"\n return Tuple[_to_type_holders(params)]\n\n\ndef _to_type_holders(params: Any) -> Tuple:\n from pyspark.pandas.typedef import NameTypeHolder, IndexNameTypeHolder\n\n is_with_index = (\n isinstance(params, tuple)\n and len(params) == 2\n and isinstance(params[1], (zip, list, pd.Series))\n )\n\n if is_with_index:\n # With index\n # DataFrame[index_type, [type, ...]]\n # DataFrame[dtype instance, dtypes instance]\n # DataFrame[[index_type, ...], [type, ...]]\n # DataFrame[dtypes instance, dtypes instance]\n # DataFrame[(index_name, index_type), [(name, type), ...]]\n # DataFrame[(index_name, index_type), zip(names, types)]\n # DataFrame[[(index_name, index_type), ...], [(name, type), ...]]\n # DataFrame[zip(index_names, index_types), zip(names, types)]\n def is_list_of_pairs(p: Any) -> bool:\n return (\n isinstance(p, list)\n and len(p) >= 1\n and all(isinstance(param, tuple) and (len(param) == 2) for param in p)\n )\n\n index_params = params[0]\n if isinstance(index_params, tuple) and len(index_params) == 2:\n # DataFrame[(\"index\", int), ...]\n index_params = [index_params]\n\n if is_list_of_pairs(index_params):\n # DataFrame[[(\"index\", int), (\"index-2\", int)], ...]\n index_params = tuple(slice(name, tpe) for name, tpe in index_params)\n\n index_types = _new_type_holders(index_params, IndexNameTypeHolder)\n\n data_types = params[1]\n if is_list_of_pairs(data_types):\n # DataFrame[..., [(\"id\", int), (\"A\", int)]]\n data_types = tuple(slice(*data_type) for data_type in data_types)\n\n data_types = _new_type_holders(data_types, NameTypeHolder)\n\n return index_types + data_types\n else:\n # Without index\n # DataFrame[type, type, ...]\n # DataFrame[name: type, name: type, ...]\n # DataFrame[dtypes instance]\n # DataFrame[zip(names, types)]\n return _new_type_holders(params, NameTypeHolder)\n\n\ndef _new_type_holders(\n params: Any, holder_clazz: Type[Union[NameTypeHolder, IndexNameTypeHolder]]\n) -> Tuple:\n if isinstance(params, zip):\n # DataFrame[zip(names, types)]\n params = tuple(slice(name, tpe) for name, tpe in params) # type: ignore[misc, has-type]\n\n if isinstance(params, Iterable):\n # DataFrame[type, type, ...]\n # DataFrame[name: type, name: type, ...]\n # DataFrame[dtypes instance]\n params = tuple(params)\n else:\n # DataFrame[type, type]\n # DataFrame[name: type]\n params = (params,)\n\n is_named_params = all(\n isinstance(param, slice) and param.step is None and param.stop is not None\n for param in params\n )\n is_unnamed_params = all(\n not isinstance(param, slice) and not isinstance(param, Iterable) for param in params\n )\n\n if is_named_params:\n # DataFrame[\"id\": int, \"A\": int]\n new_params = []\n for param in params:\n new_param = type(\n holder_clazz.short_name, (holder_clazz,), {}\n ) # type: Type[Union[NameTypeHolder, IndexNameTypeHolder]]\n new_param.name = param.start\n if isinstance(param.stop, ExtensionDtype):\n new_param.tpe = param.stop\n else:\n # When the given argument is a numpy's dtype instance.\n new_param.tpe = param.stop.type if isinstance(param.stop, np.dtype) else param.stop\n new_params.append(new_param)\n return tuple(new_params)\n elif is_unnamed_params:\n # DataFrame[float, float]\n new_types = []\n for param in params:\n new_type = type(\n holder_clazz.short_name, (holder_clazz,), {}\n ) # type: Type[Union[NameTypeHolder, IndexNameTypeHolder]]\n if isinstance(param, ExtensionDtype):\n new_type.tpe = param\n else:\n new_type.tpe = param.type if 
isinstance(param, np.dtype) else param\n new_types.append(new_type)\n return tuple(new_types)\n else:\n raise TypeError(\n \"\"\"Type hints should be specified as one of:\n - DataFrame[type, type, ...]\n - DataFrame[name: type, name: type, ...]\n - DataFrame[dtypes instance]\n - DataFrame[zip(names, types)]\n - DataFrame[index_type, [type, ...]]\n - DataFrame[(index_name, index_type), [(name, type), ...]]\n - DataFrame[dtype instance, dtypes instance]\n - DataFrame[(index_name, index_type), zip(names, types)]\n - DataFrame[[index_type, ...], [type, ...]]\n - DataFrame[[(index_name, index_type), ...], [(name, type), ...]]\n - DataFrame[dtypes instance, dtypes instance]\n - DataFrame[zip(index_names, index_types), zip(names, types)]\\n\"\"\"\n + \"However, got %s.\" % str(params)\n )\n\n\ndef _test() -> None:\n import doctest\n import sys\n import pyspark.pandas.typedef.typehints\n\n globs = pyspark.pandas.typedef.typehints.__dict__.copy()\n (failure_count, test_count) = doctest.testmod(\n pyspark.pandas.typedef.typehints,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "pandas.Int64Dtype", "numpy.dtype", "pandas.Float32Dtype", "pandas.Float64Dtype", "pandas.Int32Dtype", "pandas.StringDtype", "pandas.BooleanDtype", "pandas.Int8Dtype", "pandas.Int16Dtype", "pandas.api.types.pandas_dtype" ] ]
nipunagarwala/cs273b_final_project
[ "9d816cb562feb741ebf883bc03d399b42fb2622f" ]
[ "cae_layers.py" ]
[ "import tensorflow as tf\nimport numpy as np\nfrom cae_input_brain import *\nfrom utils import *\n\n\n# Define custom API for creating and adding layers to NN Model\n# Wrapper around Tensorflow API, for ease of use and readibility\n\nclass Layers(object):\n\n def __init__(self):\n self.stdDev = 0.35\n\n ''' Initializes the weights based on the std dev set in the constructor\n\n '''\n def init_weights(self, shape):\n return tf.Variable(tf.random_normal(shape, stddev=self.stdDev))\n\n def createVariables(self, train, data_list, batch_size, dimensions):\n # train: Boolean\n # data_list: Path of a file containing a list of all binary data file paths\n # batch_size: int\n p_keep_conv = tf.placeholder(\"float\")\n keys, X_image, X_data, Y = inputs(train, data_list, batch_size, dimensions)\n return keys, X_image, X_data, Y, p_keep_conv\n\n\n def dropout(self, prev_layer, p_keep):\n next_layer = tf.nn.dropout(prev_layer, p_keep)\n return next_layer\n\n def sigmoid(self, prev_layer):\n next_layer = tf.sigmoid(prev_layer)\n return next_layer\n\n def batch_norm(self, prev_layer, axes, beta_shape,scale_shape, var_eps = 1e-6):\n mu, sigma = tf.nn.moments(prev_layer, axes)\n beta = self.init_weights(beta_shape)\n scale = self.init_weights(scale_shape)\n next_layer = tf.nn.batch_normalization(prev_layer, mu, sigma, beta, scale, var_eps)\n return next_layer\n\n def fcLayer(self, prev_layer, wshape, sigmoid=True, batch_norm=False):\n wOut = self.init_weights(wshape)\n b = self.init_weights([wshape[1]])\n next_layer = tf.add(tf.matmul(prev_layer, wOut), b)\n if batch_norm:\n next_layer = self.batch_norm(next_layer,[0],[wshape[1]],[wshape[1]] )\n if sigmoid:\n next_layer = self.sigmoid(next_layer)\n\n\n return next_layer, wOut\n\n def cost_function(self, model_output, Y, op='square'):\n cost = None\n if op == 'square':\n cost = tf.reduce_mean(tf.square(tf.sub(model_output,Y)))\n elif op == 'cross-entropy':\n cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(model_output, Y))\n elif op == 'softmax':\n cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(model_output, Y))\n\n return cost\n\n def minimization_function(self, cost, learning_rate, beta1, beta2, opt='Rmsprop'):\n train_op = None\n if opt == 'Rmsprop':\n train_op = tf.train.RMSPropOptimizer(learning_rate, beta1).minimize(cost)\n elif opt == 'adam':\n train_op = tf.train.AdamOptimizer(learning_rate, beta1, beta2).minimize(cost)\n elif opt == 'adagrad':\n train_op = tf.train.AdagradOptimizer(learning_rate, initial_accumulator_value=0.1).minimize(cost)\n\n return train_op\n\n def add_regularization(self, loss, wgt, lmbda, rho, op='kl'):\n nextLoss = None\n if op == 'l2':\n nextLoss = tf.add(loss, tf.mul(lmbda,tf.nn.l2_loss(wgt)))\n elif op == 'kl':\n nextLoss = tf.add(loss, tf.mul(lmbda, self.kl_sparse_regularization(wgt, lmbda, rho)))\n return nextLoss\n\n def kl_sparse_regularization(self, wgt, lmbda, rho):\n rho_hat = tf.reduce_mean(wgt)\n invrho = tf.sub(tf.constant(1.), rho)\n invrhohat = tf.sub(tf.constant(1.), rho_hat)\n logrho = tf.add(tf.abs(self.logfunc(rho,rho_hat)), tf.abs(self.logfunc(invrho, invrhohat)))\n return logrho\n\n def logfunc(self, x1, x2):\n clippDiv = tf.clip_by_value(tf.div(x1,x2),1e-12,1e10)\n return tf.mul( x1,tf.log(clippDiv))\n\n\n def prediction(self, model_output):\n predict_op = tf.argmax(model_output, 1)\n return predict_op\n\n\nclass CNNLayers(Layers):\n\n ''' Constructor for the ConvolutionalNN class. 
Initializes the\n std dev for the distributions used for weight initializations\n '''\n def __init__(self):\n Layers.__init__(self)\n self.stdDev = 0.35\n\n def init_weights(self, shape):\n return tf.Variable(tf.random_normal(shape, stddev=self.stdDev))\n\n def conv_layer(self, prev_layer_out, w_shape, layer_stride, w_name, num_dim = '2d', padding='SAME',if_relu = True, batchNorm = True):\n w_conv = tf.Variable(tf.random_normal(w_shape, stddev=self.stdDev),\n name=w_name)\n\n numFilters = w_shape[len(w_shape)-1]\n b = tf.Variable(tf.random_normal([numFilters], stddev=self.stdDev))\n\n nextLayer = None\n if num_dim == '3d':\n nextLayer = tf.add(tf.nn.conv3d(prev_layer_out, w_conv,\n strides=layer_stride, padding=padding,name=w_name),b)\n else:\n nextLayer = tf.add(tf.nn.conv2d(prev_layer_out, w_conv,\n strides=layer_stride, padding=padding,name=w_name),b)\n\n if batchNorm:\n nextLayer = self.batch_norm(nextLayer, [0,1,2,3], [numFilters], [numFilters])\n\n if if_relu:\n nextLayer = self.relu(nextLayer)\n\n\n return nextLayer, w_conv\n\n\n def deconv_layer(self, prev_layer_out, filter_shape, out_shape, layer_stride, w_name, num_dim = '2d',padding='SAME', if_relu = True, batchNorm = True):\n w_deconv = tf.Variable(tf.random_normal(filter_shape, stddev=self.stdDev),\n name=w_name)\n\n\n numFilters =filter_shape[len(filter_shape)-2]\n b = tf.Variable(tf.random_normal([numFilters], stddev=self.stdDev))\n\n nextLayer = None\n\n if num_dim == '3d':\n nextLayer = tf.add(tf.nn.conv3d_transpose(prev_layer_out, w_deconv, out_shape,\n strides=layer_stride, padding=padding),b)\n else:\n nextLayer = tf.add(tf.nn.conv2d_transpose(prev_layer_out, w_deconv, out_shape,\n strides=layer_stride, padding=padding),b)\n\n if batchNorm:\n nextLayer = self.batch_norm(nextLayer, [0,1,2,3], [numFilters], [numFilters])\n\n if if_relu:\n nextLayer = self.relu(nextLayer)\n\n\n return nextLayer, w_deconv\n\n def pool(self, prev_layer, window_size, str_size, poolType = 'max'):\n next_layer = None\n if poolType == 'max':\n next_layer = tf.nn.max_pool3d(prev_layer, ksize=window_size,\n strides=str_size, padding='SAME')\n elif poolType == 'avg':\n next_layer = tf.nn.avg_pool3d(prev_layer, ksize=window_size,\n strides=str_size, padding='SAME')\n\n return next_layer\n\n def relu(self, prev_layer):\n next_layer = tf.nn.relu(prev_layer)\n return next_layer\n\n def lrelu(x, leak=0.2, name=\"lrelu\"):\n return tf.maximum(x, leak*x)\n\n\n def residual_unit(self, input_layer, output_layer):\n res = input_layer + output_layer\n return res\n" ]
[ [ "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.nn.avg_pool3d", "tensorflow.sigmoid", "tensorflow.nn.l2_loss", "tensorflow.matmul", "tensorflow.random_normal", "tensorflow.nn.dropout", "tensorflow.sub", "tensorflow.constant", "tensorflow.nn.sigmoid_cross_entropy_with_logits", "tensorflow.nn.max_pool3d", "tensorflow.nn.conv2d_transpose", "tensorflow.nn.batch_normalization", "tensorflow.train.RMSPropOptimizer", "tensorflow.nn.moments", "tensorflow.placeholder", "tensorflow.div", "tensorflow.train.AdamOptimizer", "tensorflow.reduce_mean", "tensorflow.nn.conv3d", "tensorflow.nn.conv2d", "tensorflow.train.AdagradOptimizer", "tensorflow.argmax", "tensorflow.log", "tensorflow.nn.relu", "tensorflow.maximum", "tensorflow.nn.conv3d_transpose" ] ]
ahojnnes/numpy
[ "55b766d549dc7a9cd417de001a2f7ba8d445579f" ]
[ "numpy/distutils/system_info.py" ]
[ "#!/bin/env python\n\"\"\"\nThis file defines a set of system_info classes for getting\ninformation about various resources (libraries, library directories,\ninclude directories, etc.) in the system. Currently, the following\nclasses are available:\n\n atlas_info\n atlas_threads_info\n atlas_blas_info\n atlas_blas_threads_info\n lapack_atlas_info\n lapack_atlas_threads_info\n atlas_3_10_info\n atlas_3_10_threads_info\n atlas_3_10_blas_info,\n atlas_3_10_blas_threads_info,\n lapack_atlas_3_10_info\n lapack_atlas_3_10_threads_info\n blas_info\n lapack_info\n openblas_info\n blis_info\n blas_opt_info # usage recommended\n lapack_opt_info # usage recommended\n fftw_info,dfftw_info,sfftw_info\n fftw_threads_info,dfftw_threads_info,sfftw_threads_info\n djbfft_info\n x11_info\n lapack_src_info\n blas_src_info\n numpy_info\n numarray_info\n numpy_info\n boost_python_info\n agg2_info\n wx_info\n gdk_pixbuf_xlib_2_info\n gdk_pixbuf_2_info\n gdk_x11_2_info\n gtkp_x11_2_info\n gtkp_2_info\n xft_info\n freetype2_info\n umfpack_info\n\nUsage:\n info_dict = get_info(<name>)\n where <name> is a string 'atlas','x11','fftw','lapack','blas',\n 'lapack_src', 'blas_src', etc. For a complete list of allowed names,\n see the definition of get_info() function below.\n\n Returned info_dict is a dictionary which is compatible with\n distutils.setup keyword arguments. If info_dict == {}, then the\n asked resource is not available (system_info could not find it).\n\n Several *_info classes specify an environment variable to specify\n the locations of software. When setting the corresponding environment\n variable to 'None' then the software will be ignored, even when it\n is available in system.\n\nGlobal parameters:\n system_info.search_static_first - search static libraries (.a)\n in precedence to shared ones (.so, .sl) if enabled.\n system_info.verbosity - output the results to stdout if enabled.\n\nThe file 'site.cfg' is looked for in\n\n1) Directory of main setup.py file being run.\n2) Home directory of user running the setup.py file as ~/.numpy-site.cfg\n3) System wide directory (location of this file...)\n\nThe first one found is used to get system configuration options The\nformat is that used by ConfigParser (i.e., Windows .INI style). The\nsection ALL has options that are the default for each section. The\navailable sections are fftw, atlas, and x11. Appropriate defaults are\nused if nothing is specified.\n\nThe order of finding the locations of resources is the following:\n 1. environment variable\n 2. section in site.cfg\n 3. ALL section in site.cfg\nOnly the first complete match is returned.\n\nExample:\n----------\n[ALL]\nlibrary_dirs = /usr/lib:/usr/local/lib:/opt/lib\ninclude_dirs = /usr/include:/usr/local/include:/opt/include\nsrc_dirs = /usr/local/src:/opt/src\n# search static libraries (.a) in preference to shared ones (.so)\nsearch_static_first = 0\n\n[fftw]\nfftw_libs = rfftw, fftw\nfftw_opt_libs = rfftw_threaded, fftw_threaded\n# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs\n\n[atlas]\nlibrary_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas\n# for overriding the names of the atlas libraries\natlas_libs = lapack, f77blas, cblas, atlas\n\n[x11]\nlibrary_dirs = /usr/X11R6/lib\ninclude_dirs = /usr/X11R6/include\n----------\n\nAuthors:\n Pearu Peterson <[email protected]>, February 2002\n David M. 
Cooke <[email protected]>, April 2002\n\nCopyright 2002 Pearu Peterson all rights reserved,\nPearu Peterson <[email protected]>\nPermission to use, modify, and distribute this software is given under the\nterms of the NumPy (BSD style) license. See LICENSE.txt that came with\nthis distribution for specifics.\n\nNO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport sys\nimport os\nimport re\nimport copy\nimport warnings\nfrom glob import glob\nfrom functools import reduce\nif sys.version_info[0] < 3:\n from ConfigParser import NoOptionError\n from ConfigParser import RawConfigParser as ConfigParser\nelse:\n from configparser import NoOptionError\n from configparser import RawConfigParser as ConfigParser\n# It seems that some people are importing ConfigParser from here so is\n# good to keep its class name. Use of RawConfigParser is needed in\n# order to be able to load path names with percent in them, like\n# `feature%2Fcool` which is common on git flow branch names.\n\nfrom distutils.errors import DistutilsError\nfrom distutils.dist import Distribution\nimport distutils.sysconfig\nfrom distutils import log\nfrom distutils.util import get_platform\n\nfrom numpy.distutils.exec_command import \\\n find_executable, exec_command, get_pythonexe\nfrom numpy.distutils.misc_util import is_sequence, is_string, \\\n get_shared_lib_extension\nfrom numpy.distutils.command.config import config as cmd_config\nfrom numpy.distutils.compat import get_exception\nimport distutils.ccompiler\nimport tempfile\nimport shutil\n\n\n# Determine number of bits\nimport platform\n_bits = {'32bit': 32, '64bit': 64}\nplatform_bits = _bits[platform.architecture()[0]]\n\n\ndef libpaths(paths, bits):\n \"\"\"Return a list of library paths valid on 32 or 64 bit systems.\n\n Inputs:\n paths : sequence\n A sequence of strings (typically paths)\n bits : int\n An integer, the only valid values are 32 or 64. 
A ValueError exception\n is raised otherwise.\n\n Examples:\n\n Consider a list of directories\n >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib']\n\n For a 32-bit platform, this is already valid:\n >>> np.distutils.system_info.libpaths(paths,32)\n ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib']\n\n On 64 bits, we prepend the '64' postfix\n >>> np.distutils.system_info.libpaths(paths,64)\n ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib',\n '/usr/lib64', '/usr/lib']\n \"\"\"\n if bits not in (32, 64):\n raise ValueError(\"Invalid bit size in libpaths: 32 or 64 only\")\n\n # Handle 32bit case\n if bits == 32:\n return paths\n\n # Handle 64bit case\n out = []\n for p in paths:\n out.extend([p + '64', p])\n\n return out\n\n\nif sys.platform == 'win32':\n default_lib_dirs = ['C:\\\\',\n os.path.join(distutils.sysconfig.EXEC_PREFIX,\n 'libs')]\n default_runtime_dirs = []\n default_include_dirs = []\n default_src_dirs = ['.']\n default_x11_lib_dirs = []\n default_x11_include_dirs = []\nelse:\n default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib',\n '/opt/local/lib', '/sw/lib'], platform_bits)\n default_runtime_dirs = []\n default_include_dirs = ['/usr/local/include',\n '/opt/include', '/usr/include',\n # path of umfpack under macports\n '/opt/local/include/ufsparse',\n '/opt/local/include', '/sw/include',\n '/usr/include/suitesparse']\n default_src_dirs = ['.', '/usr/local/src', '/opt/src', '/sw/src']\n\n default_x11_lib_dirs = libpaths(['/usr/X11R6/lib', '/usr/X11/lib',\n '/usr/lib'], platform_bits)\n default_x11_include_dirs = ['/usr/X11R6/include', '/usr/X11/include',\n '/usr/include']\n\n if os.path.exists('/usr/lib/X11'):\n globbed_x11_dir = glob('/usr/lib/*/libX11.so')\n if globbed_x11_dir:\n x11_so_dir = os.path.split(globbed_x11_dir[0])[0]\n default_x11_lib_dirs.extend([x11_so_dir, '/usr/lib/X11'])\n default_x11_include_dirs.extend(['/usr/lib/X11/include',\n '/usr/include/X11'])\n\n import subprocess as sp\n tmp = None\n try:\n # Explicitly open/close file to avoid ResourceWarning when\n # tests are run in debug mode Python 3.\n tmp = open(os.devnull, 'w')\n p = sp.Popen([\"gcc\", \"-print-multiarch\"], stdout=sp.PIPE,\n stderr=tmp)\n except (OSError, DistutilsError):\n # OSError if gcc is not installed, or SandboxViolation (DistutilsError\n # subclass) if an old setuptools bug is triggered (see gh-3160).\n pass\n else:\n triplet = str(p.communicate()[0].decode().strip())\n if p.returncode == 0:\n # gcc supports the \"-print-multiarch\" option\n default_x11_lib_dirs += [os.path.join(\"/usr/lib/\", triplet)]\n default_lib_dirs += [os.path.join(\"/usr/lib/\", triplet)]\n finally:\n if tmp is not None:\n tmp.close()\n\nif os.path.join(sys.prefix, 'lib') not in default_lib_dirs:\n default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))\n default_include_dirs.append(os.path.join(sys.prefix, 'include'))\n default_src_dirs.append(os.path.join(sys.prefix, 'src'))\n\ndefault_lib_dirs = [_m for _m in default_lib_dirs if os.path.isdir(_m)]\ndefault_runtime_dirs = [_m for _m in default_runtime_dirs if os.path.isdir(_m)]\ndefault_include_dirs = [_m for _m in default_include_dirs if os.path.isdir(_m)]\ndefault_src_dirs = [_m for _m in default_src_dirs if os.path.isdir(_m)]\n\nso_ext = get_shared_lib_extension()\n\n\ndef get_standard_file(fname):\n \"\"\"Returns a list of files named 'fname' from\n 1) System-wide directory (directory-location of this module)\n 2) Users HOME directory (os.environ['HOME'])\n 3) Local directory\n \"\"\"\n # System-wide 
file\n filenames = []\n try:\n f = __file__\n except NameError:\n f = sys.argv[0]\n else:\n sysfile = os.path.join(os.path.split(os.path.abspath(f))[0],\n fname)\n if os.path.isfile(sysfile):\n filenames.append(sysfile)\n\n # Home directory\n # And look for the user config file\n try:\n f = os.path.expanduser('~')\n except KeyError:\n pass\n else:\n user_file = os.path.join(f, fname)\n if os.path.isfile(user_file):\n filenames.append(user_file)\n\n # Local file\n if os.path.isfile(fname):\n filenames.append(os.path.abspath(fname))\n\n return filenames\n\n\ndef get_info(name, notfound_action=0):\n \"\"\"\n notfound_action:\n 0 - do nothing\n 1 - display warning message\n 2 - raise error\n \"\"\"\n cl = {'atlas': atlas_info, # use lapack_opt or blas_opt instead\n 'atlas_threads': atlas_threads_info, # ditto\n 'atlas_blas': atlas_blas_info,\n 'atlas_blas_threads': atlas_blas_threads_info,\n 'lapack_atlas': lapack_atlas_info, # use lapack_opt instead\n 'lapack_atlas_threads': lapack_atlas_threads_info, # ditto\n 'atlas_3_10': atlas_3_10_info, # use lapack_opt or blas_opt instead\n 'atlas_3_10_threads': atlas_3_10_threads_info, # ditto\n 'atlas_3_10_blas': atlas_3_10_blas_info,\n 'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,\n 'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead\n 'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto\n 'mkl': mkl_info,\n # openblas which may or may not have embedded lapack\n 'openblas': openblas_info, # use blas_opt instead\n # openblas with embedded lapack\n 'openblas_lapack': openblas_lapack_info, # use blas_opt instead\n 'blis': blis_info, # use blas_opt instead\n 'lapack_mkl': lapack_mkl_info, # use lapack_opt instead\n 'blas_mkl': blas_mkl_info, # use blas_opt instead\n 'x11': x11_info,\n 'fft_opt': fft_opt_info,\n 'fftw': fftw_info,\n 'fftw2': fftw2_info,\n 'fftw3': fftw3_info,\n 'dfftw': dfftw_info,\n 'sfftw': sfftw_info,\n 'fftw_threads': fftw_threads_info,\n 'dfftw_threads': dfftw_threads_info,\n 'sfftw_threads': sfftw_threads_info,\n 'djbfft': djbfft_info,\n 'blas': blas_info, # use blas_opt instead\n 'lapack': lapack_info, # use lapack_opt instead\n 'lapack_src': lapack_src_info,\n 'blas_src': blas_src_info,\n 'numpy': numpy_info,\n 'f2py': f2py_info,\n 'Numeric': Numeric_info,\n 'numeric': Numeric_info,\n 'numarray': numarray_info,\n 'numerix': numerix_info,\n 'lapack_opt': lapack_opt_info,\n 'blas_opt': blas_opt_info,\n 'boost_python': boost_python_info,\n 'agg2': agg2_info,\n 'wx': wx_info,\n 'gdk_pixbuf_xlib_2': gdk_pixbuf_xlib_2_info,\n 'gdk-pixbuf-xlib-2.0': gdk_pixbuf_xlib_2_info,\n 'gdk_pixbuf_2': gdk_pixbuf_2_info,\n 'gdk-pixbuf-2.0': gdk_pixbuf_2_info,\n 'gdk': gdk_info,\n 'gdk_2': gdk_2_info,\n 'gdk-2.0': gdk_2_info,\n 'gdk_x11_2': gdk_x11_2_info,\n 'gdk-x11-2.0': gdk_x11_2_info,\n 'gtkp_x11_2': gtkp_x11_2_info,\n 'gtk+-x11-2.0': gtkp_x11_2_info,\n 'gtkp_2': gtkp_2_info,\n 'gtk+-2.0': gtkp_2_info,\n 'xft': xft_info,\n 'freetype2': freetype2_info,\n 'umfpack': umfpack_info,\n 'amd': amd_info,\n }.get(name.lower(), system_info)\n return cl().get_info(notfound_action)\n\n\nclass NotFoundError(DistutilsError):\n \"\"\"Some third-party program or library is not found.\"\"\"\n\n\nclass AtlasNotFoundError(NotFoundError):\n \"\"\"\n Atlas (http://math-atlas.sourceforge.net/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [atlas]) or by setting\n the ATLAS environment variable.\"\"\"\n\n\nclass 
LapackNotFoundError(NotFoundError):\n \"\"\"\n Lapack (http://www.netlib.org/lapack/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [lapack]) or by setting\n the LAPACK environment variable.\"\"\"\n\n\nclass LapackSrcNotFoundError(LapackNotFoundError):\n \"\"\"\n Lapack (http://www.netlib.org/lapack/) sources not found.\n Directories to search for the sources can be specified in the\n numpy/distutils/site.cfg file (section [lapack_src]) or by setting\n the LAPACK_SRC environment variable.\"\"\"\n\n\nclass BlasNotFoundError(NotFoundError):\n \"\"\"\n Blas (http://www.netlib.org/blas/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [blas]) or by setting\n the BLAS environment variable.\"\"\"\n\n\nclass BlasSrcNotFoundError(BlasNotFoundError):\n \"\"\"\n Blas (http://www.netlib.org/blas/) sources not found.\n Directories to search for the sources can be specified in the\n numpy/distutils/site.cfg file (section [blas_src]) or by setting\n the BLAS_SRC environment variable.\"\"\"\n\n\nclass FFTWNotFoundError(NotFoundError):\n \"\"\"\n FFTW (http://www.fftw.org/) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [fftw]) or by setting\n the FFTW environment variable.\"\"\"\n\n\nclass DJBFFTNotFoundError(NotFoundError):\n \"\"\"\n DJBFFT (http://cr.yp.to/djbfft.html) libraries not found.\n Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [djbfft]) or by setting\n the DJBFFT environment variable.\"\"\"\n\n\nclass NumericNotFoundError(NotFoundError):\n \"\"\"\n Numeric (http://www.numpy.org/) module not found.\n Get it from above location, install it, and retry setup.py.\"\"\"\n\n\nclass X11NotFoundError(NotFoundError):\n \"\"\"X11 libraries not found.\"\"\"\n\n\nclass UmfpackNotFoundError(NotFoundError):\n \"\"\"\n UMFPACK sparse solver (http://www.cise.ufl.edu/research/sparse/umfpack/)\n not found. Directories to search for the libraries can be specified in the\n numpy/distutils/site.cfg file (section [umfpack]) or by setting\n the UMFPACK environment variable.\"\"\"\n\n\nclass system_info(object):\n\n \"\"\" get_info() is the only public method. 
Don't use others.\n \"\"\"\n section = 'ALL'\n dir_env_var = None\n search_static_first = 0 # XXX: disabled by default, may disappear in\n # future unless it is proved to be useful.\n verbosity = 1\n saved_results = {}\n\n notfounderror = NotFoundError\n\n def __init__(self,\n default_lib_dirs=default_lib_dirs,\n default_include_dirs=default_include_dirs,\n verbosity=1,\n ):\n self.__class__.info = {}\n self.local_prefixes = []\n defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),\n 'include_dirs': os.pathsep.join(default_include_dirs),\n 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),\n 'rpath': '',\n 'src_dirs': os.pathsep.join(default_src_dirs),\n 'search_static_first': str(self.search_static_first),\n 'extra_compile_args': '', 'extra_link_args': ''}\n self.cp = ConfigParser(defaults)\n self.files = []\n self.files.extend(get_standard_file('.numpy-site.cfg'))\n self.files.extend(get_standard_file('site.cfg'))\n self.parse_config_files()\n\n if self.section is not None:\n self.search_static_first = self.cp.getboolean(\n self.section, 'search_static_first')\n assert isinstance(self.search_static_first, int)\n\n def parse_config_files(self):\n self.cp.read(self.files)\n if not self.cp.has_section(self.section):\n if self.section is not None:\n self.cp.add_section(self.section)\n\n def calc_libraries_info(self):\n libs = self.get_libraries()\n dirs = self.get_lib_dirs()\n # The extensions use runtime_library_dirs\n r_dirs = self.get_runtime_lib_dirs()\n # Intrinsic distutils use rpath, we simply append both entries\n # as though they were one entry\n r_dirs.extend(self.get_runtime_lib_dirs(key='rpath'))\n info = {}\n for lib in libs:\n i = self.check_libs(dirs, [lib])\n if i is not None:\n dict_append(info, **i)\n else:\n log.info('Library %s was not found. Ignoring' % (lib))\n\n if r_dirs:\n i = self.check_libs(r_dirs, [lib])\n if i is not None:\n # Swap library keywords found to runtime_library_dirs\n # the libraries are insisting on the user having defined\n # them using the library_dirs, and not necessarily by\n # runtime_library_dirs\n del i['libraries']\n i['runtime_library_dirs'] = i.pop('library_dirs')\n dict_append(info, **i)\n else:\n log.info('Runtime library %s was not found. 
Ignoring' % (lib))\n\n return info\n\n def set_info(self, **info):\n if info:\n lib_info = self.calc_libraries_info()\n dict_append(info, **lib_info)\n # Update extra information\n extra_info = self.calc_extra_info()\n dict_append(info, **extra_info)\n self.saved_results[self.__class__.__name__] = info\n\n def has_info(self):\n return self.__class__.__name__ in self.saved_results\n\n def calc_extra_info(self):\n \"\"\" Updates the information in the current information with\n respect to these flags:\n extra_compile_args\n extra_link_args\n \"\"\"\n info = {}\n for key in ['extra_compile_args', 'extra_link_args']:\n # Get values\n opt = self.cp.get(self.section, key)\n if opt:\n tmp = {key : [opt]}\n dict_append(info, **tmp)\n return info\n\n def get_info(self, notfound_action=0):\n \"\"\" Return a dictonary with items that are compatible\n with numpy.distutils.setup keyword arguments.\n \"\"\"\n flag = 0\n if not self.has_info():\n flag = 1\n log.info(self.__class__.__name__ + ':')\n if hasattr(self, 'calc_info'):\n self.calc_info()\n if notfound_action:\n if not self.has_info():\n if notfound_action == 1:\n warnings.warn(self.notfounderror.__doc__)\n elif notfound_action == 2:\n raise self.notfounderror(self.notfounderror.__doc__)\n else:\n raise ValueError(repr(notfound_action))\n\n if not self.has_info():\n log.info(' NOT AVAILABLE')\n self.set_info()\n else:\n log.info(' FOUND:')\n\n res = self.saved_results.get(self.__class__.__name__)\n if self.verbosity > 0 and flag:\n for k, v in res.items():\n v = str(v)\n if k in ['sources', 'libraries'] and len(v) > 270:\n v = v[:120] + '...\\n...\\n...' + v[-120:]\n log.info(' %s = %s', k, v)\n log.info('')\n\n return copy.deepcopy(res)\n\n def get_paths(self, section, key):\n dirs = self.cp.get(section, key).split(os.pathsep)\n env_var = self.dir_env_var\n if env_var:\n if is_sequence(env_var):\n e0 = env_var[-1]\n for e in env_var:\n if e in os.environ:\n e0 = e\n break\n if not env_var[0] == e0:\n log.info('Setting %s=%s' % (env_var[0], e0))\n env_var = e0\n if env_var and env_var in os.environ:\n d = os.environ[env_var]\n if d == 'None':\n log.info('Disabled %s: %s',\n self.__class__.__name__, '(%s is None)'\n % (env_var,))\n return []\n if os.path.isfile(d):\n dirs = [os.path.dirname(d)] + dirs\n l = getattr(self, '_lib_names', [])\n if len(l) == 1:\n b = os.path.basename(d)\n b = os.path.splitext(b)[0]\n if b[:3] == 'lib':\n log.info('Replacing _lib_names[0]==%r with %r' \\\n % (self._lib_names[0], b[3:]))\n self._lib_names[0] = b[3:]\n else:\n ds = d.split(os.pathsep)\n ds2 = []\n for d in ds:\n if os.path.isdir(d):\n ds2.append(d)\n for dd in ['include', 'lib']:\n d1 = os.path.join(d, dd)\n if os.path.isdir(d1):\n ds2.append(d1)\n dirs = ds2 + dirs\n default_dirs = self.cp.get(self.section, key).split(os.pathsep)\n dirs.extend(default_dirs)\n ret = []\n for d in dirs:\n if len(d) > 0 and not os.path.isdir(d):\n warnings.warn('Specified path %s is invalid.' 
% d)\n continue\n\n if d not in ret:\n ret.append(d)\n\n log.debug('( %s = %s )', key, ':'.join(ret))\n return ret\n\n def get_lib_dirs(self, key='library_dirs'):\n return self.get_paths(self.section, key)\n\n def get_runtime_lib_dirs(self, key='runtime_library_dirs'):\n path = self.get_paths(self.section, key)\n if path == ['']:\n path = []\n return path\n\n def get_include_dirs(self, key='include_dirs'):\n return self.get_paths(self.section, key)\n\n def get_src_dirs(self, key='src_dirs'):\n return self.get_paths(self.section, key)\n\n def get_libs(self, key, default):\n try:\n libs = self.cp.get(self.section, key)\n except NoOptionError:\n if not default:\n return []\n if is_string(default):\n return [default]\n return default\n return [b for b in [a.strip() for a in libs.split(',')] if b]\n\n def get_libraries(self, key='libraries'):\n if hasattr(self, '_lib_names'):\n return self.get_libs(key, default=self._lib_names)\n else:\n return self.get_libs(key, '')\n\n def library_extensions(self):\n static_exts = ['.a']\n if sys.platform == 'win32':\n static_exts.append('.lib') # .lib is used by MSVC\n if self.search_static_first:\n exts = static_exts + [so_ext]\n else:\n exts = [so_ext] + static_exts\n if sys.platform == 'cygwin':\n exts.append('.dll.a')\n if sys.platform == 'darwin':\n exts.append('.dylib')\n return exts\n\n def check_libs(self, lib_dirs, libs, opt_libs=[]):\n \"\"\"If static or shared libraries are available then return\n their info dictionary.\n\n Checks for all libraries as shared libraries first, then\n static (or vice versa if self.search_static_first is True).\n \"\"\"\n exts = self.library_extensions()\n info = None\n for ext in exts:\n info = self._check_libs(lib_dirs, libs, opt_libs, [ext])\n if info is not None:\n break\n if not info:\n log.info(' libraries %s not found in %s', ','.join(libs),\n lib_dirs)\n return info\n\n def check_libs2(self, lib_dirs, libs, opt_libs=[]):\n \"\"\"If static or shared libraries are available then return\n their info dictionary.\n\n Checks each library for shared or static.\n \"\"\"\n exts = self.library_extensions()\n info = self._check_libs(lib_dirs, libs, opt_libs, exts)\n if not info:\n log.info(' libraries %s not found in %s', ','.join(libs),\n lib_dirs)\n\n return info\n\n def _find_lib(self, lib_dir, lib, exts):\n assert is_string(lib_dir)\n # under windows first try without 'lib' prefix\n if sys.platform == 'win32':\n lib_prefixes = ['', 'lib']\n else:\n lib_prefixes = ['lib']\n # for each library name, see if we can find a file for it.\n for ext in exts:\n for prefix in lib_prefixes:\n p = self.combine_paths(lib_dir, prefix + lib + ext)\n if p:\n break\n if p:\n assert len(p) == 1\n # ??? 
splitext on p[0] would do this for cygwin\n # doesn't seem correct\n if ext == '.dll.a':\n lib += '.dll'\n return lib\n\n return False\n\n def _find_libs(self, lib_dirs, libs, exts):\n # make sure we preserve the order of libs, as it can be important\n found_dirs, found_libs = [], []\n for lib in libs:\n for lib_dir in lib_dirs:\n found_lib = self._find_lib(lib_dir, lib, exts)\n if found_lib:\n found_libs.append(found_lib)\n if lib_dir not in found_dirs:\n found_dirs.append(lib_dir)\n break\n return found_dirs, found_libs\n\n def _check_libs(self, lib_dirs, libs, opt_libs, exts):\n \"\"\"Find mandatory and optional libs in expected paths.\n\n Missing optional libraries are silently forgotten.\n \"\"\"\n if not is_sequence(lib_dirs):\n lib_dirs = [lib_dirs]\n # First, try to find the mandatory libraries\n found_dirs, found_libs = self._find_libs(lib_dirs, libs, exts)\n if len(found_libs) > 0 and len(found_libs) == len(libs):\n # Now, check for optional libraries\n opt_found_dirs, opt_found_libs = self._find_libs(lib_dirs, opt_libs, exts)\n found_libs.extend(opt_found_libs)\n for lib_dir in opt_found_dirs:\n if lib_dir not in found_dirs:\n found_dirs.append(lib_dir)\n info = {'libraries': found_libs, 'library_dirs': found_dirs}\n return info\n else:\n return None\n\n def combine_paths(self, *args):\n \"\"\"Return a list of existing paths composed by all combinations\n of items from the arguments.\n \"\"\"\n return combine_paths(*args, **{'verbosity': self.verbosity})\n\n\nclass fft_opt_info(system_info):\n\n def calc_info(self):\n info = {}\n fftw_info = get_info('fftw3') or get_info('fftw2') or get_info('dfftw')\n djbfft_info = get_info('djbfft')\n if fftw_info:\n dict_append(info, **fftw_info)\n if djbfft_info:\n dict_append(info, **djbfft_info)\n self.set_info(**info)\n return\n\n\nclass fftw_info(system_info):\n #variables to override\n section = 'fftw'\n dir_env_var = 'FFTW'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw3',\n 'libs':['fftw3'],\n 'includes':['fftw3.h'],\n 'macros':[('SCIPY_FFTW3_H', None)]},\n {'name':'fftw2',\n 'libs':['rfftw', 'fftw'],\n 'includes':['fftw.h', 'rfftw.h'],\n 'macros':[('SCIPY_FFTW_H', None)]}]\n\n def calc_ver_info(self, ver_param):\n \"\"\"Returns True on successful version detection, else False\"\"\"\n lib_dirs = self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n incl_dir = None\n libs = self.get_libs(self.section + '_libs', ver_param['libs'])\n info = self.check_libs(lib_dirs, libs)\n if info is not None:\n flag = 0\n for d in incl_dirs:\n if len(self.combine_paths(d, ver_param['includes'])) \\\n == len(ver_param['includes']):\n dict_append(info, include_dirs=[d])\n flag = 1\n incl_dirs = [d]\n break\n if flag:\n dict_append(info, define_macros=ver_param['macros'])\n else:\n info = None\n if info is not None:\n self.set_info(**info)\n return True\n else:\n log.info(' %s not found' % (ver_param['name']))\n return False\n\n def calc_info(self):\n for i in self.ver_info:\n if self.calc_ver_info(i):\n break\n\n\nclass fftw2_info(fftw_info):\n #variables to override\n section = 'fftw'\n dir_env_var = 'FFTW'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw2',\n 'libs':['rfftw', 'fftw'],\n 'includes':['fftw.h', 'rfftw.h'],\n 'macros':[('SCIPY_FFTW_H', None)]}\n ]\n\n\nclass fftw3_info(fftw_info):\n #variables to override\n section = 'fftw3'\n dir_env_var = 'FFTW3'\n notfounderror = FFTWNotFoundError\n ver_info = [{'name':'fftw3',\n 'libs':['fftw3'],\n 'includes':['fftw3.h'],\n 'macros':[('SCIPY_FFTW3_H', None)]},\n 
]\n\n\nclass dfftw_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'dfftw',\n 'libs':['drfftw', 'dfftw'],\n 'includes':['dfftw.h', 'drfftw.h'],\n 'macros':[('SCIPY_DFFTW_H', None)]}]\n\n\nclass sfftw_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'sfftw',\n 'libs':['srfftw', 'sfftw'],\n 'includes':['sfftw.h', 'srfftw.h'],\n 'macros':[('SCIPY_SFFTW_H', None)]}]\n\n\nclass fftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'fftw threads',\n 'libs':['rfftw_threads', 'fftw_threads'],\n 'includes':['fftw_threads.h', 'rfftw_threads.h'],\n 'macros':[('SCIPY_FFTW_THREADS_H', None)]}]\n\n\nclass dfftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'dfftw threads',\n 'libs':['drfftw_threads', 'dfftw_threads'],\n 'includes':['dfftw_threads.h', 'drfftw_threads.h'],\n 'macros':[('SCIPY_DFFTW_THREADS_H', None)]}]\n\n\nclass sfftw_threads_info(fftw_info):\n section = 'fftw'\n dir_env_var = 'FFTW'\n ver_info = [{'name':'sfftw threads',\n 'libs':['srfftw_threads', 'sfftw_threads'],\n 'includes':['sfftw_threads.h', 'srfftw_threads.h'],\n 'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]\n\n\nclass djbfft_info(system_info):\n section = 'djbfft'\n dir_env_var = 'DJBFFT'\n notfounderror = DJBFFTNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend(self.combine_paths(d, ['djbfft']) + [d])\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n info = None\n for d in lib_dirs:\n p = self.combine_paths(d, ['djbfft.a'])\n if p:\n info = {'extra_objects': p}\n break\n p = self.combine_paths(d, ['libdjbfft.a', 'libdjbfft' + so_ext])\n if p:\n info = {'libraries': ['djbfft'], 'library_dirs': [d]}\n break\n if info is None:\n return\n for d in incl_dirs:\n if len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2:\n dict_append(info, include_dirs=[d],\n define_macros=[('SCIPY_DJBFFT_H', None)])\n self.set_info(**info)\n return\n return\n\n\nclass mkl_info(system_info):\n section = 'mkl'\n dir_env_var = 'MKLROOT'\n _lib_mkl = ['mkl', 'vml', 'guide']\n\n def get_mkl_rootdir(self):\n mklroot = os.environ.get('MKLROOT', None)\n if mklroot is not None:\n return mklroot\n paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)\n ld_so_conf = '/etc/ld.so.conf'\n if os.path.isfile(ld_so_conf):\n for d in open(ld_so_conf, 'r'):\n d = d.strip()\n if d:\n paths.append(d)\n intel_mkl_dirs = []\n for path in paths:\n path_atoms = path.split(os.sep)\n for m in path_atoms:\n if m.startswith('mkl'):\n d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])\n intel_mkl_dirs.append(d)\n break\n for d in paths:\n dirs = glob(os.path.join(d, 'mkl', '*'))\n dirs += glob(os.path.join(d, 'mkl*'))\n for d in dirs:\n if os.path.isdir(os.path.join(d, 'lib')):\n return d\n return None\n\n def __init__(self):\n mklroot = self.get_mkl_rootdir()\n if mklroot is None:\n system_info.__init__(self)\n else:\n from .cpuinfo import cpu\n l = 'mkl' # use shared library\n if cpu.is_Itanium():\n plt = '64'\n elif cpu.is_Xeon():\n plt = 'intel64'\n else:\n plt = '32'\n if l not in self._lib_mkl:\n self._lib_mkl.insert(0, l)\n system_info.__init__(\n self,\n default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],\n default_include_dirs=[os.path.join(mklroot, 'include')])\n\n def calc_info(self):\n lib_dirs = 
self.get_lib_dirs()\n incl_dirs = self.get_include_dirs()\n mkl_libs = self.get_libs('mkl_libs', self._lib_mkl)\n info = self.check_libs2(lib_dirs, mkl_libs)\n if info is None:\n return\n dict_append(info,\n define_macros=[('SCIPY_MKL_H', None),\n ('HAVE_CBLAS', None)],\n include_dirs=incl_dirs)\n if sys.platform == 'win32':\n pass # win32 has no pthread library\n else:\n dict_append(info, libraries=['pthread'])\n self.set_info(**info)\n\n\nclass lapack_mkl_info(mkl_info):\n\n def calc_info(self):\n mkl = get_info('mkl')\n if not mkl:\n return\n if sys.platform == 'win32':\n lapack_libs = self.get_libs('lapack_libs', ['mkl_lapack'])\n else:\n lapack_libs = self.get_libs('lapack_libs',\n ['mkl_lapack32', 'mkl_lapack64'])\n\n info = {'libraries': lapack_libs}\n dict_append(info, **mkl)\n self.set_info(**info)\n\n\nclass blas_mkl_info(mkl_info):\n pass\n\n\nclass atlas_info(system_info):\n section = 'atlas'\n dir_env_var = 'ATLAS'\n _lib_names = ['f77blas', 'cblas']\n if sys.platform[:7] == 'freebsd':\n _lib_atlas = ['atlas_r']\n _lib_lapack = ['alapack_r']\n else:\n _lib_atlas = ['atlas']\n _lib_lapack = ['lapack']\n\n notfounderror = AtlasNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend(self.combine_paths(d, ['atlas*', 'ATLAS*',\n 'sse', '3dnow', 'sse2']) + [d])\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n atlas_libs = self.get_libs('atlas_libs',\n self._lib_names + self._lib_atlas)\n lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)\n atlas = None\n lapack = None\n atlas_1 = None\n for d in lib_dirs:\n atlas = self.check_libs2(d, atlas_libs, [])\n lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])\n if atlas is not None:\n lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])\n lapack = self.check_libs2(lib_dirs2, lapack_libs, [])\n if lapack is not None:\n break\n if atlas:\n atlas_1 = atlas\n log.info(self.__class__)\n if atlas is None:\n atlas = atlas_1\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n if lapack is not None:\n dict_append(info, **lapack)\n dict_append(info, **atlas)\n elif 'lapack_atlas' in atlas['libraries']:\n dict_append(info, **atlas)\n dict_append(info,\n define_macros=[('ATLAS_WITH_LAPACK_ATLAS', None)])\n self.set_info(**info)\n return\n else:\n dict_append(info, **atlas)\n dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])\n message = \"\"\"\n*********************************************************************\n Could not find lapack library within the ATLAS installation.\n*********************************************************************\n\"\"\"\n warnings.warn(message)\n self.set_info(**info)\n return\n\n # Check if lapack library is complete, only warn if it is not.\n lapack_dir = lapack['library_dirs'][0]\n lapack_name = lapack['libraries'][0]\n lapack_lib = None\n lib_prefixes = ['lib']\n if sys.platform == 'win32':\n lib_prefixes.append('')\n for e in self.library_extensions():\n for prefix in lib_prefixes:\n fn = os.path.join(lapack_dir, prefix + lapack_name + e)\n if os.path.exists(fn):\n lapack_lib = fn\n break\n if lapack_lib:\n break\n if lapack_lib is not None:\n sz = os.stat(lapack_lib)[6]\n if sz <= 4000 * 1024:\n message = 
\"\"\"\n*********************************************************************\n Lapack library (from ATLAS) is probably incomplete:\n size of %s is %sk (expected >4000k)\n\n Follow the instructions in the KNOWN PROBLEMS section of the file\n numpy/INSTALL.txt.\n*********************************************************************\n\"\"\" % (lapack_lib, sz / 1024)\n warnings.warn(message)\n else:\n info['language'] = 'f77'\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(info, **atlas_extra_info)\n\n self.set_info(**info)\n\n\nclass atlas_blas_info(atlas_info):\n _lib_names = ['f77blas', 'cblas']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n atlas_libs = self.get_libs('atlas_libs',\n self._lib_names + self._lib_atlas)\n atlas = self.check_libs2(lib_dirs, atlas_libs, [])\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n info['define_macros'] = [('HAVE_CBLAS', None)]\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(atlas, **atlas_extra_info)\n\n dict_append(info, **atlas)\n\n self.set_info(**info)\n return\n\n\nclass atlas_threads_info(atlas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['ptf77blas', 'ptcblas']\n\n\nclass atlas_blas_threads_info(atlas_blas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['ptf77blas', 'ptcblas']\n\n\nclass lapack_atlas_info(atlas_info):\n _lib_names = ['lapack_atlas'] + atlas_info._lib_names\n\n\nclass lapack_atlas_threads_info(atlas_threads_info):\n _lib_names = ['lapack_atlas'] + atlas_threads_info._lib_names\n\n\nclass atlas_3_10_info(atlas_info):\n _lib_names = ['satlas']\n _lib_atlas = _lib_names\n _lib_lapack = _lib_names\n\n\nclass atlas_3_10_blas_info(atlas_3_10_info):\n _lib_names = ['satlas']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n info = {}\n atlas_libs = self.get_libs('atlas_libs',\n self._lib_names)\n atlas = self.check_libs2(lib_dirs, atlas_libs, [])\n if atlas is None:\n return\n include_dirs = self.get_include_dirs()\n h = (self.combine_paths(lib_dirs + include_dirs, 'cblas.h') or [None])\n h = h[0]\n if h:\n h = os.path.dirname(h)\n dict_append(info, include_dirs=[h])\n info['language'] = 'c'\n info['define_macros'] = [('HAVE_CBLAS', None)]\n\n atlas_version, atlas_extra_info = get_atlas_version(**atlas)\n dict_append(atlas, **atlas_extra_info)\n\n dict_append(info, **atlas)\n\n self.set_info(**info)\n return\n\n\nclass atlas_3_10_threads_info(atlas_3_10_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['tatlas']\n _lib_atlas = _lib_names\n _lib_lapack = _lib_names\n\n\nclass atlas_3_10_blas_threads_info(atlas_3_10_blas_info):\n dir_env_var = ['PTATLAS', 'ATLAS']\n _lib_names = ['tatlas']\n\n\nclass lapack_atlas_3_10_info(atlas_3_10_info):\n pass\n\n\nclass lapack_atlas_3_10_threads_info(atlas_3_10_threads_info):\n pass\n\n\nclass lapack_info(system_info):\n section = 'lapack'\n dir_env_var = 'LAPACK'\n _lib_names = ['lapack']\n notfounderror = LapackNotFoundError\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n lapack_libs = self.get_libs('lapack_libs', self._lib_names)\n info = self.check_libs(lib_dirs, lapack_libs, [])\n if info is None:\n return\n info['language'] = 'f77'\n self.set_info(**info)\n\n\nclass lapack_src_info(system_info):\n section = 'lapack_src'\n dir_env_var = 'LAPACK_SRC'\n 
notfounderror = LapackSrcNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['LAPACK*/SRC', 'SRC']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'dgesv.f')):\n src_dir = d\n break\n if not src_dir:\n #XXX: Get sources from netlib. May be ask first.\n return\n # The following is extracted from LAPACK-3.0/SRC/Makefile.\n # Added missing names from lapack-lite-3.1.1/SRC/Makefile\n # while keeping removed names for Lapack-3.0 compatibility.\n allaux = '''\n ilaenv ieeeck lsame lsamen xerbla\n iparmq\n ''' # *.f\n laux = '''\n bdsdc bdsqr disna labad lacpy ladiv lae2 laebz laed0 laed1\n laed2 laed3 laed4 laed5 laed6 laed7 laed8 laed9 laeda laev2\n lagtf lagts lamch lamrg lanst lapy2 lapy3 larnv larrb larre\n larrf lartg laruv las2 lascl lasd0 lasd1 lasd2 lasd3 lasd4\n lasd5 lasd6 lasd7 lasd8 lasd9 lasda lasdq lasdt laset lasq1\n lasq2 lasq3 lasq4 lasq5 lasq6 lasr lasrt lassq lasv2 pttrf\n stebz stedc steqr sterf\n\n larra larrc larrd larr larrk larrj larrr laneg laisnan isnan\n lazq3 lazq4\n ''' # [s|d]*.f\n lasrc = '''\n gbbrd gbcon gbequ gbrfs gbsv gbsvx gbtf2 gbtrf gbtrs gebak\n gebal gebd2 gebrd gecon geequ gees geesx geev geevx gegs gegv\n gehd2 gehrd gelq2 gelqf gels gelsd gelss gelsx gelsy geql2\n geqlf geqp3 geqpf geqr2 geqrf gerfs gerq2 gerqf gesc2 gesdd\n gesv gesvd gesvx getc2 getf2 getrf getri getrs ggbak ggbal\n gges ggesx ggev ggevx ggglm gghrd gglse ggqrf ggrqf ggsvd\n ggsvp gtcon gtrfs gtsv gtsvx gttrf gttrs gtts2 hgeqz hsein\n hseqr labrd lacon laein lags2 lagtm lahqr lahrd laic1 lals0\n lalsa lalsd langb lange langt lanhs lansb lansp lansy lantb\n lantp lantr lapll lapmt laqgb laqge laqp2 laqps laqsb laqsp\n laqsy lar1v lar2v larf larfb larfg larft larfx largv larrv\n lartv larz larzb larzt laswp lasyf latbs latdf latps latrd\n latrs latrz latzm lauu2 lauum pbcon pbequ pbrfs pbstf pbsv\n pbsvx pbtf2 pbtrf pbtrs pocon poequ porfs posv posvx potf2\n potrf potri potrs ppcon ppequ pprfs ppsv ppsvx pptrf pptri\n pptrs ptcon pteqr ptrfs ptsv ptsvx pttrs ptts2 spcon sprfs\n spsv spsvx sptrf sptri sptrs stegr stein sycon syrfs sysv\n sysvx sytf2 sytrf sytri sytrs tbcon tbrfs tbtrs tgevc tgex2\n tgexc tgsen tgsja tgsna tgsy2 tgsyl tpcon tprfs tptri tptrs\n trcon trevc trexc trrfs trsen trsna trsyl trti2 trtri trtrs\n tzrqf tzrzf\n\n lacn2 lahr2 stemr laqr0 laqr1 laqr2 laqr3 laqr4 laqr5\n ''' # [s|c|d|z]*.f\n sd_lasrc = '''\n laexc lag2 lagv2 laln2 lanv2 laqtr lasy2 opgtr opmtr org2l\n org2r orgbr orghr orgl2 orglq orgql orgqr orgr2 orgrq orgtr\n orm2l orm2r ormbr ormhr orml2 ormlq ormql ormqr ormr2 ormr3\n ormrq ormrz ormtr rscl sbev sbevd sbevx sbgst sbgv sbgvd sbgvx\n sbtrd spev spevd spevx spgst spgv spgvd spgvx sptrd stev stevd\n stevr stevx syev syevd syevr syevx sygs2 sygst sygv sygvd\n sygvx sytd2 sytrd\n ''' # [s|d]*.f\n cz_lasrc = '''\n bdsqr hbev hbevd hbevx hbgst hbgv hbgvd hbgvx hbtrd hecon heev\n heevd heevr heevx hegs2 hegst hegv hegvd hegvx herfs hesv\n hesvx hetd2 hetf2 hetrd hetrf hetri hetrs hpcon hpev hpevd\n hpevx hpgst hpgv hpgvd hpgvx hprfs hpsv hpsvx hptrd hptrf\n hptri hptrs lacgv lacp2 lacpy lacrm lacrt ladiv laed0 laed7\n laed8 laesy laev2 lahef lanhb lanhe lanhp lanht laqhb laqhe\n laqhp larcm larnv lartg lascl laset lasr lassq pttrf rot spmv\n spr stedc steqr symv syr ung2l ung2r ungbr unghr ungl2 
unglq\n ungql ungqr ungr2 ungrq ungtr unm2l unm2r unmbr unmhr unml2\n unmlq unmql unmqr unmr2 unmr3 unmrq unmrz unmtr upgtr upmtr\n ''' # [c|z]*.f\n #######\n sclaux = laux + ' econd ' # s*.f\n dzlaux = laux + ' secnd ' # d*.f\n slasrc = lasrc + sd_lasrc # s*.f\n dlasrc = lasrc + sd_lasrc # d*.f\n clasrc = lasrc + cz_lasrc + ' srot srscl ' # c*.f\n zlasrc = lasrc + cz_lasrc + ' drot drscl ' # z*.f\n oclasrc = ' icmax1 scsum1 ' # *.f\n ozlasrc = ' izmax1 dzsum1 ' # *.f\n sources = ['s%s.f' % f for f in (sclaux + slasrc).split()] \\\n + ['d%s.f' % f for f in (dzlaux + dlasrc).split()] \\\n + ['c%s.f' % f for f in (clasrc).split()] \\\n + ['z%s.f' % f for f in (zlasrc).split()] \\\n + ['%s.f' % f for f in (allaux + oclasrc + ozlasrc).split()]\n sources = [os.path.join(src_dir, f) for f in sources]\n # Lapack 3.1:\n src_dir2 = os.path.join(src_dir, '..', 'INSTALL')\n sources += [os.path.join(src_dir2, p + 'lamch.f') for p in 'sdcz']\n # Lapack 3.2.1:\n sources += [os.path.join(src_dir, p + 'larfp.f') for p in 'sdcz']\n sources += [os.path.join(src_dir, 'ila' + p + 'lr.f') for p in 'sdcz']\n sources += [os.path.join(src_dir, 'ila' + p + 'lc.f') for p in 'sdcz']\n # Should we check here actual existence of source files?\n # Yes, the file listing is different between 3.0 and 3.1\n # versions.\n sources = [f for f in sources if os.path.isfile(f)]\n info = {'sources': sources, 'language': 'f77'}\n self.set_info(**info)\n\natlas_version_c_text = r'''\n/* This file is generated from numpy/distutils/system_info.py */\nvoid ATL_buildinfo(void);\nint main(void) {\n ATL_buildinfo();\n return 0;\n}\n'''\n\n_cached_atlas_version = {}\n\n\ndef get_atlas_version(**config):\n libraries = config.get('libraries', [])\n library_dirs = config.get('library_dirs', [])\n key = (tuple(libraries), tuple(library_dirs))\n if key in _cached_atlas_version:\n return _cached_atlas_version[key]\n c = cmd_config(Distribution())\n atlas_version = None\n info = {}\n try:\n s, o = c.get_output(atlas_version_c_text,\n libraries=libraries, library_dirs=library_dirs,\n use_tee=(system_info.verbosity > 0))\n if s and re.search(r'undefined reference to `_gfortran', o, re.M):\n s, o = c.get_output(atlas_version_c_text,\n libraries=libraries + ['gfortran'],\n library_dirs=library_dirs,\n use_tee=(system_info.verbosity > 0))\n if not s:\n warnings.warn(\"\"\"\n*****************************************************\nLinkage with ATLAS requires gfortran. 
Use\n\n python setup.py config_fc --fcompiler=gnu95 ...\n\nwhen building extension libraries that use ATLAS.\nMake sure that -lgfortran is used for C++ extensions.\n*****************************************************\n\"\"\")\n dict_append(info, language='f90',\n define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])\n except Exception: # failed to get version from file -- maybe on Windows\n # look at directory name\n for o in library_dirs:\n m = re.search(r'ATLAS_(?P<version>\\d+[.]\\d+[.]\\d+)_', o)\n if m:\n atlas_version = m.group('version')\n if atlas_version is not None:\n break\n\n # final choice --- look at ATLAS_VERSION environment\n # variable\n if atlas_version is None:\n atlas_version = os.environ.get('ATLAS_VERSION', None)\n if atlas_version:\n dict_append(info, define_macros=[(\n 'ATLAS_INFO', '\"\\\\\"%s\\\\\"\"' % atlas_version)\n ])\n else:\n dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])\n return atlas_version or '?.?.?', info\n\n if not s:\n m = re.search(r'ATLAS version (?P<version>\\d+[.]\\d+[.]\\d+)', o)\n if m:\n atlas_version = m.group('version')\n if atlas_version is None:\n if re.search(r'undefined symbol: ATL_buildinfo', o, re.M):\n atlas_version = '3.2.1_pre3.3.6'\n else:\n log.info('Status: %d', s)\n log.info('Output: %s', o)\n\n if atlas_version == '3.2.1_pre3.3.6':\n dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])\n else:\n dict_append(info, define_macros=[(\n 'ATLAS_INFO', '\"\\\\\"%s\\\\\"\"' % atlas_version)\n ])\n result = _cached_atlas_version[key] = atlas_version, info\n return result\n\n\nclass lapack_opt_info(system_info):\n\n notfounderror = LapackNotFoundError\n\n def calc_info(self):\n\n lapack_mkl_info = get_info('lapack_mkl')\n if lapack_mkl_info:\n self.set_info(**lapack_mkl_info)\n return\n\n openblas_info = get_info('openblas_lapack')\n if openblas_info:\n self.set_info(**openblas_info)\n return\n\n atlas_info = get_info('atlas_3_10_threads')\n if not atlas_info:\n atlas_info = get_info('atlas_3_10')\n if not atlas_info:\n atlas_info = get_info('atlas_threads')\n if not atlas_info:\n atlas_info = get_info('atlas')\n\n if sys.platform == 'darwin' and not atlas_info:\n # Use the system lapack from Accelerate or vecLib under OSX\n args = []\n link_args = []\n if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \\\n 'x86_64' in get_platform() or \\\n 'i386' in platform.platform():\n intel = 1\n else:\n intel = 0\n if os.path.exists('/System/Library/Frameworks'\n '/Accelerate.framework/'):\n if intel:\n args.extend(['-msse3'])\n else:\n args.extend(['-faltivec'])\n link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])\n elif os.path.exists('/System/Library/Frameworks'\n '/vecLib.framework/'):\n if intel:\n args.extend(['-msse3'])\n else:\n args.extend(['-faltivec'])\n link_args.extend(['-Wl,-framework', '-Wl,vecLib'])\n if args:\n self.set_info(extra_compile_args=args,\n extra_link_args=link_args,\n define_macros=[('NO_ATLAS_INFO', 3),\n ('HAVE_CBLAS', None)])\n return\n\n need_lapack = 0\n need_blas = 0\n info = {}\n if atlas_info:\n l = atlas_info.get('define_macros', [])\n if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \\\n or ('ATLAS_WITHOUT_LAPACK', None) in l:\n need_lapack = 1\n info = atlas_info\n\n else:\n warnings.warn(AtlasNotFoundError.__doc__)\n need_blas = 1\n need_lapack = 1\n dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])\n\n if need_lapack:\n lapack_info = get_info('lapack')\n #lapack_info = {} ## uncomment for testing\n if lapack_info:\n dict_append(info, **lapack_info)\n else:\n 
warnings.warn(LapackNotFoundError.__doc__)\n lapack_src_info = get_info('lapack_src')\n if not lapack_src_info:\n warnings.warn(LapackSrcNotFoundError.__doc__)\n return\n dict_append(info, libraries=[('flapack_src', lapack_src_info)])\n\n if need_blas:\n blas_info = get_info('blas')\n if blas_info:\n dict_append(info, **blas_info)\n else:\n warnings.warn(BlasNotFoundError.__doc__)\n blas_src_info = get_info('blas_src')\n if not blas_src_info:\n warnings.warn(BlasSrcNotFoundError.__doc__)\n return\n dict_append(info, libraries=[('fblas_src', blas_src_info)])\n\n self.set_info(**info)\n return\n\n\nclass blas_opt_info(system_info):\n\n notfounderror = BlasNotFoundError\n\n def calc_info(self):\n\n blas_mkl_info = get_info('blas_mkl')\n if blas_mkl_info:\n self.set_info(**blas_mkl_info)\n return\n\n blis_info = get_info('blis')\n if blis_info:\n self.set_info(**blis_info)\n return\n\n openblas_info = get_info('openblas')\n if openblas_info:\n self.set_info(**openblas_info)\n return\n\n atlas_info = get_info('atlas_3_10_blas_threads')\n if not atlas_info:\n atlas_info = get_info('atlas_3_10_blas')\n if not atlas_info:\n atlas_info = get_info('atlas_blas_threads')\n if not atlas_info:\n atlas_info = get_info('atlas_blas')\n\n if sys.platform == 'darwin' and not atlas_info:\n # Use the system BLAS from Accelerate or vecLib under OSX\n args = []\n link_args = []\n if get_platform()[-4:] == 'i386' or 'intel' in get_platform() or \\\n 'x86_64' in get_platform() or \\\n 'i386' in platform.platform():\n intel = 1\n else:\n intel = 0\n if os.path.exists('/System/Library/Frameworks'\n '/Accelerate.framework/'):\n if intel:\n args.extend(['-msse3'])\n else:\n args.extend(['-faltivec'])\n args.extend([\n '-I/System/Library/Frameworks/vecLib.framework/Headers'])\n link_args.extend(['-Wl,-framework', '-Wl,Accelerate'])\n elif os.path.exists('/System/Library/Frameworks'\n '/vecLib.framework/'):\n if intel:\n args.extend(['-msse3'])\n else:\n args.extend(['-faltivec'])\n args.extend([\n '-I/System/Library/Frameworks/vecLib.framework/Headers'])\n link_args.extend(['-Wl,-framework', '-Wl,vecLib'])\n if args:\n self.set_info(extra_compile_args=args,\n extra_link_args=link_args,\n define_macros=[('NO_ATLAS_INFO', 3),\n ('HAVE_CBLAS', None)])\n return\n\n need_blas = 0\n info = {}\n if atlas_info:\n info = atlas_info\n else:\n warnings.warn(AtlasNotFoundError.__doc__)\n need_blas = 1\n dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])\n\n if need_blas:\n blas_info = get_info('blas')\n if blas_info:\n dict_append(info, **blas_info)\n else:\n warnings.warn(BlasNotFoundError.__doc__)\n blas_src_info = get_info('blas_src')\n if not blas_src_info:\n warnings.warn(BlasSrcNotFoundError.__doc__)\n return\n dict_append(info, libraries=[('fblas_src', blas_src_info)])\n\n self.set_info(**info)\n return\n\n\nclass blas_info(system_info):\n section = 'blas'\n dir_env_var = 'BLAS'\n _lib_names = ['blas']\n notfounderror = BlasNotFoundError\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n blas_libs = self.get_libs('blas_libs', self._lib_names)\n info = self.check_libs(lib_dirs, blas_libs, [])\n if info is None:\n return\n if platform.system() == 'Windows':\n # The check for windows is needed because has_cblas uses the\n # same compiler that was used to compile Python and msvc is\n # often not installed when mingw is being used. 
This rough\n # treatment is not desirable, but windows is tricky.\n info['language'] = 'f77' # XXX: is it generally true?\n else:\n lib = self.has_cblas(info)\n if lib is not None:\n info['language'] = 'c'\n info['libraries'] = [lib]\n info['define_macros'] = [('HAVE_CBLAS', None)]\n self.set_info(**info)\n\n def has_cblas(self, info):\n # primitive cblas check by looking for the header and trying to link\n # cblas or blas\n res = False\n c = distutils.ccompiler.new_compiler()\n c.customize('')\n tmpdir = tempfile.mkdtemp()\n s = \"\"\"#include <cblas.h>\n int main(int argc, const char *argv[])\n {\n double a[4] = {1,2,3,4};\n double b[4] = {5,6,7,8};\n return cblas_ddot(4, a, 1, b, 1) > 10;\n }\"\"\"\n src = os.path.join(tmpdir, 'source.c')\n try:\n with open(src, 'wt') as f:\n f.write(s)\n\n try:\n # check we can compile (find headers)\n obj = c.compile([src], output_dir=tmpdir,\n include_dirs=self.get_include_dirs())\n\n # check we can link (find library)\n # some systems have separate cblas and blas libs. First\n # check for cblas lib, and if not present check for blas lib.\n try:\n c.link_executable(obj, os.path.join(tmpdir, \"a.out\"),\n libraries=[\"cblas\"],\n library_dirs=info['library_dirs'],\n extra_postargs=info.get('extra_link_args', []))\n res = \"cblas\"\n except distutils.ccompiler.LinkError:\n c.link_executable(obj, os.path.join(tmpdir, \"a.out\"),\n libraries=[\"blas\"],\n library_dirs=info['library_dirs'],\n extra_postargs=info.get('extra_link_args', []))\n res = \"blas\"\n except distutils.ccompiler.CompileError:\n res = None\n finally:\n shutil.rmtree(tmpdir)\n return res\n\n\nclass openblas_info(blas_info):\n section = 'openblas'\n dir_env_var = 'OPENBLAS'\n _lib_names = ['openblas']\n notfounderror = BlasNotFoundError\n\n def check_embedded_lapack(self, info):\n return True\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n openblas_libs = self.get_libs('libraries', self._lib_names)\n if openblas_libs == self._lib_names: # backward compat with 1.8.0\n openblas_libs = self.get_libs('openblas_libs', self._lib_names)\n info = self.check_libs(lib_dirs, openblas_libs, [])\n if info is None:\n return\n\n # Add extra info for OpenBLAS\n extra_info = self.calc_extra_info()\n dict_append(info, **extra_info)\n\n if not self.check_embedded_lapack(info):\n return\n\n info['language'] = 'c'\n info['define_macros'] = [('HAVE_CBLAS', None)]\n self.set_info(**info)\n\n\nclass openblas_lapack_info(openblas_info):\n section = 'openblas'\n dir_env_var = 'OPENBLAS'\n _lib_names = ['openblas']\n notfounderror = BlasNotFoundError\n\n def check_embedded_lapack(self, info):\n res = False\n c = distutils.ccompiler.new_compiler()\n c.customize('')\n tmpdir = tempfile.mkdtemp()\n s = \"\"\"void zungqr();\n int main(int argc, const char *argv[])\n {\n zungqr_();\n return 0;\n }\"\"\"\n src = os.path.join(tmpdir, 'source.c')\n out = os.path.join(tmpdir, 'a.out')\n # Add the additional \"extra\" arguments\n try:\n extra_args = info['extra_link_args']\n except:\n extra_args = []\n try:\n with open(src, 'wt') as f:\n f.write(s)\n obj = c.compile([src], output_dir=tmpdir)\n try:\n c.link_executable(obj, out, libraries=info['libraries'],\n library_dirs=info['library_dirs'],\n extra_postargs=extra_args)\n res = True\n except distutils.ccompiler.LinkError:\n res = False\n finally:\n shutil.rmtree(tmpdir)\n return res\n\n\nclass blis_info(blas_info):\n section = 'blis'\n dir_env_var = 'BLIS'\n _lib_names = ['blis']\n notfounderror = BlasNotFoundError\n\n def calc_info(self):\n lib_dirs = 
self.get_lib_dirs()\n blis_libs = self.get_libs('libraries', self._lib_names)\n if blis_libs == self._lib_names:\n blis_libs = self.get_libs('blis_libs', self._lib_names)\n\n info = self.check_libs2(lib_dirs, blis_libs, [])\n if info is None:\n return\n\n # Add include dirs\n incl_dirs = self.get_include_dirs()\n dict_append(info,\n language='c',\n define_macros=[('HAVE_CBLAS', None)],\n include_dirs=incl_dirs)\n self.set_info(**info)\n\n\nclass blas_src_info(system_info):\n section = 'blas_src'\n dir_env_var = 'BLAS_SRC'\n notfounderror = BlasSrcNotFoundError\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['blas']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'daxpy.f')):\n src_dir = d\n break\n if not src_dir:\n #XXX: Get sources from netlib. May be ask first.\n return\n blas1 = '''\n caxpy csscal dnrm2 dzasum saxpy srotg zdotc ccopy cswap drot\n dznrm2 scasum srotm zdotu cdotc dasum drotg icamax scnrm2\n srotmg zdrot cdotu daxpy drotm idamax scopy sscal zdscal crotg\n dcabs1 drotmg isamax sdot sswap zrotg cscal dcopy dscal izamax\n snrm2 zaxpy zscal csrot ddot dswap sasum srot zcopy zswap\n scabs1\n '''\n blas2 = '''\n cgbmv chpmv ctrsv dsymv dtrsv sspr2 strmv zhemv ztpmv cgemv\n chpr dgbmv dsyr lsame ssymv strsv zher ztpsv cgerc chpr2 dgemv\n dsyr2 sgbmv ssyr xerbla zher2 ztrmv cgeru ctbmv dger dtbmv\n sgemv ssyr2 zgbmv zhpmv ztrsv chbmv ctbsv dsbmv dtbsv sger\n stbmv zgemv zhpr chemv ctpmv dspmv dtpmv ssbmv stbsv zgerc\n zhpr2 cher ctpsv dspr dtpsv sspmv stpmv zgeru ztbmv cher2\n ctrmv dspr2 dtrmv sspr stpsv zhbmv ztbsv\n '''\n blas3 = '''\n cgemm csymm ctrsm dsyrk sgemm strmm zhemm zsyr2k chemm csyr2k\n dgemm dtrmm ssymm strsm zher2k zsyrk cher2k csyrk dsymm dtrsm\n ssyr2k zherk ztrmm cherk ctrmm dsyr2k ssyrk zgemm zsymm ztrsm\n '''\n sources = [os.path.join(src_dir, f + '.f') \\\n for f in (blas1 + blas2 + blas3).split()]\n #XXX: should we check here actual existence of source files?\n sources = [f for f in sources if os.path.isfile(f)]\n info = {'sources': sources, 'language': 'f77'}\n self.set_info(**info)\n\n\nclass x11_info(system_info):\n section = 'x11'\n notfounderror = X11NotFoundError\n\n def __init__(self):\n system_info.__init__(self,\n default_lib_dirs=default_x11_lib_dirs,\n default_include_dirs=default_x11_include_dirs)\n\n def calc_info(self):\n if sys.platform in ['win32']:\n return\n lib_dirs = self.get_lib_dirs()\n include_dirs = self.get_include_dirs()\n x11_libs = self.get_libs('x11_libs', ['X11'])\n info = self.check_libs(lib_dirs, x11_libs, [])\n if info is None:\n return\n inc_dir = None\n for d in include_dirs:\n if self.combine_paths(d, 'X11/X.h'):\n inc_dir = d\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir])\n self.set_info(**info)\n\n\nclass _numpy_info(system_info):\n section = 'Numeric'\n modulename = 'Numeric'\n notfounderror = NumericNotFoundError\n\n def __init__(self):\n include_dirs = []\n try:\n module = __import__(self.modulename)\n prefix = []\n for name in module.__file__.split(os.sep):\n if name == 'lib':\n break\n prefix.append(name)\n\n # Ask numpy for its own include path before attempting\n # anything else\n try:\n include_dirs.append(getattr(module, 'get_include')())\n except AttributeError:\n pass\n\n include_dirs.append(distutils.sysconfig.get_python_inc(\n 
prefix=os.sep.join(prefix)))\n except ImportError:\n pass\n py_incl_dir = distutils.sysconfig.get_python_inc()\n include_dirs.append(py_incl_dir)\n py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)\n if py_pincl_dir not in include_dirs:\n include_dirs.append(py_pincl_dir)\n for d in default_include_dirs:\n d = os.path.join(d, os.path.basename(py_incl_dir))\n if d not in include_dirs:\n include_dirs.append(d)\n system_info.__init__(self,\n default_lib_dirs=[],\n default_include_dirs=include_dirs)\n\n def calc_info(self):\n try:\n module = __import__(self.modulename)\n except ImportError:\n return\n info = {}\n macros = []\n for v in ['__version__', 'version']:\n vrs = getattr(module, v, None)\n if vrs is None:\n continue\n macros = [(self.modulename.upper() + '_VERSION',\n '\"\\\\\"%s\\\\\"\"' % (vrs)),\n (self.modulename.upper(), None)]\n break\n dict_append(info, define_macros=macros)\n include_dirs = self.get_include_dirs()\n inc_dir = None\n for d in include_dirs:\n if self.combine_paths(d,\n os.path.join(self.modulename,\n 'arrayobject.h')):\n inc_dir = d\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir])\n if info:\n self.set_info(**info)\n return\n\n\nclass numarray_info(_numpy_info):\n section = 'numarray'\n modulename = 'numarray'\n\n\nclass Numeric_info(_numpy_info):\n section = 'Numeric'\n modulename = 'Numeric'\n\n\nclass numpy_info(_numpy_info):\n section = 'numpy'\n modulename = 'numpy'\n\n\nclass numerix_info(system_info):\n section = 'numerix'\n\n def calc_info(self):\n which = None, None\n if os.getenv(\"NUMERIX\"):\n which = os.getenv(\"NUMERIX\"), \"environment var\"\n # If all the above fail, default to numpy.\n if which[0] is None:\n which = \"numpy\", \"defaulted\"\n try:\n import numpy\n which = \"numpy\", \"defaulted\"\n except ImportError:\n msg1 = str(get_exception())\n try:\n import Numeric\n which = \"numeric\", \"defaulted\"\n except ImportError:\n msg2 = str(get_exception())\n try:\n import numarray\n which = \"numarray\", \"defaulted\"\n except ImportError:\n msg3 = str(get_exception())\n log.info(msg1)\n log.info(msg2)\n log.info(msg3)\n which = which[0].strip().lower(), which[1]\n if which[0] not in [\"numeric\", \"numarray\", \"numpy\"]:\n raise ValueError(\"numerix selector must be either 'Numeric' \"\n \"or 'numarray' or 'numpy' but the value obtained\"\n \" from the %s was '%s'.\" % (which[1], which[0]))\n os.environ['NUMERIX'] = which[0]\n self.set_info(**get_info(which[0]))\n\n\nclass f2py_info(system_info):\n def calc_info(self):\n try:\n import numpy.f2py as f2py\n except ImportError:\n return\n f2py_dir = os.path.join(os.path.dirname(f2py.__file__), 'src')\n self.set_info(sources=[os.path.join(f2py_dir, 'fortranobject.c')],\n include_dirs=[f2py_dir])\n return\n\n\nclass boost_python_info(system_info):\n section = 'boost_python'\n dir_env_var = 'BOOST'\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['boost*']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',\n 'module.cpp')):\n src_dir = d\n break\n if not src_dir:\n return\n py_incl_dirs = [distutils.sysconfig.get_python_inc()]\n py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)\n if py_pincl_dir not in py_incl_dirs:\n py_incl_dirs.append(py_pincl_dir)\n srcs_dir 
= os.path.join(src_dir, 'libs', 'python', 'src')\n bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))\n bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))\n info = {'libraries': [('boost_python_src',\n {'include_dirs': [src_dir] + py_incl_dirs,\n 'sources':bpl_srcs}\n )],\n 'include_dirs': [src_dir],\n }\n if info:\n self.set_info(**info)\n return\n\n\nclass agg2_info(system_info):\n section = 'agg2'\n dir_env_var = 'AGG2'\n\n def get_paths(self, section, key):\n pre_dirs = system_info.get_paths(self, section, key)\n dirs = []\n for d in pre_dirs:\n dirs.extend([d] + self.combine_paths(d, ['agg2*']))\n return [d for d in dirs if os.path.isdir(d)]\n\n def calc_info(self):\n src_dirs = self.get_src_dirs()\n src_dir = ''\n for d in src_dirs:\n if os.path.isfile(os.path.join(d, 'src', 'agg_affine_matrix.cpp')):\n src_dir = d\n break\n if not src_dir:\n return\n if sys.platform == 'win32':\n agg2_srcs = glob(os.path.join(src_dir, 'src', 'platform',\n 'win32', 'agg_win32_bmp.cpp'))\n else:\n agg2_srcs = glob(os.path.join(src_dir, 'src', '*.cpp'))\n agg2_srcs += [os.path.join(src_dir, 'src', 'platform',\n 'X11',\n 'agg_platform_support.cpp')]\n\n info = {'libraries':\n [('agg2_src',\n {'sources': agg2_srcs,\n 'include_dirs': [os.path.join(src_dir, 'include')],\n }\n )],\n 'include_dirs': [os.path.join(src_dir, 'include')],\n }\n if info:\n self.set_info(**info)\n return\n\n\nclass _pkg_config_info(system_info):\n section = None\n config_env_var = 'PKG_CONFIG'\n default_config_exe = 'pkg-config'\n append_config_exe = ''\n version_macro_name = None\n release_macro_name = None\n version_flag = '--modversion'\n cflags_flag = '--cflags'\n\n def get_config_exe(self):\n if self.config_env_var in os.environ:\n return os.environ[self.config_env_var]\n return self.default_config_exe\n\n def get_config_output(self, config_exe, option):\n cmd = config_exe + ' ' + self.append_config_exe + ' ' + option\n s, o = exec_command(cmd, use_tee=0)\n if not s:\n return o\n\n def calc_info(self):\n config_exe = find_executable(self.get_config_exe())\n if not config_exe:\n log.warn('File not found: %s. Cannot determine %s info.' 
\\\n % (config_exe, self.section))\n return\n info = {}\n macros = []\n libraries = []\n library_dirs = []\n include_dirs = []\n extra_link_args = []\n extra_compile_args = []\n version = self.get_config_output(config_exe, self.version_flag)\n if version:\n macros.append((self.__class__.__name__.split('.')[-1].upper(),\n '\"\\\\\"%s\\\\\"\"' % (version)))\n if self.version_macro_name:\n macros.append((self.version_macro_name + '_%s'\n % (version.replace('.', '_')), None))\n if self.release_macro_name:\n release = self.get_config_output(config_exe, '--release')\n if release:\n macros.append((self.release_macro_name + '_%s'\n % (release.replace('.', '_')), None))\n opts = self.get_config_output(config_exe, '--libs')\n if opts:\n for opt in opts.split():\n if opt[:2] == '-l':\n libraries.append(opt[2:])\n elif opt[:2] == '-L':\n library_dirs.append(opt[2:])\n else:\n extra_link_args.append(opt)\n opts = self.get_config_output(config_exe, self.cflags_flag)\n if opts:\n for opt in opts.split():\n if opt[:2] == '-I':\n include_dirs.append(opt[2:])\n elif opt[:2] == '-D':\n if '=' in opt:\n n, v = opt[2:].split('=')\n macros.append((n, v))\n else:\n macros.append((opt[2:], None))\n else:\n extra_compile_args.append(opt)\n if macros:\n dict_append(info, define_macros=macros)\n if libraries:\n dict_append(info, libraries=libraries)\n if library_dirs:\n dict_append(info, library_dirs=library_dirs)\n if include_dirs:\n dict_append(info, include_dirs=include_dirs)\n if extra_link_args:\n dict_append(info, extra_link_args=extra_link_args)\n if extra_compile_args:\n dict_append(info, extra_compile_args=extra_compile_args)\n if info:\n self.set_info(**info)\n return\n\n\nclass wx_info(_pkg_config_info):\n section = 'wx'\n config_env_var = 'WX_CONFIG'\n default_config_exe = 'wx-config'\n append_config_exe = ''\n version_macro_name = 'WX_VERSION'\n release_macro_name = 'WX_RELEASE'\n version_flag = '--version'\n cflags_flag = '--cxxflags'\n\n\nclass gdk_pixbuf_xlib_2_info(_pkg_config_info):\n section = 'gdk_pixbuf_xlib_2'\n append_config_exe = 'gdk-pixbuf-xlib-2.0'\n version_macro_name = 'GDK_PIXBUF_XLIB_VERSION'\n\n\nclass gdk_pixbuf_2_info(_pkg_config_info):\n section = 'gdk_pixbuf_2'\n append_config_exe = 'gdk-pixbuf-2.0'\n version_macro_name = 'GDK_PIXBUF_VERSION'\n\n\nclass gdk_x11_2_info(_pkg_config_info):\n section = 'gdk_x11_2'\n append_config_exe = 'gdk-x11-2.0'\n version_macro_name = 'GDK_X11_VERSION'\n\n\nclass gdk_2_info(_pkg_config_info):\n section = 'gdk_2'\n append_config_exe = 'gdk-2.0'\n version_macro_name = 'GDK_VERSION'\n\n\nclass gdk_info(_pkg_config_info):\n section = 'gdk'\n append_config_exe = 'gdk'\n version_macro_name = 'GDK_VERSION'\n\n\nclass gtkp_x11_2_info(_pkg_config_info):\n section = 'gtkp_x11_2'\n append_config_exe = 'gtk+-x11-2.0'\n version_macro_name = 'GTK_X11_VERSION'\n\n\nclass gtkp_2_info(_pkg_config_info):\n section = 'gtkp_2'\n append_config_exe = 'gtk+-2.0'\n version_macro_name = 'GTK_VERSION'\n\n\nclass xft_info(_pkg_config_info):\n section = 'xft'\n append_config_exe = 'xft'\n version_macro_name = 'XFT_VERSION'\n\n\nclass freetype2_info(_pkg_config_info):\n section = 'freetype2'\n append_config_exe = 'freetype2'\n version_macro_name = 'FREETYPE2_VERSION'\n\n\nclass amd_info(system_info):\n section = 'amd'\n dir_env_var = 'AMD'\n _lib_names = ['amd']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n amd_libs = self.get_libs('amd_libs', self._lib_names)\n info = self.check_libs(lib_dirs, amd_libs, [])\n if info is None:\n return\n\n include_dirs = 
self.get_include_dirs()\n\n inc_dir = None\n for d in include_dirs:\n p = self.combine_paths(d, 'amd.h')\n if p:\n inc_dir = os.path.dirname(p[0])\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir],\n define_macros=[('SCIPY_AMD_H', None)],\n swig_opts=['-I' + inc_dir])\n\n self.set_info(**info)\n return\n\n\nclass umfpack_info(system_info):\n section = 'umfpack'\n dir_env_var = 'UMFPACK'\n notfounderror = UmfpackNotFoundError\n _lib_names = ['umfpack']\n\n def calc_info(self):\n lib_dirs = self.get_lib_dirs()\n\n umfpack_libs = self.get_libs('umfpack_libs', self._lib_names)\n info = self.check_libs(lib_dirs, umfpack_libs, [])\n if info is None:\n return\n\n include_dirs = self.get_include_dirs()\n\n inc_dir = None\n for d in include_dirs:\n p = self.combine_paths(d, ['', 'umfpack'], 'umfpack.h')\n if p:\n inc_dir = os.path.dirname(p[0])\n break\n if inc_dir is not None:\n dict_append(info, include_dirs=[inc_dir],\n define_macros=[('SCIPY_UMFPACK_H', None)],\n swig_opts=['-I' + inc_dir])\n\n amd = get_info('amd')\n dict_append(info, **get_info('amd'))\n\n self.set_info(**info)\n return\n\n\ndef combine_paths(*args, **kws):\n \"\"\" Return a list of existing paths composed by all combinations of\n items from arguments.\n \"\"\"\n r = []\n for a in args:\n if not a:\n continue\n if is_string(a):\n a = [a]\n r.append(a)\n args = r\n if not args:\n return []\n if len(args) == 1:\n result = reduce(lambda a, b: a + b, map(glob, args[0]), [])\n elif len(args) == 2:\n result = []\n for a0 in args[0]:\n for a1 in args[1]:\n result.extend(glob(os.path.join(a0, a1)))\n else:\n result = combine_paths(*(combine_paths(args[0], args[1]) + args[2:]))\n verbosity = kws.get('verbosity', 1)\n log.debug('(paths: %s)', ','.join(result))\n return result\n\nlanguage_map = {'c': 0, 'c++': 1, 'f77': 2, 'f90': 3}\ninv_language_map = {0: 'c', 1: 'c++', 2: 'f77', 3: 'f90'}\n\n\ndef dict_append(d, **kws):\n languages = []\n for k, v in kws.items():\n if k == 'language':\n languages.append(v)\n continue\n if k in d:\n if k in ['library_dirs', 'include_dirs',\n 'extra_compile_args', 'extra_link_args',\n 'runtime_library_dirs', 'define_macros']:\n [d[k].append(vv) for vv in v if vv not in d[k]]\n else:\n d[k].extend(v)\n else:\n d[k] = v\n if languages:\n l = inv_language_map[max([language_map.get(l, 0) for l in languages])]\n d['language'] = l\n return\n\n\ndef parseCmdLine(argv=(None,)):\n import optparse\n parser = optparse.OptionParser(\"usage: %prog [-v] [info objs]\")\n parser.add_option('-v', '--verbose', action='store_true', dest='verbose',\n default=False,\n help='be verbose and print more messages')\n\n opts, args = parser.parse_args(args=argv[1:])\n return opts, args\n\n\ndef show_all(argv=None):\n import inspect\n if argv is None:\n argv = sys.argv\n opts, args = parseCmdLine(argv)\n if opts.verbose:\n log.set_threshold(log.DEBUG)\n else:\n log.set_threshold(log.INFO)\n show_only = []\n for n in args:\n if n[-5:] != '_info':\n n = n + '_info'\n show_only.append(n)\n show_all = not show_only\n _gdict_ = globals().copy()\n for name, c in _gdict_.items():\n if not inspect.isclass(c):\n continue\n if not issubclass(c, system_info) or c is system_info:\n continue\n if not show_all:\n if name not in show_only:\n continue\n del show_only[show_only.index(name)]\n conf = c()\n conf.verbosity = 2\n r = conf.get_info()\n if show_only:\n log.info('Info classes not defined: %s', ','.join(show_only))\n\nif __name__ == \"__main__\":\n show_all()\n" ]
[ [ "numpy.distutils.compat.get_exception", "numpy.distutils.exec_command.exec_command", "numpy.distutils.misc_util.is_sequence", "numpy.distutils.misc_util.is_string", "numpy.distutils.misc_util.get_shared_lib_extension" ] ]
Obs01ete/pytorch-detection
[ "4af02e232b38fd202bb348e9bbe7373c7eba165b" ]
[ "average_precision.py" ]
[ "import numpy as np\n\n\ndef iou_point_np(box, boxes):\n \"\"\"\n Find intersection over union\n :param box: (tensor) One box [xmin, ymin, xmax, ymax], shape: [4].\n :param boxes: (tensor) Shape:[N, 4].\n :return: intersection over union. Shape: [N]\n \"\"\"\n\n A = np.maximum(box[:2], boxes[:, :2])\n B = np.minimum(box[2:], boxes[:, 2:])\n interArea = np.maximum(B[:, 0] - A[:, 0], 0) * np.maximum(B[:, 1] - A[:, 1], 0)\n boxArea = (box[2] - box[0]) * (box[3] - box[1])\n\n boxesArea = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n union = boxArea + boxesArea - interArea\n iou = interArea / union\n return iou\n\n\nclass AveragePrecision:\n \"\"\"Average precision calculation using sort-and-iterate algorithm (VOC12)\"\"\"\n\n def __init__(self, labelmap, iou_threshold_perclass):\n \"\"\"\n Ctor.\n\n :param labelmap: list of strings - class names\n :param iou_threshold_perclass: intersection over union thresholds for each class\n \"\"\"\n\n self.labelmap = labelmap\n self.num_classes = len(labelmap)\n self.iou_threshold_perclass = iou_threshold_perclass\n self.annotation_list = []\n self.detection_list = []\n\n def add_batch(self, annotations, detections):\n \"\"\"\n Accumulate detection results and annotations from one batch.\n\n :param annotations: list [N] of list [C] of numpy arrays [Q, 4], where N - batch size,\n C - number of object classes (i.e. no including background), Q - quantity of annotated objects.\n Dimension of size 4 is decoded as a bbox in fractional left-top-right-bottom (LTRB) format.\n\n :param detections: list [N] of list [C] of numpy arrays [Q, 5], where N - batch size,\n C - number of object classes (i.e. no including background), Q - quantity of detected objects.\n Dimension of size 5 is decoded as [0] - confidence, [1:5] - bbox in fractional\n left-top-right-bottom (LTRB) format.\n \"\"\"\n\n self.annotation_list.extend(annotations)\n self.detection_list.extend(detections)\n\n def calculate_mAP(self):\n \"\"\"Perform calculation of mAP and per-class APs\"\"\"\n\n AP_list = np.zeros((self.num_classes,), dtype=np.float64)\n\n for cls_idx in range(self.num_classes):\n\n true_positive_list = []\n positive_list = []\n conf_list = []\n for det, anno in zip(self.detection_list, self.annotation_list):\n\n annotation = anno[cls_idx]\n prediction = det[cls_idx]\n iou_threshold = self.iou_threshold_perclass[cls_idx]\n\n if len(prediction) == 0:\n continue\n\n matched_gt = np.zeros((len(annotation),), dtype=np.int32)\n true_positives = np.zeros((len(prediction),), dtype=np.int32)\n\n predicted_confs = prediction[:, 0]\n predicted_boxes = prediction[:, 1:]\n\n for idx, true_bbox in enumerate(annotation):\n\n iou = iou_point_np(true_bbox, predicted_boxes)\n\n # find matching\n iou_max = np.max(iou)\n if iou_max > iou_threshold:\n matched_gt[idx] = 1\n true_positives[np.argmax(iou)] = 1\n\n true_positive_list.append(true_positives)\n positive_list.append(len(annotation))\n conf_list.append(predicted_confs)\n\n # end loop over images\n\n true_positive = np.concatenate(true_positive_list, axis=0)\n positive = np.array(positive_list, dtype=np.int).sum()\n conf = np.concatenate(conf_list, axis=0)\n\n idx_sort = np.argsort(-conf)\n fn = 1 - true_positive[idx_sort]\n true_positive = np.cumsum(true_positive[idx_sort])\n false_negative = np.cumsum(fn)\n\n precision = true_positive / (true_positive + false_negative + 1e-4)\n recall = true_positive / (positive + 1e-4)\n AP_val = np.sum((recall[1:] - recall[:-1]) * precision[1:])\n AP_list[cls_idx] = AP_val\n\n pass\n\n # end 
for cls_idx\n\n mAP = float(AP_list.mean())\n\n return mAP, AP_list\n" ]
[ [ "numpy.sum", "numpy.cumsum", "numpy.zeros", "numpy.argsort", "numpy.argmax", "numpy.max", "numpy.maximum", "numpy.concatenate", "numpy.array", "numpy.minimum" ] ]
arpitvaghela/probml-notebooks
[ "32ecb309dd474b989fd1c6ce4ad6dab7a25bbead" ]
[ "notebooks-text-format/ae_mnist_tf.py" ]
[ "# -*- coding: utf-8 -*-\n# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.11.3\n# kernelspec:\n# display_name: Python 3\n# name: python3\n# ---\n\n# + [markdown] id=\"view-in-github\" colab_type=\"text\"\n# <a href=\"https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/ae_mnist_tf.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>\n\n# + [markdown] id=\"dBzXdWvftA6j\"\n# # Autoencoders (using MLP and CNN) for (fashion) MNIST\n#\n# Code based on \n# https://github.com/ageron/handson-ml2/blob/master/17_autoencoders_and_gans.ipynb\n\n# + id=\"qubdBtC2tdfZ\"\n\ntry:\n # # %tensorflow_version only exists in Colab.\n # %tensorflow_version 2.x\n IS_COLAB = True\nexcept Exception:\n IS_COLAB = False\n\n# TensorFlow ≥2.0 is required\nimport tensorflow as tf\nfrom tensorflow import keras\nassert tf.__version__ >= \"2.0\"\n\nif not tf.config.list_physical_devices('GPU'):\n print(\"No GPU was detected. DNNs can be very slow without a GPU.\")\n if IS_COLAB:\n print(\"Go to Runtime > Change runtime and select a GPU hardware accelerator.\")\n\n# + id=\"2gdty9kvcHaZ\"\n# Standard Python libraries\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nimport time\nimport numpy as np\nimport glob\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport PIL\nimport imageio\n\nfrom IPython import display\n\nimport sklearn\n\nimport seaborn as sns;\nsns.set(style=\"ticks\", color_codes=True)\n\nimport pandas as pd\npd.set_option('precision', 2) # 2 decimal places\npd.set_option('display.max_rows', 20)\npd.set_option('display.max_columns', 30)\npd.set_option('display.width', 100) # wide windows\n\n\n# + id=\"qYZcvdD5f1wD\" colab={\"base_uri\": \"https://localhost:8080/\"} outputId=\"106ba289-07d1-4f51-e757-abc7c4789b0c\"\n\n\nnp.random.seed(0)\n\nFASHION = True\n\nif FASHION:\n (train_images, train_labels), (test_images, test_labels) = keras.datasets.fashion_mnist.load_data() \n class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', \n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\nelse:\n (train_images, train_labels), (test_images, test_labels) = keras.datasets.mnist.load_data() \n class_names = [str(x) for x in range(10)]\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\nprint(np.shape(train_images))\nprint(np.shape(test_images))\n#(60000, 28, 28)\n#(10000, 28, 28)\n\n# Partition training into train and valid\nX_train_full = train_images; y_train_full = train_labels\nX_test = test_images; y_test = test_labels\nX_train, X_valid = X_train_full[:-5000], X_train_full[-5000:]\ny_train, y_valid = y_train_full[:-5000], y_train_full[-5000:]\n\n\n\n\n# + id=\"i6D4X0Ht4lTP\"\ndef rounded_accuracy(y_true, y_pred):\n return keras.metrics.binary_accuracy(tf.round(y_true), tf.round(y_pred))\n\n\ndef plot_image(image):\n plt.imshow(image, cmap=\"binary\")\n plt.axis(\"off\")\n \ndef show_reconstructions(model, images=X_valid, n_images=5):\n reconstructions = model.predict(images[:n_images])\n plt.figure(figsize=(n_images * 1.5, 3))\n for image_index in range(n_images):\n plt.subplot(2, n_images, 1 + image_index)\n plot_image(images[image_index])\n plt.subplot(2, n_images, 1 + n_images + image_index)\n plot_image(reconstructions[image_index])\n \n\n\n# + id=\"vSU7jrJ34ZdP\"\n# Visualize 2d manifold from encodings using tSNE\n\nfrom 
sklearn.manifold import TSNE\nimport matplotlib\n\ndef plot_embeddings_tsne(X_data, y_data, encodings):\n np.random.seed(42)\n tsne = TSNE()\n X_data_2D = tsne.fit_transform(encodings)\n X_data_2D = (X_data_2D - X_data_2D.min()) / (X_data_2D.max() - X_data_2D.min())\n\n # adapted from https://scikit-learn.org/stable/auto_examples/manifold/plot_lle_digits.html\n plt.figure(figsize=(10, 8))\n cmap = plt.cm.tab10\n plt.scatter(X_data_2D[:, 0], X_data_2D[:, 1], c=y_data, s=10, cmap=cmap)\n image_positions = np.array([[1., 1.]])\n for index, position in enumerate(X_data_2D):\n dist = np.sum((position - image_positions) ** 2, axis=1)\n if np.min(dist) > 0.02: # if far enough from other images\n image_positions = np.r_[image_positions, [position]]\n imagebox = matplotlib.offsetbox.AnnotationBbox(\n matplotlib.offsetbox.OffsetImage(X_data[index], cmap=\"binary\"),\n position, bboxprops={\"edgecolor\": cmap(y_data[index]), \"lw\": 2})\n plt.gca().add_artist(imagebox)\n plt.axis(\"off\")\n\n\n# + [markdown] id=\"toI0Fds7vmy-\"\n# # Standard AE\n\n# + [markdown] id=\"NSTcnilg4C0a\"\n# ## MLP\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"y0A3b3Nu3Ged\" outputId=\"efdc68a7-b21b-4fc6-cd7b-d5a0d81e0373\"\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nstacked_encoder = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(100, activation=\"selu\"),\n keras.layers.Dense(30, activation=\"selu\"),\n])\nstacked_decoder = keras.models.Sequential([\n keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n keras.layers.Reshape([28, 28])\n])\nstacked_ae = keras.models.Sequential([stacked_encoder, stacked_decoder])\nstacked_ae.compile(loss=\"binary_crossentropy\",\n optimizer=keras.optimizers.SGD(lr=1.5), metrics=[rounded_accuracy])\nhistory = stacked_ae.fit(X_train, X_train, epochs=5,\n validation_data=(X_valid, X_valid))\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 192} id=\"RoSCj6Xp368d\" outputId=\"5531b694-41de-4c86-f299-b74d183a44e5\"\nshow_reconstructions(stacked_ae)\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 483} id=\"tUR1B9yc5FAU\" outputId=\"9954411c-e65f-4e4d-b93f-1a5dec0d3808\"\nZ = stacked_encoder.predict(X_valid)\nprint(Z.shape)\nplot_embeddings_tsne(X_valid, y_valid, Z)\nplt.tight_layout()\nplt.savefig('ae-mlp-fashion-tsne.pdf')\nplt.show()\n\n# + [markdown] id=\"-HVBgom54FEP\"\n# ## CNN\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"623eyEF55yn4\" outputId=\"2d5624da-5802-4030-9e0d-0e63665f1906\"\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nconv_encoder = keras.models.Sequential([\n keras.layers.Reshape([28, 28, 1], input_shape=[28, 28]),\n keras.layers.Conv2D(16, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n keras.layers.MaxPool2D(pool_size=2),\n keras.layers.Conv2D(32, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n keras.layers.MaxPool2D(pool_size=2),\n keras.layers.Conv2D(64, kernel_size=3, padding=\"SAME\", activation=\"selu\"),\n keras.layers.MaxPool2D(pool_size=2)\n])\nconv_decoder = keras.models.Sequential([\n keras.layers.Conv2DTranspose(32, kernel_size=3, strides=2, padding=\"VALID\", activation=\"selu\",\n input_shape=[3, 3, 64]),\n keras.layers.Conv2DTranspose(16, kernel_size=3, strides=2, padding=\"SAME\", activation=\"selu\"),\n keras.layers.Conv2DTranspose(1, kernel_size=3, strides=2, padding=\"SAME\", activation=\"sigmoid\"),\n keras.layers.Reshape([28, 28])\n])\nconv_ae = 
keras.models.Sequential([conv_encoder, conv_decoder])\n\nconv_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n metrics=[rounded_accuracy])\nhistory = conv_ae.fit(X_train, X_train, epochs=5,\n validation_data=(X_valid, X_valid))\n\n# + id=\"AFsYbN5_f7jZ\" colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 192} outputId=\"a5ab0b57-2689-4044-a42e-56069afb3e93\"\n\n\nshow_reconstructions(conv_ae)\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 615} id=\"-YdrAsFk6rhF\" outputId=\"83d66902-a8a7-4162-e7d3-fdaef9fd8c0c\"\nZ = conv_encoder.predict(X_valid)\nprint(Z.shape)\nN = Z.shape[0]\nZZ = np.reshape(Z, (N,-1))\nprint(ZZ.shape)\n\n\nplot_embeddings_tsne(X_valid, y_valid, ZZ)\nplt.tight_layout()\nplt.savefig('ae-conv-fashion-tsne.pdf')\nplt.show()\n\n# + [markdown] id=\"mgockadWtpwR\"\n# # Denoising\n\n# + [markdown] id=\"h2nuyKaRvd_a\"\n# ## Gaussian noise\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 635} id=\"LfLmtfVxtrE4\" outputId=\"15799a08-1767-4fbe-9a1e-5f4814180902\"\n# Using Gaussian noise\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\ndenoising_encoder = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.GaussianNoise(0.2),\n keras.layers.Dense(100, activation=\"selu\"),\n keras.layers.Dense(30, activation=\"selu\")\n])\ndenoising_decoder = keras.models.Sequential([\n keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n keras.layers.Reshape([28, 28])\n])\ndenoising_ae = keras.models.Sequential([denoising_encoder, denoising_decoder])\ndenoising_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n metrics=[rounded_accuracy])\nhistory = denoising_ae.fit(X_train, X_train, epochs=10,\n validation_data=(X_valid, X_valid))\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nnoise = keras.layers.GaussianNoise(0.2)\nshow_reconstructions(denoising_ae, noise(X_valid, training=True))\n#save_fig(\"ae-denoising-gaussian.pdf\")\nplt.show()\n\n# + [markdown] id=\"y6di4LfCvgGl\"\n# ## Bernoulli dropout noise\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 635} id=\"7iMx4wMLtz60\" outputId=\"d35beb75-5d38-4474-d0c1-c5ed5e0c18c0\"\n\n# Dropout version\n\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\ndropout_encoder = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dropout(0.5),\n keras.layers.Dense(100, activation=\"selu\"),\n keras.layers.Dense(30, activation=\"selu\")\n])\ndropout_decoder = keras.models.Sequential([\n keras.layers.Dense(100, activation=\"selu\", input_shape=[30]),\n keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n keras.layers.Reshape([28, 28])\n])\ndropout_ae = keras.models.Sequential([dropout_encoder, dropout_decoder])\ndropout_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n metrics=[rounded_accuracy])\nhistory = dropout_ae.fit(X_train, X_train, epochs=10,\n validation_data=(X_valid, X_valid))\n\ntf.random.set_seed(42)\nnp.random.seed(42)\n\ndropout = keras.layers.Dropout(0.5)\nshow_reconstructions(dropout_ae, dropout(X_valid, training=True))\n#save_fig(\"ae-denoising-dropout.pdf\")\n\n# + [markdown] id=\"4fP7GNViuDAX\"\n# # Sparse\n\n# + [markdown] id=\"_eE9UKuqu-bL\"\n# ## Vanilla AE\n\n# + colab={\"base_uri\": \"https://localhost:8080/\"} id=\"2u9wKK5RuDg3\" outputId=\"2ae00005-5f20-4b51-f9fa-550303494002\"\n# Simple AE with sigmoid activations on the 
bottleneck\n \ntf.random.set_seed(42)\nnp.random.seed(42)\n\nNhidden = 300 # Geron uses 30 for the simple AE, 300 for the regularized ones\nsimple_encoder = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(100, activation=\"selu\"),\n keras.layers.Dense(Nhidden, activation=\"sigmoid\"),\n])\nsimple_decoder = keras.models.Sequential([\n keras.layers.Dense(100, activation=\"selu\", input_shape=[Nhidden]),\n keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n keras.layers.Reshape([28, 28])\n])\nsimple_ae = keras.models.Sequential([simple_encoder, simple_decoder])\nsimple_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.),\n metrics=[rounded_accuracy])\nhistory = simple_ae.fit(X_train, X_train, epochs=10,\n validation_data=(X_valid, X_valid))\n\n\n# + id=\"CeuWR63euN7S\"\n\n# To visualize statistics of the hidden units\n\ndef plot_percent_hist(ax, data, bins):\n counts, _ = np.histogram(data, bins=bins)\n widths = bins[1:] - bins[:-1]\n x = bins[:-1] + widths / 2\n ax.bar(x, counts / len(data), width=widths*0.8)\n ax.xaxis.set_ticks(bins)\n ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(\n lambda y, position: \"{}%\".format(int(np.round(100 * y)))))\n ax.grid(True)\n\ndef plot_activations_histogram2(encoder, height=1, n_bins=10, fname_base=\"\"):\n X_valid_codings = encoder(X_valid).numpy()\n activation_means = X_valid_codings.mean(axis=0)\n mean = activation_means.mean()\n bins = np.linspace(0, 1, n_bins + 1)\n\n fig, ax1 = plt.subplots()\n plot_percent_hist(ax1, X_valid_codings.ravel(), bins)\n ax1.plot([mean, mean], [0, height], \"k--\", label=\"Overall Mean = {:.2f}\".format(mean))\n ax1.legend(loc=\"upper center\", fontsize=14)\n ax1.set_xlabel(\"Activation\")\n ax1.set_ylabel(\"% Activations\")\n ax1.axis([0, 1, 0, height])\n fname_act = '{}-act.pdf'.format(fname_base)\n #save_fig(fname_act)\n plt.show()\n \n fig, ax2 = plt.subplots()\n plot_percent_hist(ax2, activation_means, bins)\n ax2.plot([mean, mean], [0, height], \"k--\", label=\"Overall Mean = {:.2f}\".format(mean))\n ax2.set_xlabel(\"Neuron Mean Activation\")\n ax2.set_ylabel(\"% Neurons\")\n ax2.axis([0, 1, 0, height])\n fname_act = '{}-neurons.pdf'.format(fname_base)\n #save_fig(fname_act)\n plt.show()\n\ndef plot_activations_heatmap(encoder, N=100):\n X = encoder(X_valid).numpy()\n plt.figure(figsize=(10,5))\n plt.imshow(X[:N,:])\n\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 957} id=\"CmFlyGO1uQWt\" outputId=\"acfcc508-bc16-4116-edab-7d681b5aafa0\"\nshow_reconstructions(simple_ae)\nplot_activations_histogram2(simple_encoder, height=0.35, fname_base=\"ae-sparse-noreg\")\nplot_activations_heatmap(simple_encoder)\n#save_fig(\"ae-sparse-noreg-heatmap.pdf\")\nplt.show()\n\n# + [markdown] id=\"JpjsLmrGvBnl\"\n# ## L1 regularizer on activations\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 1000} id=\"AGmqWtkaub5v\" outputId=\"4f60e5e9-f4c9-472b-9d91-361ed5c26d30\"\n# Add L1 regularizer\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nsparse_l1_encoder = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(100, activation=\"selu\"),\n keras.layers.Dense(Nhidden, activation=\"sigmoid\"),\n keras.layers.ActivityRegularization(l1=1e-3) # Alternatively, you could add\n # activity_regularizer=keras.regularizers.l1(1e-3)\n # to the previous layer.\n])\nsparse_l1_decoder = keras.models.Sequential([\n keras.layers.Dense(100, activation=\"selu\", input_shape=[Nhidden]),\n 
keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n keras.layers.Reshape([28, 28])\n])\nsparse_l1_ae = keras.models.Sequential([sparse_l1_encoder, sparse_l1_decoder])\nsparse_l1_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n metrics=[rounded_accuracy])\nhistory = sparse_l1_ae.fit(X_train, X_train, epochs=10,\n validation_data=(X_valid, X_valid))\n\nshow_reconstructions(sparse_l1_ae)\nplot_activations_histogram2(sparse_l1_encoder, fname_base=\"ae-sparse-L1reg\")\nplot_activations_heatmap(sparse_l1_encoder)\n#save_fig(\"ae-sparse-L1reg-heatmap.pdf\")\nplt.show()\n\n# + [markdown] id=\"o_00c0wYvFZ-\"\n# ## KL regularizer on activations\n\n# + colab={\"base_uri\": \"https://localhost:8080/\", \"height\": 1000} id=\"Ytq0ls6YusWH\" outputId=\"30b2c382-22ca-4e92-cd2d-108fc443dd84\"\n# KL method\np = 0.1\nq = np.linspace(0.001, 0.999, 500)\nkl_div = p * np.log(p / q) + (1 - p) * np.log((1 - p) / (1 - q))\nmse = (p - q)**2\nmae = np.abs(p - q)\nplt.plot([p, p], [0, 0.3], \"k:\")\nplt.text(0.05, 0.32, \"Target\\nsparsity\", fontsize=14)\nplt.plot(q, kl_div, \"b-\", label=\"KL divergence\")\nplt.plot(q, mae, \"g--\", label=r\"MAE ($\\ell_1$)\")\nplt.plot(q, mse, \"r--\", linewidth=1, label=r\"MSE ($\\ell_2$)\")\nplt.legend(loc=\"upper left\", fontsize=14)\nplt.xlabel(\"Actual sparsity\")\nplt.ylabel(\"Cost\", rotation=0)\nplt.axis([0, 1, 0, 0.95])\n#save_fig(\"ae-sparse-kl-loss\")\n\nK = keras.backend\nkl_divergence = keras.losses.kullback_leibler_divergence\n\nclass KLDivergenceRegularizer(keras.regularizers.Regularizer):\n def __init__(self, weight, target=0.1):\n self.weight = weight\n self.target = target\n def __call__(self, inputs):\n mean_activities = K.mean(inputs, axis=0)\n return self.weight * (\n kl_divergence(self.target, mean_activities) +\n kl_divergence(1. - self.target, 1. - mean_activities))\n \ntf.random.set_seed(42)\nnp.random.seed(42)\n\nkld_reg = KLDivergenceRegularizer(weight=0.05, target=0.1)\nsparse_kl_encoder = keras.models.Sequential([\n keras.layers.Flatten(input_shape=[28, 28]),\n keras.layers.Dense(100, activation=\"selu\"),\n keras.layers.Dense(Nhidden, activation=\"sigmoid\", activity_regularizer=kld_reg)\n])\nsparse_kl_decoder = keras.models.Sequential([\n keras.layers.Dense(100, activation=\"selu\", input_shape=[Nhidden]),\n keras.layers.Dense(28 * 28, activation=\"sigmoid\"),\n keras.layers.Reshape([28, 28])\n])\nsparse_kl_ae = keras.models.Sequential([sparse_kl_encoder, sparse_kl_decoder])\nsparse_kl_ae.compile(loss=\"binary_crossentropy\", optimizer=keras.optimizers.SGD(lr=1.0),\n metrics=[rounded_accuracy])\nhistory = sparse_kl_ae.fit(X_train, X_train, epochs=10,\n validation_data=(X_valid, X_valid))\n\nshow_reconstructions(sparse_kl_ae)\nplot_activations_histogram2(sparse_kl_encoder, fname_base=\"ae-sparse-KLreg\")\nplot_activations_heatmap(sparse_kl_encoder)\n#save_fig(\"ae-sparse-KLreg-heatmap.pdf\")\nplt.show()\n" ]
[ [ "numpy.sum", "tensorflow.keras.layers.Flatten", "numpy.histogram", "matplotlib.pyplot.tight_layout", "tensorflow.round", "numpy.random.seed", "tensorflow.keras.datasets.fashion_mnist.load_data", "tensorflow.keras.datasets.mnist.load_data", "matplotlib.pyplot.imshow", "numpy.log", "matplotlib.pyplot.ylabel", "tensorflow.keras.optimizers.SGD", "tensorflow.keras.layers.Conv2D", "matplotlib.pyplot.plot", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure", "numpy.reshape", "numpy.abs", "tensorflow.keras.layers.Reshape", "matplotlib.pyplot.gca", "sklearn.manifold.TSNE", "matplotlib.pyplot.text", "tensorflow.keras.layers.Dense", "numpy.linspace", "tensorflow.keras.layers.GaussianNoise", "tensorflow.random.set_seed", "matplotlib.pyplot.scatter", "numpy.round", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Conv2DTranspose", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplots", "numpy.min", "tensorflow.keras.models.Sequential", "matplotlib.pyplot.legend", "tensorflow.keras.layers.ActivityRegularization", "matplotlib.offsetbox.OffsetImage", "pandas.set_option", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.shape", "numpy.array", "tensorflow.keras.layers.MaxPool2D", "matplotlib.pyplot.xlabel", "tensorflow.config.list_physical_devices" ] ]
brkronheim/mcmc_samplers
[ "03d8b2b9b6b6150657eabc7b509875fb16445a0e" ]
[ "Samplers/mirrorSlice.py" ]
[ "import tensorflow as tf\n\nfrom Samplers.sampler import Sampler\n\nclass MirrorSlice(Sampler):\n \"\"\"\n An implementation of the Mirror Slice Sampling MCMC sampler. This sampler\n works by picking a random direction, moving a set distance in that\n direction, then reflecting off the gradient of the distribution if it has\n exited the slice. The slice is all regions of the distribution with a\n probability greater than k, where k is between 0 and the proability of \n the starting location. A state is accepted if it is inside the slice at \n the end.\n \"\"\"\n \n def __init__(self, dimensions, log_likelihood_fn, step_size_min,\n step_size_max, mirror_steps, dtype=tf.float32):\n \"\"\"\n The constructor for the Mirror Slice sampler\n\n Parameters\n ----------\n dimensions : the number of dimensions of the sampled distribution\n log_likelihood_fn : a function accepting a state from the distribution\n and returning the natural logarithm of the probability of that\n state. This probability need not be normalized.\n step_size_min: the minimum value for the distance moved with each step\n step_size_max: the maximum value for the distance moved with each step\n mirror_steps: the number of mirror steps in one sample\n \n\n Returns\n -------\n None.\n\n \"\"\"\n self.dtype = dtype\n self.dimensions=dimensions\n self.log_likelihood_fn = log_likelihood_fn\n self.sampledVals = []\n self.acceptance=0 \n self.step_size_min = tf.cast(step_size_min, self.dtype)\n self.step_size_max = tf.cast(step_size_max, self.dtype)\n self.mirror_steps = mirror_steps\n \n\n\n @tf.function \n def run_sampler_inner(self, initial_state):\n \"\"\"\n The inner sampling step for the Mirror Slice sampler\n\n Parameters\n ----------\n initial_state : the starting point for the sampler. 
It should be\n structured as [[1st chain - 1st dim, 2nd chain - 1st dim , ...],\n [1st chain - 2nd dim, 2nd chain - 2nd dim , ...],\n ...]\n\n Returns\n -------\n sampledVals: the states sampled by the Metropolis-Hastings sampler\n acceptance: number of new states accepted accross all parallel\n iterations for each sampling step\n \"\"\"\n \n acceptance = 0\n \n currentState = tf.cast(initial_state, self.dtype) \n currentProb = self.log_likelihood_fn(currentState)\n \n \n #run sampler for the number of burn_in steps\n i = tf.constant(0)\n samples = self.burn_in\n condition = lambda i, currentState, currentProb: tf.less(i, samples)\n \n #Body of while loop for burn_in, no states or acceptance info kept\n def body(i, currentState, currentProb):\n currentState, currentProb, accept = self.one_step(currentState,\n currentProb)\n \n return([tf.add(i, 1), currentState, currentProb])\n\n #tf.while_loop to speed up sampling\n i, currentState, currentProb = tf.while_loop(condition, body, \n [i, currentState, \n currentProb])\n \n #tensorArray of set size to store samples\n sampledVals = tf.TensorArray(self.dtype, size=self.samples,\n dynamic_size=False)\n \n #run sampler for the number of sampling steps\n samples += self.samples\n def condition(i, currentState, currentProb, sampledVals, acceptance):\n return(tf.less(i, samples))\n \n #Body of while loop for sampling, states and acceptance info kept\n def body(i, currentState, currentProb, sampledVals, acceptance):\n \n currentState, currentProb, accept = self.one_step(currentState,\n currentProb)\n acceptance+=tf.reduce_sum(accept)\n sampledVals= sampledVals.write(i-self.burn_in, currentState)\n \n \n return([tf.add(i, 1), currentState, currentProb, sampledVals,\n acceptance])\n\n #tf.while_loop to speed up sampling\n i, currentState, currentProb, sampledVals,acceptance = \\\n tf.while_loop(condition, body, [i, currentState, currentProb,\n sampledVals, acceptance])\n \n #trun sampledVals into a normal Tensor\n sampledVals = sampledVals.stack() \n \n return(sampledVals, acceptance)\n\n \n \n def one_step(self, currentState, currentProb):\n \"\"\"\n A function which performs one step of the Mirror Slice sampler\n\n Parameters\n ----------\n currentState : the current state for all the parallel chains\n currentProb : the current probabilities of the states\n\n Returns\n -------\n updatedState : the updated state for all the parallel chains\n updatedProb : the updated probabilities of the states\n accepted : acceptance probability for each chain\n \n \"\"\"\n newState, newProb, slice_prob = self.propose_states(currentState)\n updatedState, updatedProb, accepted = self.accept_reject(currentState,\n newState, slice_prob, newProb)\n \n \n \n return(updatedState, updatedProb, accepted)\n\n \n \n def propose_states(self, currentState):\n \"\"\"\n \n\n Parameters\n ----------\n currentState : the current state of the sampler for each chain\n\n Returns\n -------\n newState : the proposed new state of the sampler for each chain\n\n \"\"\"\n i = tf.constant(0)\n self.step_size=tf.random.uniform((),self.step_size_min, self.step_size_max, dtype=self.dtype)\n currentMomentum = tf.random.normal(currentState.shape, dtype=self.dtype)\n currentProb = self.log_likelihood_fn(currentState)\n slice_prob = tf.math.log(tf.random.uniform([1], minval=0, maxval = tf.exp(currentProb), dtype=self.dtype))\n \n \n def one_mirror_step(i, currentState, currentMomentum, currentProb):\n newState = currentState + currentMomentum*self.step_size\n prob = None\n with tf.GradientTape() as 
g:\n g.watch(newState)\n prob = self.log_likelihood_fn(newState)\n \n grad = g.gradient(prob, newState)\n newMomentum = tf.where(prob>slice_prob, currentMomentum, currentMomentum-2*grad*tf.reduce_sum(currentMomentum*grad)/tf.reduce_sum(grad*grad))\n \n return([tf.add(i, 1), newState, newMomentum, prob])\n \n def condition(i, currentState, currentMomentum, currentProb):\n return(tf.less(i, self.mirror_steps))\n \n i, newState, currentMomentum, currentProb = tf.while_loop(condition, one_mirror_step, [i, currentState, currentMomentum, currentProb])\n \n \n return(newState, currentProb, slice_prob)\n\n \n def accept_reject(self, currentState, newState, slice_prob, new_prob):\n \"\"\"\n \n\n Parameters\n ----------\n currentState : the current state for all the parallel chains\n newState : the next proposed state for all the parallel chains\n slice_prob : lowest probability of slice\n new_prob : probabiltiy of new state\n\n Returns\n -------\n updatedState : the updated state for all the parallel chains\n updatedProb : the updated probabilities of the states\n accepted : 1 if the new state was accepted, 0 otherwise for each chain\n\n \"\"\"\n acceptCriteria = new_prob>=slice_prob\n \n accept = tf.where(acceptCriteria,1, 0)\n updatedState = tf.where(acceptCriteria, newState, currentState)\n \n return(updatedState, new_prob, accept)\n " ]
[ [ "tensorflow.add", "tensorflow.while_loop", "tensorflow.cast", "tensorflow.less", "tensorflow.TensorArray", "tensorflow.where", "tensorflow.random.normal", "tensorflow.random.uniform", "tensorflow.GradientTape", "tensorflow.exp", "tensorflow.constant", "tensorflow.reduce_sum" ] ]
mamerisawesome/glassmirror
[ "ed6147e73c049931f0118237f2ebb111d471963d" ]
[ "glassmirror/models.py" ]
[ "#!/usr/bin/env python\n# Darwin Bautista\n# HomographyNet, from https://arxiv.org/pdf/1606.03798.pdf\n\nimport os.path\n\nfrom tensorflow.keras.applications import MobileNet\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import MaxPooling2D\nfrom tensorflow.keras.layers import InputLayer\nfrom tensorflow.keras.layers import Dropout\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import Concatenate\n\ndef create_model():\n model = Sequential(name='homographynet')\n model.add(InputLayer((120, 120, 3), name='input_1'))\n\n # 4 Layers with 64 filters, then another 4 with 120 filters\n filters = 4 * [3] + 4 * [120]\n for i, f in enumerate(filters, 1):\n model.add(Conv2D(f, 3, padding='same', activation='relu', name='conv2d_{}'.format(i)))\n model.add(BatchNormalization(name='batch_normalization_{}'.format(i)))\n # MaxPooling after every 2 Conv layers except the last one\n if i % 2 == 0 and i != 8:\n model.add(MaxPooling2D(strides=(2, 2), name='max_pooling2d_{}'.format(int(i/2))))\n\n model.add(Flatten(name='flatten_1'))\n model.add(Dropout(0.5, name='dropout_1'))\n model.add(Dense(120, activation='relu', name='dense_1'))\n model.add(Dropout(0.5, name='dropout_2'))\n\n # Regression model\n model.add(Dense(8, name='dense_2'))\n\n return model" ]
[ [ "tensorflow.keras.models.Sequential", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.InputLayer", "tensorflow.keras.layers.Dense" ] ]
zhaoguangxiang/pytorch-cifar
[ "509994fd2035009c7f53192a4c497b97f6295e6e" ]
[ "models/resnet_small.py" ]
[ "'''ResNet in PyTorch.\n\nFor Pre-activation ResNet, see 'preact_resnet.py'.\n\nReference:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. arXiv:1512.03385\nresnet same as the origin paper\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResSmall(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResSmall, self).__init__()\n self.in_planes = 16\n\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)\n self.linear = nn.Linear(64*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef ResSmall20():\n return ResSmall(BasicBlock, [3, 3, 3])\n\n\ndef ResSmall32():\n return ResSmall(BasicBlock, [5, 5, 5])\n\n\ndef ResSmall44():\n return ResSmall(BasicBlock, [7, 7, 7])\n\n\ndef ResSmall56():\n return ResSmall(BasicBlock, [9, 9, 9])\n\n\ndef ResSmall110():\n return ResSmall(BasicBlock, [18, 18, 18])\n\n\nclass BaseBlock(nn.Module):\n expansion = 1\n\n def 
__init__(self, in_planes, planes, stride=1):\n super(BaseBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n # self.shortcut = nn.Sequential()\n # if stride != 1 or in_planes != self.expansion * planes:\n # self.shortcut = nn.Sequential(\n # nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),\n # nn.BatchNorm2d(self.expansion * planes)\n # )\n\n def forward(self, x):\n # print('x size', x.size())\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n # out += self.shortcut(x)\n # out = F.relu(out)\n return out\n\n\nclass RfSmall(nn.Module):\n def __init__(self, block, num_blocks, args, num_classes=10):\n super(RfSmall, self).__init__()\n self.in_planes = 16\n\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.num_blocks = num_blocks\n self.layer_list = nn.ModuleList()\n self.shortcut_list = nn.ModuleList()\n self.num_big_block = len(num_blocks)\n layer1, shortcut1 = self._make_layer(block, 16, num_blocks[0], stride=1)\n layer2, shortcut2 = self._make_layer(block, 32, num_blocks[1], stride=2)\n layer3, shortcut3 = self._make_layer(block, 64, num_blocks[2], stride=2)\n self.layer_list.extend([layer1, layer2, layer3])\n self.shortcut_list.extend([shortcut1, shortcut2, shortcut3])\n self.linear = nn.Linear(64*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = nn.ModuleList()\n shortcuts = nn.ModuleList()\n for stride in strides:\n # 64*64, 64*64 ..\n # 64*128(stride=2) 128*128 ..\n # 128*256(stride=2),256*256,..\n # 256*512(stride=2) 512*512 ..\n layers.append(block(self.in_planes, planes, stride))\n shortcut = nn.Sequential()\n if stride != 1 or self.in_planes != block.expansion * planes:\n shortcut = nn.Sequential(\n nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(block.expansion * planes)\n )\n shortcuts.append(shortcut)\n self.in_planes = planes * block.expansion\n return layers, shortcuts\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n for i in range(self.num_big_block):\n for j in range(self.num_blocks[i]):\n # out = F.relu(self.bn1(self.conv1(x)))\n # out = self.bn2(self.conv2(out))\n # out += self.shortcut(x)\n # out = F.relu(out)\n # return out\n layer_i = self.layer_list[i]\n shortcut_i = self.shortcut_list[i]\n res = shortcut_i[j](out)\n out = layer_i[j](out)\n out += res\n out = F.relu(out)\n out = F.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n # print('out size',out.size())\n out = self.linear(out)\n return out\n\n\ndef RfSmall56(args):\n return RfSmall(block=BaseBlock, num_blocks=[9, 9, 9], args=args)\n\n\ndef RfSmall110(args):\n return RfSmall(block=BaseBlock, num_blocks=[18, 18, 18], args=args)\n\n\nclass LmRnnSmall(nn.Module):\n # 只考虑分别设计三个rnn,然后bsz包含height 和width 的情况,层间使用残差连接。dim_type=channel,pass_hidden=0\n def __init__(self, block, num_blocks, args, num_classes=10):\n super(LmRnnSmall, self).__init__()\n self.in_planes = 16\n\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.num_blocks = num_blocks\n self.num_big_block = len(num_blocks)\n\n self.layer_list = 
nn.ModuleList()\n self.shortcut_list = nn.ModuleList()\n self.rnn_list = nn.ModuleList()\n self.m_out_list = nn.ModuleList()\n self.rnn_memory_size_list = []\n\n self.args = args\n self.memory_type = args.memory_type\n # self.pass_hidden = args.pass_hidden\n self.rnn_ratio = args.rnn_ratio\n # self.dim_type = args.dim_type\n self.rnn_res = args.rnn_res\n\n layer1, shortcut1, rnn1, m_out_linear1, rnn_memory_size1 = self._make_layer(block, 16, num_blocks[0], stride=1)\n layer2, shortcut2, rnn2, m_out_linear2, rnn_memory_size2 = self._make_layer(block, 32, num_blocks[1], stride=2)\n layer3, shortcut3, rnn3, m_out_linear3, rnn_memory_size3 = self._make_layer(block, 64, num_blocks[2], stride=2)\n\n self.layer_list.extend([layer1, layer2, layer3])\n self.shortcut_list.extend([shortcut1, shortcut2, shortcut3])\n self.rnn_list.extend([rnn1, rnn2, rnn3])\n self.m_out_list.extend([m_out_linear1, m_out_linear2, m_out_linear3])\n self.rnn_memory_size_list.extend([rnn_memory_size1,rnn_memory_size2,rnn_memory_size3])\n\n self.linear = nn.Linear(64*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = nn.ModuleList()\n shortcuts = nn.ModuleList()\n rnn_input_size = block.expansion * planes\n rnn_memory_size = int(self.args.rnn_ratio * block.expansion * planes)\n if self.memory_type == 'rnn':\n rnn = torch.nn.RNNCell(rnn_input_size, rnn_memory_size, bias=True, nonlinearity='tanh')\n elif self.memory_type == 'lstm':\n rnn = torch.nn.LSTMCell(rnn_input_size, rnn_memory_size, bias=True)\n elif self.memory_type == 'gru':\n rnn = torch.nn.GRUCell(rnn_input_size, rnn_memory_size, bias=True)\n else:\n rnn = None\n if self.rnn_ratio != 1:\n m_out_linear = nn.Linear(self.rnn_memory_size, rnn_input_size)\n else:\n m_out_linear = None\n for i in range(num_blocks):\n # 对rnn来说,第一个残差连接虽然等维度,考虑到其他都是传h0,我就把当做和其他大块间残差的一样的\n stride = strides[i]\n # 16*16, 16*16 ..\n # 16*32(stride=2) 32*32 ..\n # 32*64(stride=2),64*64 ..\n layers.append(block(self.in_planes, planes, stride))\n if i == 0:\n shortcut = nn.Sequential()\n if stride != 1 or self.in_planes != block.expansion * planes:\n shortcut = nn.Sequential(\n nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(block.expansion * planes)\n )\n shortcuts.append(shortcut)\n self.in_planes = planes * block.expansion\n return layers, shortcuts, rnn, m_out_linear,rnn_memory_size\n\n def set_m_rnn(self, x, rnn_memory_size):\n origin_bsz, channel, height, width, = x.size()\n bsz = height * width * origin_bsz\n if self.memory_type in ['rnn', 'gru']:\n hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n return hx\n if self.memory_type == 'lstm':\n hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n cx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n return (hx, cx)\n\n def m_rnn(self, x, rnn, rnn_hidden):\n origin_bsz, channel, height, width = x.size()\n in_x = x.permute(0, 2, 3, 1).reshape(origin_bsz*height*width, channel)\n if self.memory_type in ['rnn', 'gru']:\n hx = rnn(in_x, rnn_hidden)\n m_output = hx # bsz, self.rnn_memory_size\n rnn_hidden = hx\n elif self.memory_type == 'lstm':\n hx, cx = rnn(in_x, rnn_hidden)\n m_output = hx # bsz, self.rnn_memory_size\n rnn_hidden = (hx, cx)\n return m_output, rnn_hidden\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n # out size torch.Size([128, 16, 32, 32])\n for i in range(self.num_big_block):\n for j in range(self.num_blocks[i]):\n layer_i = 
self.layer_list[i]\n shortcut_i = self.shortcut_list[i]\n # print('layer i=0,j=0', layer_i[j])\n # print('big_block%d| layer%d| rnn%d: %s|' % (i, j, i, str(self.rnn_list[i])))\n # print('out size', out.size())\n if j == 0:\n res = shortcut_i[j](out)\n else:\n if j == 1:\n rnn_hidden = self.set_m_rnn(out, self.rnn_memory_size_list[i])\n bsz, channel, height, width = out.size()\n m_out, rnn_hidden = self.m_rnn(out, self.rnn_list[i], rnn_hidden)\n if self.m_out_list[i] is not None:\n m_out = self.m_out_list[i](m_out)\n m_out = torch.reshape(m_out, (bsz, height, width, channel)).permute((0, 3, 1, 2))\n res = m_out\n out = layer_i[j](out) # [bsz,dim,h,w]\n out += res\n out = F.relu(out)\n out = F.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef LmRnnSmall56(args):\n return LmRnnSmall(block=BaseBlock, num_blocks=[9, 9, 9], args=args)\n\n\ndef LmRnnSmall110(args):\n return LmRnnSmall(block=BaseBlock, num_blocks=[18, 18, 18], args=args)\n\n\nclass LmRnnKbSmallCIFAR10(nn.Module):\n # keep batch size same as origin, 32*32*16 ,16*16*32 8*8*64 as the input_size can pass hidden or not pass hidden\n def __init__(self, block, num_blocks, args, num_classes=10):\n super(LmRnnKbSmallCIFAR10, self).__init__()\n self.in_planes = 16\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n\n self.num_blocks = num_blocks\n self.num_big_block = len(num_blocks)\n\n self.layer_list = nn.ModuleList()\n self.shortcut_list = nn.ModuleList()\n self.rnn_list = nn.ModuleList()\n self.m_out_list = nn.ModuleList()\n self.rnn_memory_size_list = []\n self.convs_list = nn.ModuleList()\n self.deconvs_list = nn.ModuleList()\n\n self.args = args\n self.memory_type = args.memory_type\n self.pass_hidden = args.pass_hidden\n # self.keep_block_residual = args.keep_block_residual\n self.rnn_ratio = args.rnn_ratio\n self.num_downs = args.num_downs\n self.down_rate = 4 ** self.num_downs\n\n layer1, shortcut1, rnn1, m_out_linear1, rnn_memory_size1, convs1, deconvs1 = self._make_layer(block, 16, num_blocks[0], stride=1, fm=32)\n layer2, shortcut2, rnn2, m_out_linear2, rnn_memory_size2, convs2, deconvs2 = self._make_layer(block, 32, num_blocks[1], stride=2, fm=16)\n layer3, shortcut3, rnn3, m_out_linear3, rnn_memory_size3, convs3, deconvs3 = self._make_layer(block, 64, num_blocks[2], stride=2, fm=8)\n\n self.layer_list.extend([layer1, layer2, layer3])\n self.shortcut_list.extend([shortcut1, shortcut2, shortcut3])\n self.rnn_list.extend([rnn1, rnn2, rnn3])\n self.m_out_list.extend([m_out_linear1, m_out_linear2, m_out_linear3])\n self.rnn_memory_size_list.extend([rnn_memory_size1, rnn_memory_size2, rnn_memory_size3])\n self.convs_list.extend([convs1, convs2, convs3])\n self.deconvs_list.extend([deconvs1, deconvs2, deconvs3])\n\n self.linear = nn.Linear(64*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride, fm):\n strides = [stride] + [1]*(num_blocks-1)\n layers = nn.ModuleList()\n shortcuts = nn.ModuleList()\n cur_fig_size = int(fm * fm / self.down_rate)\n rnn_input_size = block.expansion * planes * cur_fig_size\n rnn_memory_size = int(self.args.rnn_ratio * block.expansion * planes * cur_fig_size)\n if self.memory_type == 'rnn':\n rnn = torch.nn.RNNCell(rnn_input_size, rnn_memory_size, bias=True, nonlinearity='tanh')\n elif self.memory_type == 'lstm':\n rnn = torch.nn.LSTMCell(rnn_input_size, rnn_memory_size, bias=True)\n elif self.memory_type == 'gru':\n rnn = torch.nn.GRUCell(rnn_input_size, 
rnn_memory_size, bias=True)\n else:\n rnn = None\n if self.rnn_ratio != 1:\n m_out_linear = nn.Linear(rnn_memory_size, rnn_input_size)\n else:\n m_out_linear = None\n if self.num_downs > 0:\n convs = nn.ModuleList()\n deconvs = nn.ModuleList()\n for j in range(self.num_downs):\n convs.append(nn.Conv2d(in_channels=block.expansion*planes, out_channels=block.expansion*planes,\n kernel_size=3, stride=2, padding=1))\n deconvs.append(nn.ConvTranspose2d(block.expansion*planes, block.expansion*planes, kernel_size=3,\n stride=2, padding=1))\n else:\n convs=None\n deconvs=None\n for i in range(num_blocks):\n # 对rnn来说,第一个残差连接虽然等维度,考虑到其他都是传h0,我就把当做和其他大块间残差的一样的\n stride = strides[i]\n # 16*16, 16*16 ..\n # 16*32(stride=2) 32*32 ..\n # 32*64(stride=2),64*64 ..\n layers.append(block(self.in_planes, planes, stride))\n if i == 0:\n if not self.pass_hidden:\n shortcut = nn.Sequential()\n if stride != 1 or self.in_planes != block.expansion * planes:\n shortcut = nn.Sequential(\n nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(block.expansion * planes)\n )\n shortcuts.append(shortcut)\n else:\n # if self.keep_block_residual:\n # shortcut = nn.Sequential()\n # if stride != 1 or self.in_planes != block.expansion * planes:\n # shortcut = nn.Sequential(\n # nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride,\n # bias=False),\n # nn.BatchNorm2d(block.expansion * planes)\n # )\n # shortcuts.append(shortcut)\n memory_shortcut = nn.Sequential()\n if stride != 1 or self.in_planes != block.expansion * planes:\n memory_shortcut = nn.Sequential(nn.Linear(rnn_memory_size*2, rnn_memory_size),\n nn.BatchNorm2d(rnn_memory_size))\n shortcuts.append(memory_shortcut)\n self.in_planes = planes * block.expansion\n return layers, shortcuts, rnn, m_out_linear, rnn_memory_size, convs, deconvs\n\n def set_m_rnn(self, x, rnn_memory_size):\n # origin_bsz, channel, height, width, = x.size()\n # bsz = height * width * origin_bsz\n bsz = x.size()[0]\n if self.memory_type in ['rnn', 'gru']:\n hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n return hx\n if self.memory_type == 'lstm':\n hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n cx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n return (hx, cx)\n\n def m_rnn(self, x, cur_i, rnn_hidden):\n input_size_list = []\n rnn = self.rnn_list[cur_i]\n if self.convs_list:\n # 可能四层dim变长的deconv更合理\n convs = self.convs_list[cur_i]\n for j in range(self.num_downs):\n input_size_list.append(x.size())\n x = convs[j](x)\n bsz, channel, new_height, new_width = x.size()\n x = x.permute([0, 2, 3, 1]).reshape(bsz, int(self.rnn_memory_size_list[cur_i]/ self.args.rnn_ratio)) # bsz, new_height * new_width * channel\n if self.memory_type in ['rnn', 'gru']:\n hx = rnn(x, rnn_hidden)\n m_output = hx # bsz, self.rnn_memory_size\n rnn_hidden = hx\n elif self.memory_type == 'lstm':\n hx, cx = rnn(x, rnn_hidden)\n m_output = hx # bsz, self.rnn_memory_size\n rnn_hidden = (hx, cx)\n if self.m_out_list[cur_i] is not None:\n m_output = self.m_out_list[cur_i](m_output)\n m_output = torch.reshape(m_output, (bsz, new_height, new_height, channel,)).permute((0, 3, 1, 2))\n if self.deconvs_list:\n deconvs = self.deconvs_list[cur_i]\n for j in range(self.num_downs):\n m_output = deconvs[j](m_output, output_size=input_size_list[-j-1])\n return m_output, rnn_hidden\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n # out size torch.Size([128, 16, 32, 32])\n rnn_hidden = 0 # 0 to error\n for i in 
range(self.num_big_block):\n for j in range(self.num_blocks[i]):\n layer_i = self.layer_list[i]\n shortcut_i = self.shortcut_list[i]\n print('layer i=0,j=0', layer_i[j])\n print('big_block%d| layer%d| rnn%d: %s|' % (i, j, i, str(self.rnn_list[i])))\n print('out size', out.size())\n if not self.pass_hidden or i == 0:\n if j == 0:\n res = shortcut_i[j](out)\n else:\n if j == 1:\n rnn_hidden = self.set_m_rnn(out, self.rnn_memory_size_list[i])\n m_out, rnn_hidden = self.m_rnn(out, i, rnn_hidden)\n res = m_out\n if self.pass_hidden and i > 0:\n if j == 0:\n print('shortcut_i[j]', shortcut_i[j])\n rnn_hidden = shortcut_i[j](rnn_hidden)\n m_out, rnn_hidden = self.m_rnn(out, i, rnn_hidden)\n res = m_out\n out = layer_i[j](out) # [bsz,dim,h,w]\n out += res\n out = F.relu(out)\n out = F.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef LmRnnKbSmall56CIFAR10(args):\n return LmRnnKbSmallCIFAR10(block=BaseBlock, num_blocks=[9, 9, 9], args=args)\n\n\ndef LmRnnKbSmall110CIFAR10(args):\n return LmRnnKbSmallCIFAR10(block=BaseBlock, num_blocks=[18, 18, 18], args=args)\n\n\nclass DepthTransposeCNN(nn.Module):\n def __init__(self,in_dim, out_dim, kernel_size=4, is_out=False):\n super(DepthTransposeCNN, self).__init__()\n self.nets = nn.ModuleList()\n self.is_out = is_out\n self.nets.extend([nn.ConvTranspose2d(in_channels=in_dim, out_channels=in_dim,\n kernel_size=kernel_size, stride=2, padding=1, groups=in_dim),\n nn.ConvTranspose2d(in_channels=in_dim, out_channels=out_dim, kernel_size=1, stride=1,\n padding=0, bias=False)])\n if not is_out:\n self.nets.extend([nn.BatchNorm2d(in_dim),\n nn.ReLU(True),\n nn.BatchNorm2d(out_dim),\n nn.ReLU(True)])\n\n def forward(self, x, output_size):\n bsz, dim, h, w = output_size\n if self.is_out:\n x = self.nets[0](x, output_size=[bsz, dim * 2, h, w])\n x = self.nets[1](x, output_size=output_size)\n else:\n x = self.nets[0](x, output_size=[bsz, dim * 2, h, w])\n x = self.nets[2](x)\n x = self.nets[3](x)\n x = self.nets[1](x, output_size=output_size)\n x = self.nets[4](x)\n x = self.nets[5](x)\n return x\n\n\nclass TransposeCNN(nn.Module):\n def __init__(self, in_dim, out_dim, kernel_size=4, is_out=False):\n super(TransposeCNN, self).__init__()\n self.nets = nn.ModuleList()\n self.is_out = is_out\n self.nets.extend([nn.ConvTranspose2d(in_channels=in_dim, out_channels=out_dim,\n kernel_size=kernel_size, stride=2, padding=1), ])\n if not is_out:\n self.nets.extend([nn.BatchNorm2d(out_dim),\n nn.ReLU(True)])\n\n def forward(self, x, output_size):\n if self.is_out:\n x = self.nets[0](x, output_size=output_size)\n else:\n x = self.nets[0](x, output_size=output_size)\n x = self.nets[1](x)\n x = self.nets[2](x)\n return x\n\n\nclass LmRnnConsistentSmallCIFAR10(nn.Module):\n # keep batch size same as origin, 32*32*16 ,16*16*32 8*8*64 as the input_size can pass hidden or not pass hidden\n def __init__(self, block, num_blocks, args, num_classes=10):\n super(LmRnnConsistentSmallCIFAR10, self).__init__()\n self.in_planes = 16\n self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n\n self.num_blocks = num_blocks\n self.num_big_block = len(num_blocks)\n\n self.layer_list = nn.ModuleList()\n self.shortcut_list = nn.ModuleList()\n self.rnn_list = nn.ModuleList()\n self.m_out_list = nn.ModuleList()\n self.rnn_memory_size_list = []\n self.convs_list = nn.ModuleList()\n self.deconvs_list = nn.ModuleList()\n\n self.args = args\n self.memory_type = args.memory_type\n self.rnn_ratio = 
args.rnn_ratio\n self.conv_activate = args.conv_activate\n self.memory_before = args.memory_before\n self.depth_separate = args.depth_separate\n self.consistent_separate_rnn = args.consistent_separate_rnn\n self.dcgan_init = args.dcgan_init\n self.dcgan_kernel= args.dcgan_kernel\n self.dcgan_share_conv = args.dcgan_share_conv\n\n layer1, shortcut1, rnn1, m_out_linear1, rnn_memory_size1, convs1, deconvs1 = self._make_layer(block, 16, num_blocks[0], stride=1, fm=32,)\n layer2, shortcut2, rnn2, m_out_linear2, rnn_memory_size2, convs2, deconvs2 = self._make_layer(block, 32, num_blocks[1], stride=2, fm=16,)\n layer3, shortcut3, rnn3, m_out_linear3, rnn_memory_size3, convs3, deconvs3 = self._make_layer(block, 64, num_blocks[2], stride=2, fm=8,)\n\n self.layer_list.extend([layer1, layer2, layer3])\n self.shortcut_list.extend([shortcut1, shortcut2, shortcut3])\n if not self.consistent_separate_rnn:\n rnn2 = rnn1\n rnn3 = rnn1\n self.rnn_list.extend([rnn1, rnn2, rnn3])\n if not self.consistent_separate_rnn:\n m_out_linear2 = m_out_linear1\n m_out_linear3 = m_out_linear1\n self.m_out_list.extend([m_out_linear1, m_out_linear2, m_out_linear3])\n self.rnn_memory_size_list.extend([rnn_memory_size1, rnn_memory_size2, rnn_memory_size3])\n if self.dcgan_share_conv:\n # 32*32*16, 16*16*32, 8*8*64, 4*4*128,2*2*256,1*1*512\n # 1*1*512, 2*2*256, 4*4*128, 8*8*64, 16*16*32, 32*32*16,\n dim_list = [512, 256, 128, 64, 32, 16]\n convs2 = convs1[1:]\n convs3 = convs1[2:]\n deconvs2 = deconvs1[:-2].append(DepthTransposeCNN(in_dim=dim_list[-3], out_dim=dim_list[-2], kernel_size=self.dcgan_kernel,\n is_out=True) if self.depth_separate else TransposeCNN(in_dim=dim_list[-3], out_dim=dim_list[-2], kernel_size=self.dcgan_kernel, is_out=True))\n deconvs3 = deconvs2[:-3].append(DepthTransposeCNN(in_dim=dim_list[-4], out_dim=dim_list[-3], kernel_size=self.dcgan_kernel,\n is_out=True) if self.depth_separate else TransposeCNN(in_dim=dim_list[-4], out_dim=dim_list[-3], kernel_size=self.dcgan_kernel, is_out=True))\n\n self.convs_list.extend([convs1, convs2, convs3])\n self.deconvs_list.extend([deconvs1, deconvs2, deconvs3])\n if self.dcgan_init:\n self.deconvs_list.apply(self.weight_init)\n self.convs_list.apply(self.weight_init)\n\n self.linear = nn.Linear(64*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride, fm, ):\n strides = [stride] + [1]*(num_blocks-1)\n layers = nn.ModuleList()\n shortcuts = nn.ModuleList()\n down_rate = fm\n num_downs = int(np.log(fm)/np.log(2))\n cur_fig_size = int(fm * fm / down_rate)\n # build rnn\n rnn_input_size = block.expansion * planes * cur_fig_size\n rnn_memory_size = int(self.args.rnn_ratio * block.expansion * planes * cur_fig_size)\n assert rnn_memory_size == 512 * self.rnn_ratio\n if self.consistent_separate_rnn or fm ==32:\n if self.memory_type == 'rnn':\n rnn = torch.nn.RNNCell(rnn_input_size, rnn_memory_size, bias=True, nonlinearity='tanh')\n elif self.memory_type == 'lstm':\n rnn = torch.nn.LSTMCell(rnn_input_size, rnn_memory_size, bias=True)\n elif self.memory_type == 'gru':\n rnn = torch.nn.GRUCell(rnn_input_size, rnn_memory_size, bias=True)\n else:\n rnn = None\n # rnn out linear\n if self.rnn_ratio != 1:\n m_out_linear = nn.Linear(rnn_memory_size, rnn_input_size)\n else:\n m_out_linear = None\n else:\n rnn = None\n m_out_linear = None\n\n if self.conv_activate == 'lrelu':\n conv_activation = nn.LeakyReLU(True)\n elif self.conv_activate == 'relu':\n conv_activation = nn.ReLU(True)\n if num_downs > 0 or (self.dcgan_share_conv and fm != 32):\n 
dcgan_kernel=self.dcgan_kernel\n convs = nn.ModuleList()\n deconvs = nn.ModuleList()\n output_dim = block.expansion*planes\n for j in range(num_downs):\n output_dim = output_dim * 2\n # print('output_dim:', output_dim)\n if j == num_downs-1:\n if self.depth_separate:\n cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim / 2), out_channels=int(output_dim / 2),\n kernel_size=dcgan_kernel, stride=2, padding=1, groups=int(output_dim / 2)),\n nn.Conv2d(in_channels=int(output_dim / 2), out_channels=output_dim, kernel_size=1, stride=1, padding=0, bias=False))\n else:\n cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim/2), out_channels=output_dim,\n kernel_size=dcgan_kernel, stride=2, padding=1))\n else:\n if self.depth_separate:\n cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim / 2), out_channels=int(output_dim / 2),\n kernel_size=dcgan_kernel, stride=2, padding=1, groups=int(output_dim / 2)),\n nn.BatchNorm2d(int(output_dim / 2)),\n nn.ReLU(True),\n nn.Conv2d(in_channels=int(output_dim / 2), out_channels=output_dim, kernel_size=1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(output_dim),\n nn.ReLU(True))\n else:\n cur_conv = nn.Sequential(nn.Conv2d(in_channels=int(output_dim/2), out_channels=output_dim,\n kernel_size=dcgan_kernel, stride=2, padding=1),\n nn.BatchNorm2d(output_dim),\n conv_activation)\n convs.append(cur_conv)\n for j in range(num_downs):\n output_dim = int(output_dim / 2)\n # print('output_dim:',output_dim)\n if j == num_downs-1:\n is_out = True\n else:\n is_out = False\n if self.depth_separate:\n cur_deconv = DepthTransposeCNN(in_dim=output_dim * 2, out_dim=output_dim, kernel_size=self.dcgan_kernel, is_out=is_out)\n else:\n cur_deconv = TransposeCNN(in_dim=output_dim * 2, out_dim=output_dim, kernel_size=self.dcgan_kernel, is_out=is_out)\n deconvs.append(cur_deconv)\n else:\n convs=None\n deconvs=None\n for i in range(num_blocks):\n stride = strides[i] # 16*16, 16*16 .. 16*32(stride=2) 32*32 .. 
32*64(stride=2),64*64 ..\n layers.append(block(self.in_planes, planes, stride))\n if i == 0:\n shortcut = nn.Sequential()\n if stride != 1 or self.in_planes != block.expansion * planes:\n shortcut = nn.Sequential(\n nn.Conv2d(self.in_planes, block.expansion * planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(block.expansion * planes)\n )\n shortcuts.append(shortcut)\n self.in_planes = planes * block.expansion\n return layers, shortcuts, rnn, m_out_linear, rnn_memory_size, convs, deconvs\n\n def weight_init(self, m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n return m\n\n def init_rnn_state(self, x, rnn_memory_size):\n # origin_bsz, channel, height, width, = x.size()\n # bsz = height * width * origin_bsz\n bsz = x.size()[0]\n if self.memory_type in ['rnn', 'gru']:\n hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n return hx\n if self.memory_type == 'lstm':\n hx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n cx = torch.zeros(bsz, rnn_memory_size).cuda().type_as(x)\n return (hx, cx)\n\n def m_rnn(self, x, cur_i, rnn_hidden):\n input_size_list = []\n rnn = self.rnn_list[cur_i]\n num_downs = 5 - cur_i\n if self.convs_list:\n # 5,4,3,的deconv使得dim一致\n convs = self.convs_list[cur_i]\n for j in range(num_downs):\n input_size_list.append(x.size())\n # [128, 16, 32, 32]\n # [128,32,16,16]\n # [128,64, 8, 8]\n # [128,128,4,4]\n # [128, 256, 2, 2]\n # [128, 512, 1, 1]\n x = convs[j](x)\n bsz, channel, new_height, new_width = x.size()\n # print(\"self.convs_list[cur_i]\",self.convs_list[cur_i])\n # print('after conv x size',x.size())\n x = x.permute([0, 2, 3, 1]).reshape(bsz, int(self.rnn_memory_size_list[cur_i] / self.args.rnn_ratio)) # bsz, new_height * new_width * channel\n if self.memory_type in ['rnn', 'gru']:\n hx = rnn(x, rnn_hidden)\n m_output = hx # bsz, self.rnn_memory_size\n rnn_hidden = hx\n elif self.memory_type == 'lstm':\n hx, cx = rnn(x, rnn_hidden)\n m_output = hx # bsz, self.rnn_memory_size\n rnn_hidden = (hx, cx)\n if self.m_out_list[cur_i] is not None:\n m_output = self.m_out_list[cur_i](m_output)\n m_output = torch.reshape(m_output, (bsz, new_height, new_height, channel,)).permute((0, 3, 1, 2))\n if self.deconvs_list:\n deconvs = self.deconvs_list[cur_i]\n for j in range(num_downs):\n # print('j:%d deconv_in: %s| deconv j:%s' % (j, m_output.size(),deconvs[j]))\n m_output = deconvs[j](m_output, output_size=input_size_list[-j - 1])\n return m_output, rnn_hidden\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n # out size torch.Size([128, 16, 32, 32]) [128,32,16,16]\n rnn_hidden = 0 # 0 to error\n for i in range(self.num_big_block):\n for j in range(self.num_blocks[i]):\n layer_i = self.layer_list[i]\n # shortcut_i = self.shortcut_list[i]\n # print('layer i=0,j=0', layer_i[j])\n # print('big_block%d| layer%d| out size%s|' % (i, j, out.size()))\n if i == 0 and j == 0:\n rnn_hidden = self.init_rnn_state(out, self.rnn_memory_size_list[i])\n if self.memory_before:\n if j == 0:\n m_in = self.shortcut_list[i][j](out)\n else:\n m_in =out\n m_out, rnn_hidden = self.m_rnn(m_in, i, rnn_hidden)\n res = m_out\n out = layer_i[j](out) # [bsz,dim,h,w]\n out += res\n out = F.relu(out)\n else:\n out = layer_i[j](out)\n m_out, rnn_hidden = self.m_rnn(out, i, rnn_hidden)\n out += m_out\n out = F.avg_pool2d(out, 8)\n out = out.view(out.size(0), -1)\n out = 
self.linear(out)\n return out\n\n\ndef LmRnnConsistentSmall56CIFAR10(args):\n return LmRnnConsistentSmallCIFAR10(block=BaseBlock, num_blocks=[9, 9, 9], args=args)\n\n\ndef test():\n net = ResSmall20()\n y = net(torch.randn(1, 3, 32, 32))\n print(y.size())\n\n# test()\n" ]
[ [ "torch.nn.functional.avg_pool2d", "numpy.log", "torch.nn.Conv2d", "torch.nn.ModuleList", "torch.nn.ConvTranspose2d", "torch.nn.BatchNorm2d", "torch.randn", "torch.nn.init.normal_", "torch.nn.RNNCell", "torch.reshape", "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.GRUCell", "torch.nn.functional.relu", "torch.nn.LSTMCell", "torch.nn.Sequential", "torch.zeros", "torch.nn.ReLU", "torch.nn.LeakyReLU" ] ]
AIasd/pymoo
[ "08705ca866367d9fab675c30ffe585c837df9654" ]
[ "pymoo/interface.py" ]
[ "\"\"\"\nThis class provide an interface for other libraries to specific modules. For example, the evolutionary operations\ncan be used easily just by calling a function and providing the lower and upper bounds of the problem.\n\n\"\"\"\nimport copy\nimport types\n\nimport numpy as np\n\nfrom pymoo.model.algorithm import filter_optimum\nfrom pymoo.model.individual import Individual\nfrom pymoo.model.population import Population\nfrom pymoo.model.problem import Problem\n\n\n# =========================================================================================================\n# A global interface for some features\n# =========================================================================================================\n\n\ndef get_problem_func(n_var, xl, xu, type_var):\n class P(Problem):\n def __init__(self) -> None:\n super().__init__(n_var=n_var, n_obj=1, n_constr=0, xl=xl, xu=xu, type_var=type_var)\n\n return P\n\n\ndef sample(sampling, n_samples, n_var, xl=0, xu=1, **kwargs):\n problem = get_problem_func(n_var, xl, xu, None)(**kwargs)\n return sampling.do(problem, n_samples, pop=None, **kwargs)\n\n\ndef crossover(crossover, a, b, c=None, xl=0, xu=1, type_var=np.double, **kwargs):\n n = a.shape[0]\n _pop = Population.merge(Population().new(\"X\", a), Population().new(\"X\", b))\n _P = np.column_stack([np.arange(n), np.arange(n) + n])\n\n if c is not None:\n _pop = Population.merge(_pop, Population().new(\"X\", c))\n _P = np.column_stack([_P, np.arange(n) + 2 * n])\n\n problem = get_problem_func(a.shape[1], xl, xu, type_var)(**kwargs)\n return crossover.do(problem, _pop, _P, **kwargs).get(\"X\")\n\n\ndef mutation(mutation, X, xl=0, xu=1, type_var=np.double, **kwargs):\n problem = get_problem_func(X.shape[1], xl, xu, type_var)(**kwargs)\n return mutation.do(problem, Population().new(\"X\", X), **kwargs).get(\"X\")\n\n\n# =========================================================================================================\n# Ask And Tell Interface\n# =========================================================================================================\n\n\ndef evaluate_to_nan(self, x, out, *args, **kwargs):\n n_points, _ = x.shape\n out[\"F\"] = None\n if self.n_constr > 0:\n out[\"G\"] = None\n\n\ndef evaluate_to_value(F, G=None):\n def eval(self, x, out, *args, **kwargs):\n n_points, _ = x.shape\n out[\"F\"] = F\n if G is not None:\n out[\"G\"] = G\n\n return eval\n\n\nclass AskAndTell:\n\n def __init__(self, algorithm, problem=None, **kwargs):\n\n if problem is not None:\n self.problem = copy.deepcopy(problem)\n else:\n self.problem = Problem(**kwargs)\n\n self.algorithm = copy.deepcopy(algorithm)\n\n def get_population(self):\n return self.algorithm.pop\n\n def set_population(self, pop):\n self.algorithm.pop = pop\n\n def get_offsprings(self):\n return self.algorithm.off\n\n def set_offsprings(self, off):\n self.algorithm.off = off\n\n def ask(self):\n\n # if the initial population has not been generated yet\n if self.get_population() is None:\n\n self.algorithm.initialize(self.problem)\n\n # deactivate the survival because no values have been set yet\n survival = self.algorithm.survival\n self.algorithm.survival = None\n\n self.problem._evaluate = types.MethodType(evaluate_to_nan, self.problem)\n self.algorithm._initialize()\n\n # activate the survival for the further runs\n self.algorithm.survival = survival\n\n return self.get_population().get(\"X\")\n\n # usually the case - create the next output\n else:\n\n # if offsprings do not exist set the pop - otherwise 
always offsprings\n            if self.get_offsprings() is not None:\n                self.set_population(Population.merge(self.get_population(), self.get_offsprings()))\n\n            # execute a survival of the algorithm\n            survivors = self.algorithm.survival.do(self.problem, self.get_population(),\n                                                   self.algorithm.pop_size, algorithm=self.algorithm)\n            self.set_population(survivors)\n\n            # execute the mating using the population\n            off = self.algorithm.mating.do(self.algorithm.problem, self.get_population(),\n                                           n_offsprings=self.algorithm.n_offsprings, algorithm=self.algorithm)\n\n            # execute the fake evaluation of the individuals\n            self.problem._evaluate = types.MethodType(evaluate_to_nan, self.problem)\n            self.algorithm.evaluator.eval(self.problem, off, algorithm=self.algorithm)\n            self.set_offsprings(off)\n\n            return off.get(\"X\")\n\n    def tell(self, F, G=None, X=None):\n\n        # if offsprings do not exist set the pop - otherwise always offsprings\n        pop_to_evaluate = self.get_offsprings() if self.get_offsprings() is not None else self.get_population()\n\n        # if the user changed the design space values for whatever reason\n        if X is not None:\n            pop_to_evaluate.set(\"X\", X)\n\n        # do the function evaluations (G may be None for unconstrained problems)\n        G = G.copy() if G is not None else None\n        self.problem._evaluate = types.MethodType(evaluate_to_value(F.copy(), G), self.problem)\n        self.algorithm.evaluator.eval(self.problem, pop_to_evaluate, algorithm=self.algorithm)\n\n    def result(self, only_optimum=True, return_values_of=\"auto\"):\n\n        if return_values_of == \"auto\":\n            return_values_of = [\"X\", \"F\"]\n            if self.problem.n_constr > 0:\n                return_values_of.append(\"CV\")\n\n        if only_optimum:\n            self.algorithm.finalize()\n            pop, opt = self.algorithm.pop, self.algorithm.opt\n            res = filter_optimum(pop.copy()) if opt is None else opt.copy()\n\n            if isinstance(res, Individual):\n                res = Population.create(res)\n\n        else:\n            res = self.algorithm.pop\n\n        return res.get(*return_values_of)\n\n\n" ]
[ [ "numpy.arange" ] ]
chengyu0910/DeepFusion_IQA_V1.1
[ "7c55f7629b24df00a8c37f82e6142c3a636a667b" ]
[ "models/TrancatedIQA.py" ]
[ "\nimport scipy.io as sio\nimport numpy as np\nimport torch.nn as nn\nimport torch\nfrom models.BCNN import BCNN\n#matlab文件名\n\n\nclass IQANet_trancated(nn.Module):\n def __init__(self, matfile):\n super(IQANet_trancated, self).__init__()\n # matfile = r\"C:\\Users\\chengyu\\Desktop\\IQAloss\\Hu\\matlab_code\\net.mat\"\n dict = sio.loadmat(matfile)\n netdict = dict['net'].squeeze()\n num_layers = netdict.shape[0]\n stride = [1,2,1,2,1,2,1,1,2]\n net_convs = []\n net_fc = []\n for i in range(int(num_layers/2)):\n layer_name = netdict[i*2][0][0]\n if 'regress' not in layer_name and 'conv' in layer_name:\n if i is 0:\n in_chs = 3\n else:\n in_chs = netdict[(i-1)*2][1].shape[-1]\n out_chs = netdict[i*2][1].shape[-1]\n conv = torch.nn.Conv2d(in_channels=in_chs, out_channels=out_chs, kernel_size=(3,3), stride=(stride[i],stride[i]),padding=(1,1))\n conv.weight.data = torch.from_numpy(netdict[i*2][1]).permute(3,2,0,1).float()\n conv.bias.data = torch.from_numpy(netdict[i*2+1][1]).squeeze().float()\n net_convs.append(conv)\n net_convs.append(torch.nn.ReLU())\n elif 'regress' in layer_name:\n fc = torch.nn.Linear(netdict[i*2][1].shape[-1],1)\n fc.weight.data = torch.from_numpy(netdict[i*2][1]).squeeze(0).float()\n fc.bias.data = torch.from_numpy(netdict[i*2+1][1]).squeeze().float()\n net_fc.append(fc)\n self.add_module('net_convs', nn.Sequential(*net_convs))\n self.add_module('net_fc', nn.Sequential(*net_fc))\n # self.net_convs = torch.nn.Sequential(*net_convs)\n # self.net_fc = torch.nn.Sequential(*net_fc)\n self.net_bilinear_pool = BCNN()\n\n def forward(self, input):# Attention:input is in range (0,255)\n input = input*255\n input[:, 0, :, :] = input[:, 0, :, :] - 123.8181\n input[:, 1, :, :] = input[:, 1, :, :] - 119.8395\n input[:, 2, :, :] = input[:, 2, :, :] - 114.6756\n\n nodes_convs = [3,7,11,17]#net nodes for convsblock\n nodes_convs_name=['conv1','conv2','conv3','conv4']\n feat_and_score = dict([])\n cnt = 0\n for i in range(len(self.net_convs._modules)):\n if i is 0:\n feat = self.net_convs._modules[str(i)](input)\n else:\n feat = self.net_convs._modules[str(i)](feat)\n if i in nodes_convs:\n feat_and_score = dict(feat_and_score,**{nodes_convs_name[cnt]:feat})\n cnt += 1\n\n feat = self.net_bilinear_pool(feat)\n score = self.net_fc(feat)\n feat_and_score = dict(feat_and_score, **{'score':score})\n\n return feat_and_score\n\n\n\n\n\n\n\n" ]
[ [ "scipy.io.loadmat", "torch.nn.Linear", "torch.from_numpy", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
JanSchulz/pandas
[ "6e8ce685eb5a4bd0b39665a3a0d7ddd627ea8ed0" ]
[ "pandas/core/series.py" ]
[ "\"\"\"\nData structure for 1-dimensional cross-sectional and time series data\n\"\"\"\nfrom __future__ import division\n\n# pylint: disable=E1101,E1103\n# pylint: disable=W0703,W0622,W0613,W0201\n\nimport types\nimport warnings\n\nfrom numpy import nan, ndarray\nimport numpy as np\nimport numpy.ma as ma\n\nfrom pandas.core.common import (isnull, notnull, _is_bool_indexer,\n _default_index, _maybe_upcast,\n _asarray_tuplesafe, _infer_dtype_from_scalar,\n is_list_like, _values_from_object,\n _possibly_cast_to_datetime, _possibly_castable,\n _possibly_convert_platform, _try_sort,\n ABCSparseArray, _maybe_match_name, _coerce_to_dtype,\n _ensure_object, SettingWithCopyError,\n _maybe_box_datetimelike)\nfrom pandas.core.index import (Index, MultiIndex, InvalidIndexError,\n _ensure_index)\nfrom pandas.core.indexing import _check_bool_indexer, _maybe_convert_indices\nfrom pandas.core import generic, base\nfrom pandas.core.internals import SingleBlockManager\nfrom pandas.core.categorical import Categorical\nfrom pandas.tseries.index import DatetimeIndex\nfrom pandas.tseries.tdi import TimedeltaIndex\nfrom pandas.tseries.period import PeriodIndex, Period\nfrom pandas import compat\nfrom pandas.util.terminal import get_terminal_size\nfrom pandas.compat import zip, u, OrderedDict\n\nimport pandas.core.array as pa\nimport pandas.core.ops as ops\nfrom pandas.core.algorithms import select_n\n\nimport pandas.core.common as com\nimport pandas.core.datetools as datetools\nimport pandas.core.format as fmt\nimport pandas.core.nanops as nanops\nfrom pandas.util.decorators import Appender, cache_readonly\n\nimport pandas.lib as lib\nimport pandas.tslib as tslib\nimport pandas.index as _index\n\nfrom numpy import percentile as _quantile\nfrom pandas.core.config import get_option\n\n__all__ = ['Series']\n\n\n_shared_doc_kwargs = dict(\n axes='index',\n klass='Series',\n axes_single_arg=\"{0,'index'}\",\n inplace=\"\"\"inplace : boolean, default False\n If True, performs operation inplace and returns None.\"\"\"\n)\n\n\ndef _coerce_method(converter):\n \"\"\" install the scalar coercion methods \"\"\"\n\n def wrapper(self):\n if len(self) == 1:\n return converter(self.iloc[0])\n raise TypeError(\n \"cannot convert the series to {0}\".format(str(converter)))\n return wrapper\n\n\n#----------------------------------------------------------------------\n# Series class\n\n\nclass Series(base.IndexOpsMixin, generic.NDFrame):\n\n \"\"\"\n One-dimensional ndarray with axis labels (including time series).\n\n Labels need not be unique but must be any hashable type. The object\n supports both integer- and label-based indexing and provides a host of\n methods for performing operations involving the index. Statistical\n methods from ndarray have been overridden to automatically exclude\n missing data (currently represented as NaN)\n\n Operations between Series (+, -, /, *, **) align values based on their\n associated index values-- they need not be the same length. The result\n index will be the sorted union of the two indexes.\n\n Parameters\n ----------\n data : array-like, dict, or scalar value\n Contains data stored in Series\n index : array-like or Index (1d)\n Values must be unique and hashable, same length as data. Index\n object (or other iterable of same length as data) Will default to\n np.arange(len(data)) if not provided. 
If both a dict and index\n sequence are used, the index will override the keys found in the\n dict.\n dtype : numpy.dtype or None\n If None, dtype will be inferred\n copy : boolean, default False\n Copy input data\n \"\"\"\n _metadata = ['name']\n _allow_index_ops = True\n\n def __init__(self, data=None, index=None, dtype=None, name=None,\n copy=False, fastpath=False):\n\n # we are called internally, so short-circuit\n if fastpath:\n\n # data is an ndarray, index is defined\n if not isinstance(data, SingleBlockManager):\n data = SingleBlockManager(data, index, fastpath=True)\n if copy:\n data = data.copy()\n if index is None:\n index = data.index\n\n else:\n\n if index is not None:\n index = _ensure_index(index)\n\n if data is None:\n data = {}\n if dtype is not None:\n dtype = self._validate_dtype(dtype)\n\n if isinstance(data, MultiIndex):\n raise NotImplementedError\n elif isinstance(data, Index):\n # need to copy to avoid aliasing issues\n if name is None:\n name = data.name\n\n data = data._to_embed(keep_tz=True)\n copy = True\n elif isinstance(data, pa.Array):\n pass\n elif isinstance(data, Series):\n if name is None:\n name = data.name\n if index is None:\n index = data.index\n else:\n data = data.reindex(index, copy=copy)\n data = data._data\n elif isinstance(data, dict):\n if index is None:\n if isinstance(data, OrderedDict):\n index = Index(data)\n else:\n index = Index(_try_sort(data))\n try:\n if isinstance(index, DatetimeIndex):\n # coerce back to datetime objects for lookup\n data = lib.fast_multiget(data, index.astype('O'),\n default=pa.NA)\n elif isinstance(index, PeriodIndex):\n data = [data.get(i, nan) for i in index]\n else:\n data = lib.fast_multiget(data, index.values,\n default=pa.NA)\n except TypeError:\n data = [data.get(i, nan) for i in index]\n\n elif isinstance(data, SingleBlockManager):\n if index is None:\n index = data.index\n else:\n data = data.reindex(index, copy=copy)\n elif isinstance(data, Categorical):\n if dtype is not None:\n raise ValueError(\"cannot specify a dtype with a Categorical\")\n if name is None:\n name = data.name\n elif isinstance(data, types.GeneratorType):\n data = list(data)\n elif isinstance(data, (set, frozenset)):\n raise TypeError(\"{0!r} type is unordered\"\n \"\".format(data.__class__.__name__))\n else:\n\n # handle sparse passed here (and force conversion)\n if isinstance(data, ABCSparseArray):\n data = data.to_dense()\n\n if index is None:\n if not is_list_like(data):\n data = [data]\n index = _default_index(len(data))\n\n # create/copy the manager\n if isinstance(data, SingleBlockManager):\n if dtype is not None:\n data = data.astype(dtype=dtype, raise_on_error=False)\n elif copy:\n data = data.copy()\n else:\n data = _sanitize_array(data, index, dtype, copy,\n raise_cast_failure=True)\n\n data = SingleBlockManager(data, index, fastpath=True)\n\n generic.NDFrame.__init__(self, data, fastpath=True)\n\n object.__setattr__(self, 'name', name)\n self._set_axis(0, index, fastpath=True)\n\n @classmethod\n def from_array(cls, arr, index=None, name=None, dtype=None, copy=False,\n fastpath=False):\n # return a sparse series here\n if isinstance(arr, ABCSparseArray):\n from pandas.sparse.series import SparseSeries\n cls = SparseSeries\n\n return cls(arr, index=index, name=name, dtype=dtype, copy=copy, fastpath=fastpath)\n\n @property\n def _constructor(self):\n return Series\n\n # types\n @property\n def _can_hold_na(self):\n return self._data._can_hold_na\n\n @property\n def is_time_series(self):\n return self._subtyp in 
['time_series', 'sparse_time_series']\n\n _index = None\n\n def _set_axis(self, axis, labels, fastpath=False):\n \"\"\" override generic, we want to set the _typ here \"\"\"\n\n if not fastpath:\n labels = _ensure_index(labels)\n\n is_all_dates = labels.is_all_dates\n if is_all_dates:\n if not isinstance(labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n labels = DatetimeIndex(labels)\n\n # need to set here becuase we changed the index\n if fastpath:\n self._data.set_axis(axis, labels)\n self._set_subtyp(is_all_dates)\n\n object.__setattr__(self, '_index', labels)\n if not fastpath:\n self._data.set_axis(axis, labels)\n\n def _set_subtyp(self, is_all_dates):\n if is_all_dates:\n object.__setattr__(self, '_subtyp', 'time_series')\n else:\n object.__setattr__(self, '_subtyp', 'series')\n\n def _update_inplace(self, result):\n return generic.NDFrame._update_inplace(self, result)\n\n # ndarray compatibility\n @property\n def dtype(self):\n \"\"\" return the dtype object of the underlying data \"\"\"\n return self._data.dtype\n\n @property\n def dtypes(self):\n \"\"\" return the dtype object of the underlying data \"\"\"\n return self._data.dtype\n\n @property\n def ftype(self):\n \"\"\" return if the data is sparse|dense \"\"\"\n return self._data.ftype\n\n @property\n def ftypes(self):\n \"\"\" return if the data is sparse|dense \"\"\"\n return self._data.ftype\n\n @property\n def values(self):\n \"\"\"\n Return Series as ndarray\n\n Returns\n -------\n arr : numpy.ndarray\n \"\"\"\n return self._data.values\n\n def get_values(self):\n \"\"\" same as values (but handles sparseness conversions); is a view \"\"\"\n return self._data.get_values()\n\n\n # ops\n def ravel(self, order='C'):\n \"\"\"\n Return the flattened underlying data as an ndarray\n\n See also\n --------\n numpy.ndarray.ravel\n \"\"\"\n return self.values.ravel(order=order)\n\n def compress(self, condition, axis=0, out=None, **kwargs):\n \"\"\"\n Return selected slices of an array along given axis as a Series\n\n See also\n --------\n numpy.ndarray.compress\n \"\"\"\n return self[condition]\n\n def nonzero(self):\n \"\"\"\n Return the indices of the elements that are non-zero\n\n This method is equivalent to calling `numpy.nonzero` on the\n series data. 
For compatability with NumPy, the return value is\n the same (a tuple with an array of indices for each dimension),\n but it will always be a one-item tuple because series only have\n one dimension.\n\n Examples\n --------\n >>> s = pd.Series([0, 3, 0, 4])\n >>> s.nonzero()\n (array([1, 3]),)\n >>> s.iloc[s.nonzero()[0]]\n 1 3\n 3 4\n dtype: int64\n\n See Also\n --------\n numpy.nonzero\n \"\"\"\n return self.values.nonzero()\n\n def put(self, *args, **kwargs):\n \"\"\"\n return a ndarray with the values put\n\n See also\n --------\n numpy.ndarray.put\n \"\"\"\n self.values.put(*args, **kwargs)\n\n def __len__(self):\n \"\"\"\n return the length of the Series\n \"\"\"\n return len(self._data)\n\n def view(self, dtype=None):\n return self._constructor(self.values.view(dtype),\n index=self.index).__finalize__(self)\n\n def __array__(self, result=None):\n \"\"\"\n the array interface, return my values\n \"\"\"\n return self.get_values()\n\n def __array_wrap__(self, result, context=None):\n \"\"\"\n Gets called after a ufunc\n \"\"\"\n return self._constructor(result, index=self.index,\n copy=False).__finalize__(self)\n\n def __array_prepare__(self, result, context=None):\n \"\"\"\n Gets called prior to a ufunc\n \"\"\"\n\n # nice error message for non-ufunc types\n if context is not None and not isinstance(self.values, np.ndarray):\n obj = context[1][0]\n raise TypeError(\"{obj} with dtype {dtype} cannot perform \"\n \"the numpy op {op}\".format(obj=type(obj).__name__,\n dtype=getattr(obj,'dtype',None),\n op=context[0].__name__))\n return result\n\n # complex\n @property\n def real(self):\n return self.values.real\n\n @real.setter\n def real(self, v):\n self.values.real = v\n\n @property\n def imag(self):\n return self.values.imag\n\n @imag.setter\n def imag(self, v):\n self.values.imag = v\n\n # coercion\n __float__ = _coerce_method(float)\n __long__ = _coerce_method(int)\n __int__ = _coerce_method(int)\n\n # we are preserving name here\n def __getstate__(self):\n return dict(_data=self._data, name=self.name)\n\n def _unpickle_series_compat(self, state):\n if isinstance(state, dict):\n self._data = state['_data']\n self.name = state['name']\n self.index = self._data.index\n\n elif isinstance(state, tuple):\n\n # < 0.12 series pickle\n\n nd_state, own_state = state\n\n # recreate the ndarray\n data = np.empty(nd_state[1], dtype=nd_state[2])\n np.ndarray.__setstate__(data, nd_state)\n\n # backwards compat\n index, name = own_state[0], None\n if len(own_state) > 1:\n name = own_state[1]\n\n # recreate\n self._data = SingleBlockManager(data, index, fastpath=True)\n self._index = index\n self.name = name\n\n else:\n raise Exception(\"cannot unpickle legacy formats -> [%s]\" % state)\n\n # indexers\n @property\n def axes(self):\n return [self.index]\n\n def _ixs(self, i, axis=0):\n \"\"\"\n Return the i-th value or values in the Series by location\n\n Parameters\n ----------\n i : int, slice, or sequence of integers\n\n Returns\n -------\n value : scalar (int) or Series (slice, sequence)\n \"\"\"\n try:\n return _index.get_value_at(self.values, i)\n except IndexError:\n raise\n except:\n if isinstance(i, slice):\n indexer = self.index._convert_slice_indexer(i, typ='iloc')\n return self._get_values(indexer)\n else:\n label = self.index[i]\n if isinstance(label, Index):\n return self.take(i, axis=axis, convert=True)\n else:\n return _index.get_value_at(self, i)\n\n @property\n def _is_mixed_type(self):\n return False\n\n def _slice(self, slobj, axis=0, typ=None):\n slobj = 
self.index._convert_slice_indexer(slobj, typ=typ or 'getitem')\n return self._get_values(slobj)\n\n def __getitem__(self, key):\n try:\n result = self.index.get_value(self, key)\n\n if not np.isscalar(result):\n if is_list_like(result) and not isinstance(result, Series):\n\n # we need to box if we have a non-unique index here\n # otherwise have inline ndarray/lists\n if not self.index.is_unique:\n result = self._constructor(result,\n index=[key]*len(result)\n ,dtype=self.dtype).__finalize__(self)\n\n return result\n except InvalidIndexError:\n pass\n except (KeyError, ValueError):\n if isinstance(key, tuple) and isinstance(self.index, MultiIndex):\n # kludge\n pass\n elif key is Ellipsis:\n return self\n elif _is_bool_indexer(key):\n pass\n else:\n\n # we can try to coerce the indexer (or this will raise)\n new_key = self.index._convert_scalar_indexer(key)\n if type(new_key) != type(key):\n return self.__getitem__(new_key)\n raise\n\n except Exception:\n raise\n\n if com.is_iterator(key):\n key = list(key)\n\n if _is_bool_indexer(key):\n key = _check_bool_indexer(self.index, key)\n\n return self._get_with(key)\n\n def _get_with(self, key):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n indexer = self.index._convert_slice_indexer(key, typ='getitem')\n return self._get_values(indexer)\n else:\n if isinstance(key, tuple):\n try:\n return self._get_values_tuple(key)\n except:\n if len(key) == 1:\n key = key[0]\n if isinstance(key, slice):\n return self._get_values(key)\n raise\n\n # pragma: no cover\n if not isinstance(key, (list, pa.Array, Series, Index)):\n key = list(key)\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n else:\n key_type = lib.infer_dtype(key)\n\n if key_type == 'integer':\n if self.index.is_integer() or self.index.is_floating():\n return self.reindex(key)\n else:\n return self._get_values(key)\n elif key_type == 'boolean':\n return self._get_values(key)\n else:\n try:\n # handle the dup indexing case (GH 4246)\n if isinstance(key, (list, tuple)):\n return self.ix[key]\n\n return self.reindex(key)\n except Exception:\n # [slice(0, 5, None)] will break if you convert to ndarray,\n # e.g. 
as requested by np.median\n # hack\n if isinstance(key[0], slice):\n return self._get_values(key)\n raise\n\n def _get_values_tuple(self, key):\n # mpl hackaround\n if any(k is None for k in key):\n return self._get_values(key)\n\n if not isinstance(self.index, MultiIndex):\n raise ValueError('Can only tuple-index with a MultiIndex')\n\n # If key is contained, would have returned by now\n indexer, new_index = self.index.get_loc_level(key)\n return self._constructor(self.values[indexer],\n index=new_index).__finalize__(self)\n\n def _get_values(self, indexer):\n try:\n return self._constructor(self._data.get_slice(indexer),\n fastpath=True).__finalize__(self)\n except Exception:\n return self.values[indexer]\n\n def __setitem__(self, key, value):\n\n def setitem(key, value):\n try:\n self._set_with_engine(key, value)\n return\n except (SettingWithCopyError):\n raise\n except (KeyError, ValueError):\n values = self.values\n if (com.is_integer(key)\n and not self.index.inferred_type == 'integer'):\n\n values[key] = value\n return\n elif key is Ellipsis:\n self[:] = value\n return\n elif _is_bool_indexer(key):\n pass\n elif com.is_timedelta64_dtype(self.dtype):\n # reassign a null value to iNaT\n if isnull(value):\n value = tslib.iNaT\n\n try:\n self.index._engine.set_value(self.values, key, value)\n return\n except (TypeError):\n pass\n\n self.loc[key] = value\n return\n\n except TypeError as e:\n if isinstance(key, tuple) and not isinstance(self.index,\n MultiIndex):\n raise ValueError(\"Can only tuple-index with a MultiIndex\")\n\n # python 3 type errors should be raised\n if 'unorderable' in str(e): # pragma: no cover\n raise IndexError(key)\n\n if _is_bool_indexer(key):\n key = _check_bool_indexer(self.index, key)\n try:\n self.where(~key, value, inplace=True)\n return\n except (InvalidIndexError):\n pass\n\n self._set_with(key, value)\n\n # do the setitem\n cacher_needs_updating = self._check_is_chained_assignment_possible()\n setitem(key, value)\n if cacher_needs_updating:\n self._maybe_update_cacher()\n\n def _set_with_engine(self, key, value):\n values = self.values\n try:\n self.index._engine.set_value(values, key, value)\n return\n except KeyError:\n values[self.index.get_loc(key)] = value\n return\n\n def _set_with(self, key, value):\n # other: fancy integer or otherwise\n if isinstance(key, slice):\n indexer = self.index._convert_slice_indexer(key, typ='getitem')\n return self._set_values(indexer, value)\n else:\n if isinstance(key, tuple):\n try:\n self._set_values(key, value)\n except Exception:\n pass\n\n if not isinstance(key, (list, Series, pa.Array, Series)):\n try:\n key = list(key)\n except:\n key = [ key ]\n\n if isinstance(key, Index):\n key_type = key.inferred_type\n else:\n key_type = lib.infer_dtype(key)\n\n if key_type == 'integer':\n if self.index.inferred_type == 'integer':\n self._set_labels(key, value)\n else:\n return self._set_values(key, value)\n elif key_type == 'boolean':\n self._set_values(key.astype(np.bool_), value)\n else:\n self._set_labels(key, value)\n\n def _set_labels(self, key, value):\n if isinstance(key, Index):\n key = key.values\n else:\n key = _asarray_tuplesafe(key)\n indexer = self.index.get_indexer(key)\n mask = indexer == -1\n if mask.any():\n raise ValueError('%s not contained in the index'\n % str(key[mask]))\n self._set_values(indexer, value)\n\n def _set_values(self, key, value):\n if isinstance(key, Series):\n key = key.values\n self._data = self._data.setitem(indexer=key, value=value)\n self._maybe_update_cacher()\n\n # help out 
SparseSeries\n _get_val_at = ndarray.__getitem__\n\n def repeat(self, reps):\n \"\"\"\n return a new Series with the values repeated reps times\n\n See also\n --------\n numpy.ndarray.repeat\n \"\"\"\n new_index = self.index.repeat(reps)\n new_values = self.values.repeat(reps)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def reshape(self, *args, **kwargs):\n \"\"\"\n return an ndarray with the values shape\n if the specified shape matches exactly the current shape, then\n return self (for compat)\n\n See also\n --------\n numpy.ndarray.take\n \"\"\"\n if len(args) == 1 and hasattr(args[0], '__iter__'):\n shape = args[0]\n else:\n shape = args\n\n if tuple(shape) == self.shape:\n # XXX ignoring the \"order\" keyword.\n return self\n\n return self.values.reshape(shape, **kwargs)\n\n iget_value = _ixs\n iget = _ixs\n irow = _ixs\n\n def get_value(self, label, takeable=False):\n \"\"\"\n Quickly retrieve single value at passed index label\n\n Parameters\n ----------\n index : label\n takeable : interpret the index as indexers, default False\n\n Returns\n -------\n value : scalar value\n \"\"\"\n if takeable is True:\n return _maybe_box_datetimelike(self.values[label])\n return self.index.get_value(self.values, label)\n\n def set_value(self, label, value, takeable=False):\n \"\"\"\n Quickly set single value at passed label. If label is not contained, a\n new object is created with the label placed at the end of the result\n index\n\n Parameters\n ----------\n label : object\n Partial indexing with MultiIndex not allowed\n value : object\n Scalar value\n takeable : interpret the index as indexers, default False\n\n Returns\n -------\n series : Series\n If label is contained, will be reference to calling Series,\n otherwise a new object\n \"\"\"\n try:\n if takeable:\n self.values[label] = value\n else:\n self.index._engine.set_value(self.values, label, value)\n return self\n except KeyError:\n\n # set using a non-recursive method\n self.loc[label] = value\n return self\n\n def reset_index(self, level=None, drop=False, name=None, inplace=False):\n \"\"\"\n Analogous to the :meth:`pandas.DataFrame.reset_index` function, see\n docstring there.\n\n Parameters\n ----------\n level : int, str, tuple, or list, default None\n Only remove the given levels from the index. 
Removes all levels by\n default\n drop : boolean, default False\n Do not try to insert index into dataframe columns\n name : object, default None\n The name of the column corresponding to the Series values\n inplace : boolean, default False\n Modify the Series in place (do not create a new object)\n\n Returns\n ----------\n resetted : DataFrame, or Series if drop == True\n \"\"\"\n if drop:\n new_index = pa.arange(len(self))\n if level is not None and isinstance(self.index, MultiIndex):\n if not isinstance(level, (tuple, list)):\n level = [level]\n level = [self.index._get_level_number(lev) for lev in level]\n if len(level) < len(self.index.levels):\n new_index = self.index.droplevel(level)\n\n if inplace:\n self.index = new_index\n # set name if it was passed, otherwise, keep the previous name\n self.name = name or self.name\n else:\n return self._constructor(self.values.copy(),\n index=new_index).__finalize__(self)\n elif inplace:\n raise TypeError('Cannot reset_index inplace on a Series '\n 'to create a DataFrame')\n else:\n df = self.to_frame(name)\n return df.reset_index(level=level, drop=drop)\n\n def __unicode__(self):\n \"\"\"\n Return a string representation for a particular DataFrame\n\n Invoked by unicode(df) in py2 only. Yields a Unicode String in both\n py2/py3.\n \"\"\"\n width, height = get_terminal_size()\n max_rows = (height if get_option(\"display.max_rows\") == 0\n else get_option(\"display.max_rows\"))\n if max_rows and len(self.index) > max_rows:\n result = self._tidy_repr(min(30, max_rows - 4))\n elif len(self.index) > 0:\n result = self._get_repr(print_header=True,\n length=len(self) > 50,\n name=True,\n dtype=True)\n elif self.name is None:\n result = u('Series([], dtype: %s)') % (self.dtype)\n else:\n result = u('Series([], name: %s, dtype: %s)') % (self.name,\n self.dtype)\n return result\n\n def _tidy_repr(self, max_vals=20):\n \"\"\"\n\n Internal function, should always return unicode string\n \"\"\"\n if max_vals > 1:\n num = max_vals // 2\n else:\n num = 1\n max_vals = 2\n head = self.iloc[:num]._get_repr(print_header=True, length=False,\n dtype=False, name=False)\n tail = self.iloc[-(max_vals - num):]._get_repr(print_header=False,\n length=False,\n name=False,\n dtype=False)\n result = head + '\\n...\\n' + tail\n result = '%s\\n%s' % (result, self._repr_footer())\n\n return compat.text_type(result)\n\n def _repr_footer(self):\n\n namestr = u(\"Name: %s, \") % com.pprint_thing(\n self.name) if self.name is not None else \"\"\n\n # time series\n if self.is_time_series:\n if self.index.freq is not None:\n freqstr = u('Freq: %s, ') % self.index.freqstr\n else:\n freqstr = u('')\n\n return u('%s%sLength: %d') % (freqstr, namestr, len(self))\n\n # Categorical\n if com.is_categorical_dtype(self.dtype):\n level_info = self.values._repr_categories_info()\n return u('%sLength: %d, dtype: %s\\n%s') % (namestr,\n len(self),\n str(self.dtype.name),\n level_info)\n\n # reg series\n return u('%sLength: %d, dtype: %s') % (namestr,\n len(self),\n str(self.dtype.name))\n\n def to_string(self, buf=None, na_rep='NaN', float_format=None,\n length=False, dtype=False, name=False):\n \"\"\"\n Render a string representation of the Series\n\n Parameters\n ----------\n buf : StringIO-like, optional\n buffer to write to\n na_rep : string, optional\n string representation of NAN to use, default 'NaN'\n float_format : one-parameter function, optional\n formatter function to apply to columns' elements if they are floats\n default None\n length : boolean, default False\n Add the Series 
length\n dtype : boolean, default False\n Add the Series dtype\n name : boolean, default False\n Add the Series name (which may be None)\n\n Returns\n -------\n formatted : string (if not buffer passed)\n \"\"\"\n\n the_repr = self._get_repr(float_format=float_format, na_rep=na_rep,\n length=length, dtype=dtype, name=name)\n\n # catch contract violations\n if not isinstance(the_repr, compat.text_type):\n raise AssertionError(\"result must be of type unicode, type\"\n \" of result is {0!r}\"\n \"\".format(the_repr.__class__.__name__))\n\n if buf is None:\n return the_repr\n else:\n try:\n buf.write(the_repr)\n except AttributeError:\n with open(buf, 'w') as f:\n f.write(the_repr)\n\n def _get_repr(\n self, name=False, print_header=False, length=True, dtype=True,\n na_rep='NaN', float_format=None):\n \"\"\"\n\n Internal function, should always return unicode string\n \"\"\"\n\n formatter = fmt.SeriesFormatter(self, name=name, header=print_header,\n length=length, dtype=dtype,\n na_rep=na_rep,\n float_format=float_format)\n result = formatter.to_string()\n\n # TODO: following check prob. not neces.\n if not isinstance(result, compat.text_type):\n raise AssertionError(\"result must be of type unicode, type\"\n \" of result is {0!r}\"\n \"\".format(result.__class__.__name__))\n return result\n\n def __iter__(self):\n if com.is_categorical_dtype(self.dtype):\n return iter(self.values)\n elif np.issubdtype(self.dtype, np.datetime64):\n return (lib.Timestamp(x) for x in self.values)\n elif np.issubdtype(self.dtype, np.timedelta64):\n return (lib.Timedelta(x) for x in self.values)\n else:\n return iter(self.values)\n\n def iteritems(self):\n \"\"\"\n Lazily iterate over (index, value) tuples\n \"\"\"\n return zip(iter(self.index), iter(self))\n\n if compat.PY3: # pragma: no cover\n items = iteritems\n\n #----------------------------------------------------------------------\n # Misc public methods\n\n def keys(self):\n \"Alias for index\"\n return self.index\n\n def tolist(self):\n \"\"\" Convert Series to a nested list \"\"\"\n return list(self)\n\n def to_dict(self):\n \"\"\"\n Convert Series to {label -> value} dict\n\n Returns\n -------\n value_dict : dict\n \"\"\"\n return dict(compat.iteritems(self))\n\n def to_frame(self, name=None):\n \"\"\"\n Convert Series to DataFrame\n\n Parameters\n ----------\n name : object, default None\n The passed name should substitute for the series name (if it has\n one).\n\n Returns\n -------\n data_frame : DataFrame\n \"\"\"\n from pandas.core.frame import DataFrame\n if name is None:\n df = DataFrame(self)\n else:\n df = DataFrame({name: self})\n\n return df\n\n def to_sparse(self, kind='block', fill_value=None):\n \"\"\"\n Convert Series to SparseSeries\n\n Parameters\n ----------\n kind : {'block', 'integer'}\n fill_value : float, defaults to NaN (missing)\n\n Returns\n -------\n sp : SparseSeries\n \"\"\"\n from pandas.core.sparse import SparseSeries\n return SparseSeries(self, kind=kind,\n fill_value=fill_value).__finalize__(self)\n\n #----------------------------------------------------------------------\n # Statistics, overridden ndarray methods\n\n # TODO: integrate bottleneck\n\n def count(self, level=None):\n \"\"\"\n Return number of non-NA/null observations in the Series\n\n Parameters\n ----------\n level : int or level name, default None\n If the axis is a MultiIndex (hierarchical), count along a\n particular level, collapsing into a smaller Series\n\n Returns\n -------\n nobs : int or Series (if level specified)\n \"\"\"\n if level is not 
None:\n mask = notnull(self.values)\n\n if isinstance(level, compat.string_types):\n level = self.index._get_level_number(level)\n\n level_index = self.index.levels[level]\n\n if len(self) == 0:\n return self._constructor(0, index=level_index)\\\n .__finalize__(self)\n\n # call cython function\n max_bin = len(level_index)\n labels = com._ensure_int64(self.index.labels[level])\n counts = lib.count_level_1d(mask.view(pa.uint8),\n labels, max_bin)\n return self._constructor(counts,\n index=level_index).__finalize__(self)\n\n return notnull(_values_from_object(self)).sum()\n\n def mode(self):\n \"\"\"Returns the mode(s) of the dataset.\n\n Empty if nothing occurs at least 2 times. Always returns Series even\n if only one value.\n\n Parameters\n ----------\n sort : bool, default True\n If True, will lexicographically sort values, if False skips\n sorting. Result ordering when ``sort=False`` is not defined.\n\n Returns\n -------\n modes : Series (sorted)\n \"\"\"\n # TODO: Add option for bins like value_counts()\n from pandas.core.algorithms import mode\n return mode(self)\n\n @Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)\n def drop_duplicates(self, take_last=False, inplace=False):\n return super(Series, self).drop_duplicates(take_last=take_last,\n inplace=inplace)\n\n @Appender(base._shared_docs['duplicated'] % _shared_doc_kwargs)\n def duplicated(self, take_last=False):\n return super(Series, self).duplicated(take_last=take_last)\n\n def idxmin(self, axis=None, out=None, skipna=True):\n \"\"\"\n Index of first occurrence of minimum of values.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values\n\n Returns\n -------\n idxmin : Index of minimum of values\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmin``.\n\n See Also\n --------\n DataFrame.idxmin\n numpy.ndarray.argmin\n \"\"\"\n i = nanops.nanargmin(_values_from_object(self), skipna=skipna)\n if i == -1:\n return pa.NA\n return self.index[i]\n\n def idxmax(self, axis=None, out=None, skipna=True):\n \"\"\"\n Index of first occurrence of maximum of values.\n\n Parameters\n ----------\n skipna : boolean, default True\n Exclude NA/null values\n\n Returns\n -------\n idxmax : Index of maximum of values\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmax``.\n\n See Also\n --------\n DataFrame.idxmax\n numpy.ndarray.argmax\n \"\"\"\n i = nanops.nanargmax(_values_from_object(self), skipna=skipna)\n if i == -1:\n return pa.NA\n return self.index[i]\n\n # ndarray compat\n argmin = idxmin\n argmax = idxmax\n\n @Appender(pa.Array.round.__doc__)\n def round(self, decimals=0, out=None):\n \"\"\"\n\n \"\"\"\n result = _values_from_object(self).round(decimals, out=out)\n if out is None:\n result = self._constructor(result,\n index=self.index).__finalize__(self)\n\n return result\n\n def quantile(self, q=0.5):\n \"\"\"\n Return value at the given quantile, a la numpy.percentile.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute\n\n Returns\n -------\n quantile : float or Series\n if ``q`` is an array, a Series will be returned where the\n index is ``q`` and the values are the quantiles.\n\n Examples\n --------\n\n >>> s = Series([1, 2, 3, 4])\n >>> s.quantile(.5)\n 2.5\n >>> s.quantile([.25, .5, .75])\n 0.25 1.75\n 0.50 2.50\n 0.75 3.25\n dtype: float64\n \"\"\"\n valid = self.dropna()\n\n def multi(values, qs):\n if com.is_list_like(qs):\n return Series([_quantile(values, x*100)\n for 
x in qs], index=qs)\n else:\n return _quantile(values, qs*100)\n\n return self._maybe_box(lambda values: multi(values, q), dropna=True)\n\n def ptp(self, axis=None, out=None):\n return _values_from_object(self).ptp(axis, out)\n\n def corr(self, other, method='pearson',\n min_periods=None):\n \"\"\"\n Compute correlation with `other` Series, excluding missing values\n\n Parameters\n ----------\n other : Series\n method : {'pearson', 'kendall', 'spearman'}\n * pearson : standard correlation coefficient\n * kendall : Kendall Tau correlation coefficient\n * spearman : Spearman rank correlation\n min_periods : int, optional\n Minimum number of observations needed to have a valid result\n\n\n Returns\n -------\n correlation : float\n \"\"\"\n this, other = self.align(other, join='inner', copy=False)\n if len(this) == 0:\n return pa.NA\n return nanops.nancorr(this.values, other.values, method=method,\n min_periods=min_periods)\n\n def cov(self, other, min_periods=None):\n \"\"\"\n Compute covariance with Series, excluding missing values\n\n Parameters\n ----------\n other : Series\n min_periods : int, optional\n Minimum number of observations needed to have a valid result\n\n Returns\n -------\n covariance : float\n\n Normalized by N-1 (unbiased estimator).\n \"\"\"\n this, other = self.align(other, join='inner', copy=False)\n if len(this) == 0:\n return pa.NA\n return nanops.nancov(this.values, other.values,\n min_periods=min_periods)\n\n def diff(self, periods=1):\n \"\"\"\n 1st discrete difference of object\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming difference\n\n Returns\n -------\n diffed : Series\n \"\"\"\n result = com.diff(_values_from_object(self), periods)\n return self._constructor(result, index=self.index).__finalize__(self)\n\n def autocorr(self):\n \"\"\"\n Lag-1 autocorrelation\n\n Returns\n -------\n autocorr : float\n \"\"\"\n return self.corr(self.shift(1))\n\n def dot(self, other):\n \"\"\"\n Matrix multiplication with DataFrame or inner-product with Series\n objects\n\n Parameters\n ----------\n other : Series or DataFrame\n\n Returns\n -------\n dot_product : scalar or Series\n \"\"\"\n from pandas.core.frame import DataFrame\n if isinstance(other, (Series, DataFrame)):\n common = self.index.union(other.index)\n if (len(common) > len(self.index) or\n len(common) > len(other.index)):\n raise ValueError('matrices are not aligned')\n\n left = self.reindex(index=common, copy=False)\n right = other.reindex(index=common, copy=False)\n lvals = left.values\n rvals = right.values\n else:\n left = self\n lvals = self.values\n rvals = np.asarray(other)\n if lvals.shape[0] != rvals.shape[0]:\n raise Exception('Dot product shape mismatch, %s vs %s' %\n (lvals.shape, rvals.shape))\n\n if isinstance(other, DataFrame):\n return self._constructor(np.dot(lvals, rvals),\n index=other.columns).__finalize__(self)\n elif isinstance(other, Series):\n return np.dot(lvals, rvals)\n elif isinstance(rvals, np.ndarray):\n return np.dot(lvals, rvals)\n else: # pragma: no cover\n raise TypeError('unsupported type: %s' % type(other))\n\n #------------------------------------------------------------------------------\n # Combination\n\n def append(self, to_append, verify_integrity=False):\n \"\"\"\n Concatenate two or more Series. 
The indexes must not overlap\n\n Parameters\n ----------\n to_append : Series or list/tuple of Series\n verify_integrity : boolean, default False\n If True, raise Exception on creating index with duplicates\n\n Returns\n -------\n appended : Series\n \"\"\"\n from pandas.tools.merge import concat\n\n if isinstance(to_append, (list, tuple)):\n to_concat = [self] + to_append\n else:\n to_concat = [self, to_append]\n return concat(to_concat, ignore_index=False,\n verify_integrity=verify_integrity)\n\n def _binop(self, other, func, level=None, fill_value=None):\n \"\"\"\n Perform generic binary operation with optional fill value\n\n Parameters\n ----------\n other : Series\n func : binary operator\n fill_value : float or object\n Value to substitute for NA/null values. If both Series are NA in a\n location, the result will be NA regardless of the passed fill value\n level : int or level name, default None\n Broadcast across a level, matching Index values on the\n passed MultiIndex level\n\n Returns\n -------\n combined : Series\n \"\"\"\n if not isinstance(other, Series):\n raise AssertionError('Other operand must be Series')\n\n new_index = self.index\n this = self\n\n if not self.index.equals(other.index):\n this, other = self.align(other, level=level, join='outer', copy=False)\n new_index = this.index\n\n this_vals = this.values\n other_vals = other.values\n\n if fill_value is not None:\n this_mask = isnull(this_vals)\n other_mask = isnull(other_vals)\n this_vals = this_vals.copy()\n other_vals = other_vals.copy()\n\n # one but not both\n mask = this_mask ^ other_mask\n this_vals[this_mask & mask] = fill_value\n other_vals[other_mask & mask] = fill_value\n\n result = func(this_vals, other_vals)\n name = _maybe_match_name(self, other)\n return self._constructor(result, index=new_index).__finalize__(self)\n\n def combine(self, other, func, fill_value=nan):\n \"\"\"\n Perform elementwise binary operation on two Series using given function\n with optional fill value when an index is missing from one Series or\n the other\n\n Parameters\n ----------\n other : Series or scalar value\n func : function\n fill_value : scalar value\n\n Returns\n -------\n result : Series\n \"\"\"\n if isinstance(other, Series):\n new_index = self.index.union(other.index)\n new_name = _maybe_match_name(self, other)\n new_values = pa.empty(len(new_index), dtype=self.dtype)\n for i, idx in enumerate(new_index):\n lv = self.get(idx, fill_value)\n rv = other.get(idx, fill_value)\n new_values[i] = func(lv, rv)\n else:\n new_index = self.index\n new_values = func(self.values, other)\n new_name = self.name\n return self._constructor(new_values, index=new_index, name=new_name)\n\n def combine_first(self, other):\n \"\"\"\n Combine Series values, choosing the calling Series's values\n first. Result index will be the union of the two indexes\n\n Parameters\n ----------\n other : Series\n\n Returns\n -------\n y : Series\n \"\"\"\n new_index = self.index.union(other.index)\n this = self.reindex(new_index, copy=False)\n other = other.reindex(new_index, copy=False)\n name = _maybe_match_name(self, other)\n rs_vals = com._where_compat(isnull(this), other.values, this.values)\n return self._constructor(rs_vals, index=new_index).__finalize__(self)\n\n def update(self, other):\n \"\"\"\n Modify Series in place using non-NA values from passed\n Series. 
Aligns on index\n\n Parameters\n ----------\n other : Series\n \"\"\"\n other = other.reindex_like(self)\n mask = notnull(other)\n\n self._data = self._data.putmask(mask=mask, new=other, inplace=True)\n self._maybe_update_cacher()\n\n #----------------------------------------------------------------------\n # Reindexing, sorting\n\n def sort_index(self, ascending=True):\n \"\"\"\n Sort object by labels (along an axis)\n\n Parameters\n ----------\n ascending : boolean or list, default True\n Sort ascending vs. descending. Specify list for multiple sort\n orders\n\n Examples\n --------\n >>> result1 = s.sort_index(ascending=False)\n >>> result2 = s.sort_index(ascending=[1, 0])\n\n Returns\n -------\n sorted_obj : Series\n \"\"\"\n index = self.index\n if isinstance(index, MultiIndex):\n from pandas.core.groupby import _lexsort_indexer\n indexer = _lexsort_indexer(index.labels, orders=ascending)\n indexer = com._ensure_platform_int(indexer)\n new_labels = index.take(indexer)\n else:\n new_labels, indexer = index.order(return_indexer=True,\n ascending=ascending)\n\n new_values = self.values.take(indexer)\n return self._constructor(new_values,\n index=new_labels).__finalize__(self)\n\n def argsort(self, axis=0, kind='quicksort', order=None):\n \"\"\"\n Overrides ndarray.argsort. Argsorts the value, omitting NA/null values,\n and places the result in the same locations as the non-NA values\n\n Parameters\n ----------\n axis : int (can only be zero)\n kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See np.sort for more\n information. 'mergesort' is the only stable algorithm\n order : ignored\n\n Returns\n -------\n argsorted : Series, with -1 indicated where nan values are present\n\n See also\n --------\n numpy.ndarray.argsort\n \"\"\"\n values = self.values\n mask = isnull(values)\n\n if mask.any():\n result = Series(\n -1, index=self.index, name=self.name, dtype='int64')\n notmask = ~mask\n result[notmask] = np.argsort(values[notmask], kind=kind)\n return self._constructor(result,\n index=self.index).__finalize__(self)\n else:\n return self._constructor(\n np.argsort(values, kind=kind), index=self.index,\n dtype='int64').__finalize__(self)\n\n def rank(self, method='average', na_option='keep', ascending=True,\n pct=False):\n \"\"\"\n Compute data ranks (1 through n). Equal values are assigned a rank that\n is the average of the ranks of those values\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n na_option : {'keep'}\n keep: leave NA values where they are\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n pct : boolean, default False\n Computes percentage rank of data\n\n Returns\n -------\n ranks : Series\n \"\"\"\n from pandas.core.algorithms import rank\n ranks = rank(self.values, method=method, na_option=na_option,\n ascending=ascending, pct=pct)\n return self._constructor(ranks, index=self.index).__finalize__(self)\n\n def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last', inplace=True):\n \"\"\"\n Sort values and index labels by value. 
This is an inplace sort by default.\n Series.order is the equivalent but returns a new Series.\n\n Parameters\n ----------\n axis : int (can only be zero)\n ascending : boolean, default True\n Sort ascending. Passing False sorts descending\n kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See np.sort for more\n information. 'mergesort' is the only stable algorithm\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n inplace : boolean, default True\n Do operation in place.\n\n See Also\n --------\n Series.order\n \"\"\"\n return self.order(ascending=ascending,\n kind=kind,\n na_position=na_position,\n inplace=inplace)\n\n def order(self, na_last=None, ascending=True, kind='quicksort', na_position='last', inplace=False):\n \"\"\"\n Sorts Series object, by value, maintaining index-value link.\n This will return a new Series by default. Series.sort is the equivalent but as an inplace method.\n\n Parameters\n ----------\n na_last : boolean (optional, default=True) (DEPRECATED; use na_position)\n Put NaN's at beginning or end\n ascending : boolean, default True\n Sort ascending. Passing False sorts descending\n kind : {'mergesort', 'quicksort', 'heapsort'}, default 'quicksort'\n Choice of sorting algorithm. See np.sort for more\n information. 'mergesort' is the only stable algorithm\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n inplace : boolean, default False\n Do operation in place.\n\n Returns\n -------\n y : Series\n\n See Also\n --------\n Series.sort\n \"\"\"\n\n # GH 5856/5853\n if inplace and self._is_cached:\n raise ValueError(\"This Series is a view of some other array, to \"\n \"sort in-place you must create a copy\")\n\n if na_last is not None:\n warnings.warn((\"na_last is deprecated. 
Please use na_position instead\"),\n FutureWarning)\n na_position = 'last' if na_last else 'first'\n\n def _try_kind_sort(arr):\n # easier to ask forgiveness than permission\n try:\n # if kind==mergesort, it can fail for object dtype\n return arr.argsort(kind=kind)\n except TypeError:\n # stable sort not available for object dtype\n # uses the argsort default quicksort\n return arr.argsort(kind='quicksort')\n\n arr = self.values\n sortedIdx = pa.empty(len(self), dtype=np.int32)\n\n bad = isnull(arr)\n\n good = ~bad\n idx = pa.arange(len(self))\n\n argsorted = _try_kind_sort(arr[good])\n\n if not ascending:\n argsorted = argsorted[::-1]\n\n if na_position == 'last':\n n = good.sum()\n sortedIdx[:n] = idx[good][argsorted]\n sortedIdx[n:] = idx[bad]\n elif na_position == 'first':\n n = bad.sum()\n sortedIdx[n:] = idx[good][argsorted]\n sortedIdx[:n] = idx[bad]\n else:\n raise ValueError('invalid na_position: {!r}'.format(na_position))\n\n result = self._constructor(arr[sortedIdx], index=self.index[sortedIdx])\n\n if inplace:\n self._update_inplace(result)\n else:\n return result.__finalize__(self)\n\n def nlargest(self, n=5, take_last=False):\n \"\"\"Return the largest `n` elements.\n\n Parameters\n ----------\n n : int\n Return this many descending sorted values\n take_last : bool\n Where there are duplicate values, take the last duplicate\n\n Returns\n -------\n top_n : Series\n The n largest values in the Series, in sorted order\n\n Notes\n -----\n Faster than ``.order(ascending=False).head(n)`` for small `n` relative\n to the size of the ``Series`` object.\n\n See Also\n --------\n Series.nsmallest\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> s = pd.Series(np.random.randn(1e6))\n >>> s.nlargest(10) # only sorts up to the N requested\n \"\"\"\n return select_n(self, n=n, take_last=take_last, method='nlargest')\n\n def nsmallest(self, n=5, take_last=False):\n \"\"\"Return the smallest `n` elements.\n\n Parameters\n ----------\n n : int\n Return this many ascending sorted values\n take_last : bool\n Where there are duplicate values, take the last duplicate\n\n Returns\n -------\n bottom_n : Series\n The n smallest values in the Series, in sorted order\n\n Notes\n -----\n Faster than ``.order().head(n)`` for small `n` relative to\n the size of the ``Series`` object.\n\n See Also\n --------\n Series.nlargest\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> s = pd.Series(np.random.randn(1e6))\n >>> s.nsmallest(10) # only sorts up to the N requested\n \"\"\"\n return select_n(self, n=n, take_last=take_last, method='nsmallest')\n\n def sortlevel(self, level=0, ascending=True, sort_remaining=True):\n \"\"\"\n Sort Series with MultiIndex by chosen level. Data will be\n lexicographically sorted by the chosen level followed by the other\n levels (in order)\n\n Parameters\n ----------\n level : int or level name, default None\n ascending : bool, default True\n\n Returns\n -------\n sorted : Series\n \"\"\"\n if not isinstance(self.index, MultiIndex):\n raise TypeError('can only sort by level with a hierarchical index')\n\n new_index, indexer = self.index.sortlevel(level, ascending=ascending,\n sort_remaining=sort_remaining)\n new_values = self.values.take(indexer)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def swaplevel(self, i, j, copy=True):\n \"\"\"\n Swap levels i and j in a MultiIndex\n\n Parameters\n ----------\n i, j : int, string (can be mixed)\n Level of index to be swapped. 
Can pass level name as string.\n\n Returns\n -------\n swapped : Series\n \"\"\"\n new_index = self.index.swaplevel(i, j)\n return self._constructor(self.values, index=new_index,\n copy=copy).__finalize__(self)\n\n def reorder_levels(self, order):\n \"\"\"\n Rearrange index levels using input order. May not drop or duplicate\n levels\n\n Parameters\n ----------\n order: list of int representing new level order.\n (reference level by number or key)\n axis: where to reorder levels\n\n Returns\n -------\n type of caller (new object)\n \"\"\"\n if not isinstance(self.index, MultiIndex): # pragma: no cover\n raise Exception('Can only reorder levels on a hierarchical axis.')\n\n result = self.copy()\n result.index = result.index.reorder_levels(order)\n return result\n\n def unstack(self, level=-1):\n \"\"\"\n Unstack, a.k.a. pivot, Series with MultiIndex to produce DataFrame.\n The level involved will automatically get sorted.\n\n Parameters\n ----------\n level : int, string, or list of these, default last level\n Level(s) to unstack, can pass level name\n\n Examples\n --------\n >>> s\n one a 1.\n one b 2.\n two a 3.\n two b 4.\n\n >>> s.unstack(level=-1)\n a b\n one 1. 2.\n two 3. 4.\n\n >>> s.unstack(level=0)\n one two\n a 1. 2.\n b 3. 4.\n\n Returns\n -------\n unstacked : DataFrame\n \"\"\"\n from pandas.core.reshape import unstack\n return unstack(self, level)\n\n #----------------------------------------------------------------------\n # function application\n\n def map(self, arg, na_action=None):\n \"\"\"\n Map values of Series using input correspondence (which can be\n a dict, Series, or function)\n\n Parameters\n ----------\n arg : function, dict, or Series\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values\n\n Examples\n --------\n >>> x\n one 1\n two 2\n three 3\n\n >>> y\n 1 foo\n 2 bar\n 3 baz\n\n >>> x.map(y)\n one foo\n two bar\n three baz\n\n Returns\n -------\n y : Series\n same index as caller\n \"\"\"\n values = self.values\n if com.is_datetime64_dtype(values.dtype):\n values = lib.map_infer(values, lib.Timestamp)\n\n if na_action == 'ignore':\n mask = isnull(values)\n\n def map_f(values, f):\n return lib.map_infer_mask(values, f, mask.view(pa.uint8))\n else:\n map_f = lib.map_infer\n\n if isinstance(arg, (dict, Series)):\n if isinstance(arg, dict):\n arg = self._constructor(arg, index=arg.keys())\n\n indexer = arg.index.get_indexer(values)\n new_values = com.take_1d(arg.values, indexer)\n return self._constructor(new_values,\n index=self.index).__finalize__(self)\n else:\n mapped = map_f(values, arg)\n return self._constructor(mapped,\n index=self.index).__finalize__(self)\n\n def apply(self, func, convert_dtype=True, args=(), **kwds):\n \"\"\"\n Invoke function on values of Series. Can be ufunc (a NumPy function\n that applies to the entire Series) or a Python function that only works\n on single values\n\n Parameters\n ----------\n func : function\n convert_dtype : boolean, default True\n Try to find better dtype for elementwise function results. 
If\n False, leave as dtype=object\n args : tuple\n Positional arguments to pass to function in addition to the value\n Additional keyword arguments will be passed as keywords to the function\n\n See also\n --------\n Series.map: For element-wise operations\n\n Returns\n -------\n y : Series or DataFrame if func returns a Series\n \"\"\"\n if len(self) == 0:\n return Series()\n\n if kwds or args and not isinstance(func, np.ufunc):\n f = lambda x: func(x, *args, **kwds)\n else:\n f = func\n\n if isinstance(f, np.ufunc):\n return f(self)\n\n values = _values_from_object(self)\n if com.is_datetime64_dtype(values.dtype):\n values = lib.map_infer(values, lib.Timestamp)\n\n mapped = lib.map_infer(values, f, convert=convert_dtype)\n if len(mapped) and isinstance(mapped[0], Series):\n from pandas.core.frame import DataFrame\n return DataFrame(mapped.tolist(), index=self.index)\n else:\n return self._constructor(mapped,\n index=self.index).__finalize__(self)\n\n def _reduce(self, op, axis=0, skipna=True, numeric_only=None,\n filter_type=None, name=None, **kwds):\n \"\"\"\n perform a reduction operation\n\n if we have an ndarray as a value, then simply perform the operation,\n otherwise delegate to the object\n\n \"\"\"\n delegate = self.values\n if isinstance(delegate, np.ndarray):\n return op(delegate, skipna=skipna, **kwds)\n\n return delegate._reduce(op=op, axis=axis, skipna=skipna, numeric_only=numeric_only,\n filter_type=filter_type, name=name, **kwds)\n\n def _maybe_box(self, func, dropna=False):\n \"\"\"\n evaluate a function with possible input/output conversion if we are i8\n\n Parameters\n ----------\n dropna : bool, default False\n whether to drop values if necessary\n\n \"\"\"\n if dropna:\n values = self.dropna().values\n else:\n values = self.values\n\n if com.needs_i8_conversion(self):\n boxer = com.i8_boxer(self)\n\n if len(values) == 0:\n return boxer(iNaT)\n\n values = values.view('i8')\n result = func(values)\n\n if com.is_list_like(result):\n result = result.map(boxer)\n else:\n result = boxer(result)\n\n else:\n\n # let the function return nan if appropriate\n if dropna:\n if len(values) == 0:\n return np.nan\n result = func(values)\n\n return result\n\n def _reindex_indexer(self, new_index, indexer, copy):\n if indexer is None:\n if copy:\n return self.copy()\n return self\n\n # be subclass-friendly\n new_values = com.take_1d(self.get_values(), indexer)\n return self._constructor(new_values, index=new_index)\n\n def _needs_reindex_multi(self, axes, method, level):\n \"\"\" check if we do need a multi reindex; this is for compat with\n higher dims\n \"\"\"\n return False\n\n @Appender(generic._shared_docs['rename'] % _shared_doc_kwargs)\n def rename(self, index=None, **kwargs):\n return super(Series, self).rename(index=index, **kwargs)\n\n @Appender(generic._shared_docs['reindex'] % _shared_doc_kwargs)\n def reindex(self, index=None, **kwargs):\n return super(Series, self).reindex(index=index, **kwargs)\n\n def reindex_axis(self, labels, axis=0, **kwargs):\n \"\"\" for compatibility with higher dims \"\"\"\n if axis != 0:\n raise ValueError(\"cannot reindex series on non-zero axis!\")\n return self.reindex(index=labels, **kwargs)\n\n def take(self, indices, axis=0, convert=True, is_copy=False):\n \"\"\"\n return Series corresponding to requested indices\n\n Parameters\n ----------\n indices : list / array of ints\n convert : translate negative to positive indices (default)\n\n Returns\n -------\n taken : Series\n\n See also\n --------\n numpy.ndarray.take\n \"\"\"\n # 
check/convert indicies here\n if convert:\n indices = _maybe_convert_indices(\n indices, len(self._get_axis(axis)))\n\n indices = com._ensure_platform_int(indices)\n new_index = self.index.take(indices)\n new_values = self.values.take(indices)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def isin(self, values):\n \"\"\"\n Return a boolean :class:`~pandas.Series` showing whether each element\n in the :class:`~pandas.Series` is exactly contained in the passed\n sequence of ``values``.\n\n Parameters\n ----------\n values : list-like\n The sequence of values to test. Passing in a single string will\n raise a ``TypeError``. Instead, turn a single string into a\n ``list`` of one element.\n\n Returns\n -------\n isin : Series (bool dtype)\n\n Raises\n ------\n TypeError\n * If ``values`` is a string\n\n See Also\n --------\n pandas.DataFrame.isin\n\n Examples\n --------\n\n >>> s = pd.Series(list('abc'))\n >>> s.isin(['a', 'c', 'e'])\n 0 True\n 1 False\n 2 True\n dtype: bool\n\n Passing a single string as ``s.isin('a')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['a'])\n 0 True\n 1 False\n 2 False\n dtype: bool\n\n \"\"\"\n if not com.is_list_like(values):\n raise TypeError(\"only list-like objects are allowed to be passed\"\n \" to Series.isin(), you passed a \"\n \"{0!r}\".format(type(values).__name__))\n\n # may need i8 conversion for proper membership testing\n comps = _values_from_object(self)\n if com.is_datetime64_dtype(self):\n from pandas.tseries.tools import to_datetime\n values = Series(to_datetime(values)).values.view('i8')\n comps = comps.view('i8')\n elif com.is_timedelta64_dtype(self):\n from pandas.tseries.timedeltas import to_timedelta\n values = Series(to_timedelta(values)).values.view('i8')\n comps = comps.view('i8')\n\n value_set = set(values)\n result = lib.ismember(comps, value_set)\n return self._constructor(result, index=self.index).__finalize__(self)\n\n def between(self, left, right, inclusive=True):\n \"\"\"\n Return boolean Series equivalent to left <= series <= right. NA values\n will be treated as False\n\n Parameters\n ----------\n left : scalar\n Left boundary\n right : scalar\n Right boundary\n\n Returns\n -------\n is_between : Series\n \"\"\"\n if inclusive:\n lmask = self >= left\n rmask = self <= right\n else:\n lmask = self > left\n rmask = self < right\n\n return lmask & rmask\n\n @classmethod\n def from_csv(cls, path, sep=',', parse_dates=True, header=None,\n index_col=0, encoding=None, infer_datetime_format=False):\n \"\"\"\n Read delimited file into Series\n\n Parameters\n ----------\n path : string file path or file handle / StringIO\n sep : string, default ','\n Field delimiter\n parse_dates : boolean, default True\n Parse dates. Different default from read_table\n header : int, default 0\n Row to use at header (skip prior rows)\n index_col : int or sequence, default 0\n Column to use for index. If a sequence is given, a MultiIndex\n is used. Different default from read_table\n encoding : string, optional\n a string representing the encoding to use if the contents are\n non-ascii, for python versions prior to 3\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. 
If the format\n can be inferred, there often will be a large parsing speed-up.\n\n Returns\n -------\n y : Series\n \"\"\"\n from pandas.core.frame import DataFrame\n df = DataFrame.from_csv(path, header=header, index_col=index_col,\n sep=sep, parse_dates=parse_dates,\n encoding=encoding,\n infer_datetime_format=infer_datetime_format)\n result = df.icol(0)\n result.index.name = result.name = None\n return result\n\n def to_csv(self, path, index=True, sep=\",\", na_rep='',\n float_format=None, header=False,\n index_label=None, mode='w', nanRep=None, encoding=None,\n date_format=None):\n \"\"\"\n Write Series to a comma-separated values (csv) file\n\n Parameters\n ----------\n path : string file path or file handle / StringIO. If None is provided\n the result is returned as a string.\n na_rep : string, default ''\n Missing data representation\n float_format : string, default None\n Format string for floating point numbers\n header : boolean, default False\n Write out series name\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n mode : Python write mode, default 'w'\n sep : character, default \",\"\n Field delimiter for the output file.\n encoding : string, optional\n a string representing the encoding to use if the contents are\n non-ascii, for python versions prior to 3\n date_format: string, default None\n Format string for datetime objects.\n \"\"\"\n from pandas.core.frame import DataFrame\n df = DataFrame(self)\n # result is only a string if no path provided, otherwise None\n result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep,\n float_format=float_format, header=header,\n index_label=index_label, mode=mode, nanRep=nanRep,\n encoding=encoding, date_format=date_format)\n if path is None:\n return result\n\n def dropna(self, axis=0, inplace=False, **kwargs):\n \"\"\"\n Return Series without null values\n\n Returns\n -------\n valid : Series\n inplace : boolean, default False\n Do operation in place.\n \"\"\"\n axis = self._get_axis_number(axis or 0)\n result = remove_na(self)\n if inplace:\n self._update_inplace(result)\n else:\n return result\n\n valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace,\n **kwargs)\n\n def first_valid_index(self):\n \"\"\"\n Return label for first non-NA/null value\n \"\"\"\n if len(self) == 0:\n return None\n\n mask = isnull(self.values)\n i = mask.argmin()\n if mask[i]:\n return None\n else:\n return self.index[i]\n\n def last_valid_index(self):\n \"\"\"\n Return label for last non-NA/null value\n \"\"\"\n if len(self) == 0:\n return None\n\n mask = isnull(self.values[::-1])\n i = mask.argmin()\n if mask[i]:\n return None\n else:\n return self.index[len(self) - i - 1]\n\n #----------------------------------------------------------------------\n # Time series-oriented methods\n\n def asof(self, where):\n \"\"\"\n Return last good (non-NaN) value in TimeSeries if value is NaN for\n requested date.\n\n If there is no good value, NaN is returned.\n\n Parameters\n ----------\n where : date or array of dates\n\n Notes\n -----\n Dates are assumed to be sorted\n\n Returns\n -------\n value or NaN\n \"\"\"\n if isinstance(where, compat.string_types):\n where = datetools.to_datetime(where)\n\n values = self.values\n\n if not hasattr(where, '__iter__'):\n start = self.index[0]\n if 
isinstance(self.index, PeriodIndex):\n where = Period(where, freq=self.index.freq).ordinal\n start = start.ordinal\n\n if where < start:\n return pa.NA\n loc = self.index.searchsorted(where, side='right')\n if loc > 0:\n loc -= 1\n while isnull(values[loc]) and loc > 0:\n loc -= 1\n return values[loc]\n\n if not isinstance(where, Index):\n where = Index(where)\n\n locs = self.index.asof_locs(where, notnull(values))\n new_values = com.take_1d(values, locs)\n return self._constructor(new_values, index=where).__finalize__(self)\n\n @cache_readonly\n def str(self):\n from pandas.core.strings import StringMethods\n return StringMethods(self)\n\n def to_timestamp(self, freq=None, how='start', copy=True):\n \"\"\"\n Cast to datetimeindex of timestamps, at *beginning* of period\n\n Parameters\n ----------\n freq : string, default frequency of PeriodIndex\n Desired frequency\n how : {'s', 'e', 'start', 'end'}\n Convention for converting period to timestamp; start of period\n vs. end\n\n Returns\n -------\n ts : TimeSeries with DatetimeIndex\n \"\"\"\n new_values = self.values\n if copy:\n new_values = new_values.copy()\n\n new_index = self.index.to_timestamp(freq=freq, how=how)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n def to_period(self, freq=None, copy=True):\n \"\"\"\n Convert TimeSeries from DatetimeIndex to PeriodIndex with desired\n frequency (inferred from index if not passed)\n\n Parameters\n ----------\n freq : string, default\n\n Returns\n -------\n ts : TimeSeries with PeriodIndex\n \"\"\"\n new_values = self.values\n if copy:\n new_values = new_values.copy()\n\n new_index = self.index.to_period(freq=freq)\n return self._constructor(new_values,\n index=new_index).__finalize__(self)\n\n #------------------------------------------------------------------------------\n # Datetimelike delegation methods\n\n @cache_readonly\n def dt(self):\n from pandas.tseries.common import maybe_to_datetimelike\n try:\n return maybe_to_datetimelike(self)\n except (Exception):\n raise TypeError(\"Can only use .dt accessor with datetimelike values\")\n\n #------------------------------------------------------------------------------\n # Categorical methods\n\n @cache_readonly\n def cat(self):\n from pandas.core.categorical import CategoricalAccessor\n if not com.is_categorical_dtype(self.dtype):\n raise TypeError(\"Can only use .cat accessor with a 'category' dtype\")\n return CategoricalAccessor(self.values, self.index)\n\nSeries._setup_axes(['index'], info_axis=0, stat_axis=0,\n aliases={'rows': 0})\nSeries._add_numeric_operations()\n_INDEX_TYPES = ndarray, Index, list, tuple\n\n#------------------------------------------------------------------------------\n# Supplementary functions\n\n\ndef remove_na(series):\n \"\"\"\n Return series containing only true/non-NaN values, possibly empty.\n \"\"\"\n return series[notnull(_values_from_object(series))]\n\n\ndef _sanitize_index(data, index, copy=False):\n \"\"\" sanitize an index type to return an ndarray of the underlying, pass thru a non-Index \"\"\"\n\n if len(data) != len(index):\n raise ValueError('Length of values does not match length of '\n 'index')\n\n if isinstance(data, PeriodIndex):\n data = data.asobject\n elif isinstance(data, DatetimeIndex):\n data = data._to_embed(keep_tz=True)\n if copy:\n data = data.copy()\n elif isinstance(data, np.ndarray):\n\n # coerce datetimelike types\n if data.dtype.kind in ['M','m']:\n data = _sanitize_array(data, index, copy=copy)\n\n return data\n\ndef _sanitize_array(data, 
index, dtype=None, copy=False,\n raise_cast_failure=False):\n \"\"\" sanitize input data to an ndarray, copy if specified, coerce to the dtype if specified \"\"\"\n\n if dtype is not None:\n dtype = _coerce_to_dtype(dtype)\n\n if isinstance(data, ma.MaskedArray):\n mask = ma.getmaskarray(data)\n if mask.any():\n data, fill_value = _maybe_upcast(data, copy=True)\n data[mask] = fill_value\n else:\n data = data.copy()\n\n def _try_cast(arr, take_fast_path):\n\n # perf shortcut as this is the most common case\n if take_fast_path:\n if _possibly_castable(arr) and not copy and dtype is None:\n return arr\n\n try:\n arr = _possibly_cast_to_datetime(arr, dtype)\n subarr = pa.array(arr, dtype=dtype, copy=copy)\n except (ValueError, TypeError):\n if com.is_categorical_dtype(dtype):\n subarr = Categorical(arr)\n elif dtype is not None and raise_cast_failure:\n raise\n else:\n subarr = pa.array(arr, dtype=object, copy=copy)\n return subarr\n\n # GH #846\n if isinstance(data, (pa.Array, Index, Series)):\n subarr = np.array(data, copy=False)\n if dtype is not None:\n\n # possibility of nan -> garbage\n if com.is_float_dtype(data.dtype) and com.is_integer_dtype(dtype):\n if not isnull(data).any():\n subarr = _try_cast(data, True)\n elif copy:\n subarr = data.copy()\n else:\n if (com.is_datetime64_dtype(data.dtype) and\n not com.is_datetime64_dtype(dtype)):\n if dtype == object:\n ints = np.asarray(data).view('i8')\n subarr = tslib.ints_to_pydatetime(ints)\n elif raise_cast_failure:\n raise TypeError('Cannot cast datetime64 to %s' % dtype)\n else:\n subarr = _try_cast(data, True)\n elif isinstance(data, Index):\n # don't coerce Index types\n # e.g. indexes can have different conversions (so don't fast path them)\n # GH 6140\n subarr = _sanitize_index(data, index, copy=True)\n else:\n subarr = _try_cast(data, True)\n\n if copy:\n subarr = data.copy()\n\n elif isinstance(data, Categorical):\n subarr = data\n\n if copy:\n subarr = data.copy()\n return subarr\n\n elif isinstance(data, list) and len(data) > 0:\n if dtype is not None:\n try:\n subarr = _try_cast(data, False)\n except Exception:\n if raise_cast_failure: # pragma: no cover\n raise\n subarr = pa.array(data, dtype=object, copy=copy)\n subarr = lib.maybe_convert_objects(subarr)\n\n else:\n subarr = _possibly_convert_platform(data)\n\n subarr = _possibly_cast_to_datetime(subarr, dtype)\n\n else:\n subarr = _try_cast(data, False)\n\n # scalar like\n if subarr.ndim == 0:\n if isinstance(data, list): # pragma: no cover\n subarr = pa.array(data, dtype=object)\n elif index is not None:\n value = data\n\n # figure out the dtype from the value (upcast if necessary)\n if dtype is None:\n dtype, value = _infer_dtype_from_scalar(value)\n else:\n # need to possibly convert the value here\n value = _possibly_cast_to_datetime(value, dtype)\n\n subarr = pa.empty(len(index), dtype=dtype)\n subarr.fill(value)\n\n else:\n return subarr.item()\n\n # the result that we want\n elif subarr.ndim == 1:\n if index is not None:\n\n # a 1-element ndarray\n if len(subarr) != len(index) and len(subarr) == 1:\n value = subarr[0]\n subarr = pa.empty(len(index), dtype=subarr.dtype)\n subarr.fill(value)\n\n elif subarr.ndim > 1:\n if isinstance(data, pa.Array):\n raise Exception('Data must be 1-dimensional')\n else:\n subarr = _asarray_tuplesafe(data, dtype=dtype)\n\n # This is to prevent mixed-type Series getting all casted to\n # NumPy string type, e.g. 
NaN --> '-1#IND'.\n if issubclass(subarr.dtype.type, compat.string_types):\n subarr = pa.array(data, dtype=object, copy=copy)\n\n return subarr\n\n# backwards compatiblity\nTimeSeries = Series\n\n#----------------------------------------------------------------------\n# Add plotting methods to Series\n\nimport pandas.tools.plotting as _gfx\n\nSeries.plot = _gfx.plot_series\nSeries.hist = _gfx.hist_series\n\n# Add arithmetic!\nops.add_flex_arithmetic_methods(Series, **ops.series_flex_funcs)\nops.add_special_arithmetic_methods(Series, **ops.series_special_funcs)\n" ]
[ [ "numpy.asarray", "pandas.core.nanops.nancorr", "pandas.core.algorithms.select_n", "pandas.core.common._coerce_to_dtype", "pandas.lib.map_infer", "pandas.core.common.is_float_dtype", "pandas.core.datetools.to_datetime", "pandas.tseries.index.DatetimeIndex", "pandas.tseries.timedeltas.to_timedelta", "pandas.core.common.is_list_like", "pandas.sparse.series.SparseSeries", "pandas.core.reshape.unstack", "pandas.core.common.is_timedelta64_dtype", "pandas.tseries.period.Period", "pandas.core.categorical.CategoricalAccessor", "pandas.lib.Timedelta", "numpy.array", "numpy.dot", "pandas.core.format.SeriesFormatter", "pandas.compat.iteritems", "numpy.issubdtype", "pandas.core.algorithms.rank", "pandas.core.common.take_1d", "numpy.ndarray.__setstate__", "numpy.isscalar", "pandas.core.common.is_categorical_dtype", "pandas.util.decorators.Appender", "pandas.core.common.is_integer", "pandas.lib.infer_dtype", "pandas.core.frame.DataFrame.from_csv", "pandas.core.common.i8_boxer", "pandas.tools.merge.concat", "pandas.core.categorical.Categorical", "pandas.core.common.is_datetime64_dtype", "pandas.core.common.notnull", "pandas.tseries.tools.to_datetime", "pandas.core.common._maybe_match_name", "pandas.index.get_value_at", "pandas.core.common._ensure_int64", "pandas.core.array.array", "pandas.lib.maybe_convert_objects", "pandas.core.common.isnull", "pandas.core.common._possibly_castable", "numpy.argsort", "pandas.util.terminal.get_terminal_size", "numpy.ma.getmaskarray", "pandas.lib.Timestamp", "pandas.core.index._ensure_index", "pandas.core.common.is_integer_dtype", "pandas.compat.u", "pandas.core.ops.add_flex_arithmetic_methods", "pandas.lib.fast_multiget", "pandas.compat.text_type", "pandas.core.common._ensure_platform_int", "pandas.core.common.needs_i8_conversion", "pandas.core.generic.NDFrame._update_inplace", "pandas.core.common._asarray_tuplesafe", "pandas.lib.ismember", "pandas.tslib.ints_to_pydatetime", "pandas.core.common._is_bool_indexer", "pandas.core.config.get_option", "pandas.core.internals.SingleBlockManager", "pandas.core.nanops.nancov", "pandas.core.common._values_from_object", "pandas.core.common._maybe_box_datetimelike", "pandas.core.generic.NDFrame.__init__", "pandas.core.common._infer_dtype_from_scalar", "pandas.core.groupby._lexsort_indexer", "pandas.core.ops.add_special_arithmetic_methods", "pandas.core.common._possibly_cast_to_datetime", "pandas.core.common._try_sort", "pandas.core.common.is_iterator", "pandas.core.common._possibly_convert_platform", "pandas.core.indexing._check_bool_indexer", "numpy.percentile", "numpy.empty", "pandas.core.common._maybe_upcast", "pandas.core.algorithms.mode", "pandas.core.index.Index", "pandas.tseries.common.maybe_to_datetimelike", "pandas.core.strings.StringMethods", "pandas.core.frame.DataFrame", "pandas.core.common.pprint_thing" ] ]
ojasjoshi/Selective_Deblur_GANs
[ "17056ca69f097a07884135d9031c53d4ef217a6a" ]
[ "new_yolo2/yolo2-pytorch/darknet_ori.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport utils.network as net_utils\nimport cfgs.config as cfg\nfrom layers.reorg.reorg_layer import ReorgLayer\nfrom utils.cython_bbox import bbox_ious, anchor_intersections\nfrom utils.cython_yolo import yolo_to_bbox\nfrom functools import partial\n\nfrom multiprocessing import Pool\n\n\ndef _make_layers(in_channels, net_cfg):\n layers = []\n\n if len(net_cfg) > 0 and isinstance(net_cfg[0], list):\n for sub_cfg in net_cfg:\n layer, in_channels = _make_layers(in_channels, sub_cfg)\n layers.append(layer)\n else:\n for item in net_cfg:\n if item == 'M':\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n else:\n out_channels, ksize = item\n layers.append(net_utils.Conv2d_BatchNorm(in_channels,\n out_channels,\n ksize,\n same_padding=True))\n # layers.append(net_utils.Conv2d(in_channels, out_channels,\n # ksize, same_padding=True))\n in_channels = out_channels\n\n return nn.Sequential(*layers), in_channels\n\n\ndef _process_batch(data, size_index):\n W, H = cfg.multi_scale_out_size[size_index]\n inp_size = cfg.multi_scale_inp_size[size_index]\n out_size = cfg.multi_scale_out_size[size_index]\n\n bbox_pred_np, gt_boxes, gt_classes, dontcares, iou_pred_np = data\n\n # net output\n hw, num_anchors, _ = bbox_pred_np.shape\n\n # gt\n _classes = np.zeros([hw, num_anchors, cfg.num_classes], dtype=np.float)\n _class_mask = np.zeros([hw, num_anchors, 1], dtype=np.float)\n\n _ious = np.zeros([hw, num_anchors, 1], dtype=np.float)\n _iou_mask = np.zeros([hw, num_anchors, 1], dtype=np.float)\n\n _boxes = np.zeros([hw, num_anchors, 4], dtype=np.float)\n _boxes[:, :, 0:2] = 0.5\n _boxes[:, :, 2:4] = 1.0\n _box_mask = np.zeros([hw, num_anchors, 1], dtype=np.float) + 0.01\n\n # scale pred_bbox\n anchors = np.ascontiguousarray(cfg.anchors, dtype=np.float)\n bbox_pred_np = np.expand_dims(bbox_pred_np, 0)\n bbox_np = yolo_to_bbox(\n np.ascontiguousarray(bbox_pred_np, dtype=np.float),\n anchors,\n H, W)\n # bbox_np = (hw, num_anchors, (x1, y1, x2, y2)) range: 0 ~ 1\n bbox_np = bbox_np[0]\n bbox_np[:, :, 0::2] *= float(inp_size[0]) # rescale x\n bbox_np[:, :, 1::2] *= float(inp_size[1]) # rescale y\n\n # gt_boxes_b = np.asarray(gt_boxes[b], dtype=np.float)\n gt_boxes_b = np.asarray(gt_boxes, dtype=np.float)\n\n # for each cell, compare predicted_bbox and gt_bbox\n bbox_np_b = np.reshape(bbox_np, [-1, 4])\n ious = bbox_ious(\n np.ascontiguousarray(bbox_np_b, dtype=np.float),\n np.ascontiguousarray(gt_boxes_b, dtype=np.float)\n )\n best_ious = np.max(ious, axis=1).reshape(_iou_mask.shape)\n iou_penalty = 0 - iou_pred_np[best_ious < cfg.iou_thresh]\n _iou_mask[best_ious <= cfg.iou_thresh] = cfg.noobject_scale * iou_penalty\n\n # locate the cell of each gt_boxe\n cell_w = float(inp_size[0]) / W\n cell_h = float(inp_size[1]) / H\n cx = (gt_boxes_b[:, 0] + gt_boxes_b[:, 2]) * 0.5 / cell_w\n cy = (gt_boxes_b[:, 1] + gt_boxes_b[:, 3]) * 0.5 / cell_h\n cell_inds = np.floor(cy) * W + np.floor(cx)\n cell_inds = cell_inds.astype(np.int)\n\n target_boxes = np.empty(gt_boxes_b.shape, dtype=np.float)\n target_boxes[:, 0] = cx - np.floor(cx) # cx\n target_boxes[:, 1] = cy - np.floor(cy) # cy\n target_boxes[:, 2] = \\\n (gt_boxes_b[:, 2] - gt_boxes_b[:, 0]) / inp_size[0] * out_size[0] # tw\n target_boxes[:, 3] = \\\n (gt_boxes_b[:, 3] - gt_boxes_b[:, 1]) / inp_size[1] * out_size[1] # th\n\n # for each gt boxes, match the best anchor\n gt_boxes_resize = np.copy(gt_boxes_b)\n gt_boxes_resize[:, 0::2] *= (out_size[0] / 
float(inp_size[0]))\n gt_boxes_resize[:, 1::2] *= (out_size[1] / float(inp_size[1]))\n anchor_ious = anchor_intersections(\n anchors,\n np.ascontiguousarray(gt_boxes_resize, dtype=np.float)\n )\n anchor_inds = np.argmax(anchor_ious, axis=0)\n\n ious_reshaped = np.reshape(ious, [hw, num_anchors, len(cell_inds)])\n for i, cell_ind in enumerate(cell_inds):\n if cell_ind >= hw or cell_ind < 0:\n print('cell inds size {}'.format(len(cell_inds)))\n print('cell over {} hw {}'.format(cell_ind, hw))\n continue\n a = anchor_inds[i]\n\n # 0 ~ 1, should be close to 1\n iou_pred_cell_anchor = iou_pred_np[cell_ind, a, :]\n _iou_mask[cell_ind, a, :] = cfg.object_scale * (1 - iou_pred_cell_anchor) # noqa\n # _ious[cell_ind, a, :] = anchor_ious[a, i]\n _ious[cell_ind, a, :] = ious_reshaped[cell_ind, a, i]\n\n _box_mask[cell_ind, a, :] = cfg.coord_scale\n target_boxes[i, 2:4] /= anchors[a]\n _boxes[cell_ind, a, :] = target_boxes[i]\n\n _class_mask[cell_ind, a, :] = cfg.class_scale\n _classes[cell_ind, a, gt_classes[i]] = 1.\n\n # _boxes[:, :, 2:4] = np.maximum(_boxes[:, :, 2:4], 0.001)\n # _boxes[:, :, 2:4] = np.log(_boxes[:, :, 2:4])\n\n return _boxes, _ious, _classes, _box_mask, _iou_mask, _class_mask\n\n\nclass Darknet19(nn.Module):\n def __init__(self):\n super(Darknet19, self).__init__()\n\n net_cfgs = [\n # conv1s\n [(32, 3)],\n ['M', (64, 3)],\n ['M', (128, 3), (64, 1), (128, 3)],\n ['M', (256, 3), (128, 1), (256, 3)],\n ['M', (512, 3), (256, 1), (512, 3), (256, 1), (512, 3)],\n # conv2\n ['M', (1024, 3), (512, 1), (1024, 3), (512, 1), (1024, 3)],\n # ------------\n # conv3\n [(1024, 3), (1024, 3)],\n # conv4\n [(1024, 3)]\n ]\n\n # darknet\n self.conv1s, c1 = _make_layers(3, net_cfgs[0:5])\n self.conv2, c2 = _make_layers(c1, net_cfgs[5])\n # ---\n self.conv3, c3 = _make_layers(c2, net_cfgs[6])\n\n stride = 2\n # stride*stride times the channels of conv1s\n self.reorg = ReorgLayer(stride=2)\n # cat [conv1s, conv3]\n self.conv4, c4 = _make_layers((c1*(stride*stride) + c3), net_cfgs[7])\n\n # linear\n out_channels = cfg.num_anchors * (cfg.num_classes + 5)\n self.conv5 = net_utils.Conv2d(c4, out_channels, 1, 1, relu=False)\n self.global_average_pool = nn.AvgPool2d((1, 1))\n\n # train\n self.bbox_loss = None\n self.iou_loss = None\n self.cls_loss = None\n self.pool = Pool(processes=10)\n\n @property\n def loss(self):\n return self.bbox_loss + self.iou_loss + self.cls_loss\n\n def forward(self, im_data, gt_boxes=None, gt_classes=None, dontcare=None,\n size_index=0):\n conv1s = self.conv1s(im_data)\n conv2 = self.conv2(conv1s)\n conv3 = self.conv3(conv2)\n conv1s_reorg = self.reorg(conv1s)\n cat_1_3 = torch.cat([conv1s_reorg, conv3], 1)\n conv4 = self.conv4(cat_1_3)\n conv5 = self.conv5(conv4) # batch_size, out_channels, h, w\n global_average_pool = self.global_average_pool(conv5)\n\n # for detection\n # bsize, c, h, w -> bsize, h, w, c ->\n # bsize, h x w, num_anchors, 5+num_classes\n bsize, _, h, w = global_average_pool.size()\n # assert bsize == 1, 'detection only support one image per batch'\n global_average_pool_reshaped = \\\n global_average_pool.permute(0, 2, 3, 1).contiguous().view(bsize,\n -1, cfg.num_anchors, cfg.num_classes + 5) # noqa\n\n # tx, ty, tw, th, to -> sig(tx), sig(ty), exp(tw), exp(th), sig(to)\n xy_pred = F.sigmoid(global_average_pool_reshaped[:, :, :, 0:2])\n wh_pred = torch.exp(global_average_pool_reshaped[:, :, :, 2:4])\n bbox_pred = torch.cat([xy_pred, wh_pred], 3)\n iou_pred = F.sigmoid(global_average_pool_reshaped[:, :, :, 4:5])\n\n score_pred = global_average_pool_reshaped[:, 
:, :, 5:].contiguous()\n prob_pred = F.softmax(score_pred.view(-1, score_pred.size()[-1])).view_as(score_pred) # noqa\n\n # for training\n if self.training:\n bbox_pred_np = bbox_pred.data.cpu().numpy()\n iou_pred_np = iou_pred.data.cpu().numpy()\n _boxes, _ious, _classes, _box_mask, _iou_mask, _class_mask = \\\n self._build_target(bbox_pred_np,\n gt_boxes,\n gt_classes,\n dontcare,\n iou_pred_np,\n size_index)\n\n _boxes = net_utils.np_to_variable(_boxes)\n _ious = net_utils.np_to_variable(_ious)\n _classes = net_utils.np_to_variable(_classes)\n box_mask = net_utils.np_to_variable(_box_mask,\n dtype=torch.FloatTensor)\n iou_mask = net_utils.np_to_variable(_iou_mask,\n dtype=torch.FloatTensor)\n class_mask = net_utils.np_to_variable(_class_mask,\n dtype=torch.FloatTensor)\n\n num_boxes = sum((len(boxes) for boxes in gt_boxes))\n\n # _boxes[:, :, :, 2:4] = torch.log(_boxes[:, :, :, 2:4])\n box_mask = box_mask.expand_as(_boxes)\n\n self.bbox_loss = nn.MSELoss(size_average=False)(bbox_pred * box_mask, _boxes * box_mask) / num_boxes # noqa\n self.iou_loss = nn.MSELoss(size_average=False)(iou_pred * iou_mask, _ious * iou_mask) / num_boxes # noqa\n\n class_mask = class_mask.expand_as(prob_pred)\n self.cls_loss = nn.MSELoss(size_average=False)(prob_pred * class_mask, _classes * class_mask) / num_boxes # noqa\n\n return bbox_pred, iou_pred, prob_pred\n\n def _build_target(self, bbox_pred_np, gt_boxes, gt_classes, dontcare,\n iou_pred_np, size_index):\n \"\"\"\n :param bbox_pred: shape: (bsize, h x w, num_anchors, 4) :\n (sig(tx), sig(ty), exp(tw), exp(th))\n \"\"\"\n\n bsize = bbox_pred_np.shape[0]\n\n targets = self.pool.map(partial(_process_batch, size_index=size_index),\n ((bbox_pred_np[b], gt_boxes[b],\n gt_classes[b], dontcare[b], iou_pred_np[b])\n for b in range(bsize)))\n\n _boxes = np.stack(tuple((row[0] for row in targets)))\n _ious = np.stack(tuple((row[1] for row in targets)))\n _classes = np.stack(tuple((row[2] for row in targets)))\n _box_mask = np.stack(tuple((row[3] for row in targets)))\n _iou_mask = np.stack(tuple((row[4] for row in targets)))\n _class_mask = np.stack(tuple((row[5] for row in targets)))\n\n return _boxes, _ious, _classes, _box_mask, _iou_mask, _class_mask\n\n def load_from_npz(self, fname, num_conv=None):\n dest_src = {'conv.weight': 'kernel', 'conv.bias': 'biases',\n 'bn.weight': 'gamma', 'bn.bias': 'biases',\n 'bn.running_mean': 'moving_mean',\n 'bn.running_var': 'moving_variance'}\n params = np.load(fname)\n own_dict = self.state_dict()\n keys = list(own_dict.keys())\n\n for i, start in enumerate(range(0, len(keys), 5)):\n if num_conv is not None and i >= num_conv:\n break\n end = min(start+5, len(keys))\n for key in keys[start:end]:\n list_key = key.split('.')\n ptype = dest_src['{}.{}'.format(list_key[-2], list_key[-1])]\n src_key = '{}-convolutional/{}:0'.format(i, ptype)\n print((src_key, own_dict[key].size(), params[src_key].shape))\n param = torch.from_numpy(params[src_key])\n if ptype == 'kernel':\n param = param.permute(3, 2, 0, 1)\n own_dict[key].copy_(param)\n\n\nif __name__ == '__main__':\n net = Darknet19()\n # net.load_from_npz('models/yolo-voc.weights.npz')\n net.load_from_npz('models/darknet19.weights.npz', num_conv=18)\n" ]
[ [ "numpy.asarray", "numpy.copy", "numpy.ascontiguousarray", "torch.cat", "torch.nn.functional.sigmoid", "numpy.reshape", "numpy.expand_dims", "torch.from_numpy", "torch.nn.AvgPool2d", "numpy.load", "torch.nn.MaxPool2d", "numpy.zeros", "numpy.argmax", "numpy.max", "numpy.empty", "torch.nn.MSELoss", "numpy.floor", "torch.exp", "torch.nn.Sequential" ] ]
siconos/siconos-tutorials
[ "821365a6ce679fc3d606b272ff069134e3c6aa4b" ]
[ "examples/mechanics/BulletBouncingBox/BulletBouncingBox.py" ]
[ "#!/usr/bin/env python\n\n# Siconos is a program dedicated to modeling, simulation and control\n# of non smooth dynamical systems.\n#\n# Copyright 2021 INRIA.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n\ndo_plot = True\ntry:\n import matplotlib\nexcept:\n do_plot = False\nif do_plot:\n import os, sys\n if sys.platform=='linux' and (not 'DISPLAY' in os.environ\n or len(os.environ['DISPLAY'])==0):\n matplotlib.use('Agg')\n from matplotlib.pyplot import \\\n subplot, title, plot, grid, show, savefig, ylim\n\nfrom siconos.kernel import \\\n NonSmoothDynamicalSystem, MoreauJeanOSI, TimeDiscretisation, \\\n FrictionContact, NewtonImpactFrictionNSL, TimeStepping\n\nimport siconos.kernel as sk\n\nfrom siconos.mechanics.collision.bullet import \\\n SiconosBulletCollisionManager\n\nfrom siconos.mechanics.collision import \\\n SiconosBox, SiconosPlane, RigidBodyDS, SiconosContactor, SiconosContactorSet\n\nfrom numpy import zeros\nfrom numpy.linalg import norm\n\nt0 = 0 # start time\nT = 20 # end time\nh = 0.005 # time step\n\ng = 9.81 # gravity\n\ntheta = 0.5 # theta scheme\n\n#\n# dynamical system\n#\nposition_init = 10\nvelocity_init = 0\n\n# a box shape\nbox1 = SiconosBox(1.0, 1.0, 1.0)\n\n# A Bullet Dynamical System : a shape + a mass (1.0) + position and velocity\nbody = RigidBodyDS([0, 0, position_init, 1., 0, 0, 0],\n [0, 0, velocity_init, 0., 0., 0.],\n 1.0)\n\n# Add the shape, wrapped in a SiconosContactor, to the body's\n# contactor set.\nbody.contactors().push_back(SiconosContactor(box1))\n\n# set external forces\nweight = [0, 0, -body.scalarMass() * g]\nbody.setFExtPtr(weight)\n\n#\n# Model\n#\nbouncingBox = NonSmoothDynamicalSystem(t0, T)\n\n# add the dynamical system to the non smooth dynamical system\nbouncingBox.insertDynamicalSystem(body)\n\n#\n# Simulation\n#\n\n# (1) OneStepIntegrators\nosi = MoreauJeanOSI(theta)\n\nground = SiconosPlane()\ngroundOffset = [0,0,-0.5,1,0,0,0]\n\n# (2) Time discretisation --\ntimedisc = TimeDiscretisation(t0, h)\n\n# (3) one step non smooth problem\nosnspb = FrictionContact(3)\n\nosnspb.numericsSolverOptions().iparam[0] = 1000\nosnspb.numericsSolverOptions().dparam[0] = 1e-5\nosnspb.setMaxSize(16384)\nosnspb.setMStorageType(1)\nosnspb.setNumericsVerboseMode(False)\n\n# keep previous solution\nosnspb.setKeepLambdaAndYState(True)\n\n\n# (4) non smooth law\nnslaw = NewtonImpactFrictionNSL(0.8, 0., 0., 3)\n\n# (5) broadphase contact detection\nbroadphase = SiconosBulletCollisionManager()\n\n# insert a non smooth law for contactors id 0\nbroadphase.insertNonSmoothLaw(nslaw, 0, 0)\n\n# The ground is a static object\n# we give it a group contactor id : 0\nscs = SiconosContactorSet()\nscs.append(SiconosContactor(ground))\nbroadphase.insertStaticContactorSet(scs, groundOffset)\n\n# (6) Simulation setup with (1) (2) (3) (4) (5)\nsimulation = TimeStepping(bouncingBox, timedisc)\nsimulation.insertInteractionManager(broadphase)\n\nsimulation.insertIntegrator(osi)\nsimulation.insertNonSmoothProblem(osnspb)\n\n\n# Get the values to be plotted\n# 
->saved in a matrix dataPlot\n\nN = int((T - t0) / h)\ndataPlot = zeros((N+1, 4))\n\n#\n# numpy pointers on dense Siconos vectors\n#\nq = body.q()\nv = body.velocity()\n\n#\n# initial data\n#\ndataPlot[0, 0] = t0\ndataPlot[0, 1] = q[2]\ndataPlot[0, 2] = v[2]\n\nk = 1\n\n# time loop\nwhile(simulation.hasNextEvent()):\n\n simulation.computeOneStep()\n\n dataPlot[k, 0] = simulation.nextTime()\n dataPlot[k, 1] = q[2]\n dataPlot[k, 2] = v[2]\n\n #if (broadphase.collisionWorld().getDispatcher().getNumManifolds() > 0):\n if (broadphase.statistics().new_interactions_created +\n broadphase.statistics().existing_interactions_processed) > 0:\n if bouncingBox.topology().\\\n numberOfIndexSet() == 2:\n index1 = sk.interactions(simulation.indexSet(1))\n if (len(index1) == 4):\n dataPlot[k, 3] = norm(index1[0].lambda_(1)) + \\\n norm(index1[1].lambda_(1)) + norm(index1[2].lambda_(1)) + \\\n norm(index1[3].lambda_(1))\n\n k += 1\n simulation.nextStep()\n\n#\n# comparison with the reference file\n#\nfrom siconos.kernel import SimpleMatrix, getMatrix\nfrom numpy.linalg import norm\n\nref = getMatrix(SimpleMatrix(\"result.ref\"))\n\nprint(\"norm(dataPlot - ref) = {0}\".format(norm(dataPlot - ref)))\nif (norm(dataPlot - ref) > 1e-11):\n print(\"Warning. The result is rather different from the reference file.\")\n\n\n#\n# plots\n#\n\nif do_plot:\n subplot(511)\n title('position')\n plot(dataPlot[0:k, 0], dataPlot[0:k, 1])\n y = ylim()\n plot(ref[0:k, 0], ref[0:k, 1])\n ylim(y)\n grid()\n subplot(513)\n title('velocity')\n plot(dataPlot[0:k, 0], dataPlot[0:k, 2])\n y = ylim()\n plot(ref[0:k, 0], ref[0:k, 2])\n ylim(y)\n grid()\n subplot(515)\n plot(dataPlot[0:k, 0], dataPlot[0:k, 3])\n y = ylim()\n plot(ref[0:k, 0], ref[0:k, 3])\n ylim(y)\n title('lambda')\n grid()\n savefig('result.png')\n show()\n" ]
[ [ "numpy.zeros", "matplotlib.pyplot.grid", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.ylim", "matplotlib.use", "matplotlib.pyplot.plot", "numpy.linalg.norm" ] ]
zapatacomputing/tutorial-2-hello-ml
[ "e82f73f29cb2bde46cf1a24e9ee8b5e4cde291bc" ]
[ "src/python/lstm/data_manipulator.py" ]
[ "\"\"\"\nCopyright Zapata Computing, Inc. All rights reserved.\n\nThis module manipulates data.\n\"\"\"\n\nimport sys\nimport json\nimport numpy as np\nimport pandas as pd\n\nfrom typing import TextIO\n\n\ndef noisy_sine_generation(time_range:float, time_step:float, noise_std:float) -> dict:\n \"\"\"\n Generates noisy sine data.\n\n Args:\n time_range (float):\n The upper limit of the time range for the data to generate. The time\n range starts at 0. The time_range is not included as the last point.\n time_step (float):\n The step between each of the time values.\n noise_std (float):\n The standard deviation of the noise. Noise follows a normal distribution\n centered at zero.\n\n Returns:\n data_dict (dict):\n A dict containing a dict representation of a Pandas dataframe within its\n \"data\" field.\n \"\"\"\n\n print('time_range = ', time_range)\n print('time_step = ', time_step)\n print('noise_std = ', noise_std)\n\n data_dict = {}\n\n try:\n time = np.arange(0, time_range, time_step)\n\n # Generating data: sine function\n values = np.sin(time) + np.random.normal(scale=noise_std, size=len(time))\n print('Values shape from numpy: ', values.shape)\n\n # Making pandas DataFrame\n data_df = pd.DataFrame(data=np.transpose([time, values]), columns=['time','values'])\n\n print('Data shape from pandas:')\n print(data_df.shape)\n print('DataFrame head:')\n print(data_df.head())\n\n # Save data in dict for serialization into JSON\n data_dict[\"data\"] = data_df.to_dict()\n except Exception as e:\n e = sys.exc_info()[0]\n print(f'Error: {e}')\n\n return data_dict\n\n\ndef preprocess_data(data:dict, train_frac:float=0.8, window_size:int=10) -> (dict, dict, dict, dict):\n \"\"\"\n Preprocesses data into a format suitable for training a model, splits it \n into training and testing sets, and creates datasets of lookback windows and \n next values.\n\n Args:\n data (dict):\n A dict with two keys, each containing indexes as keys and data as values \n (this is the dict format of Pandas DataFrames). Here is an example:\n {\n \"x\": {\n \"0\": 0.0,\n \"1\": 0.1\n },\n \"y\": {\n \"0\": 1.0,\n \"1\": 2.0\n }\n }\n train_frac (float):\n The fraction of the data to use for training. 
The remaining data will be\n returned as testing data.\n window_size (int):\n The number of data values in the rolling lookback window.\n\n Returns:\n train_dict (dict):\n A dict with a Pandas DataFrame of the data for training in input format \n inside its \"data\" field.\n test_dict (dict):\n A dict with a Pandas DataFrame of the data for testing in input format \n inside its \"data\" field.\n train_window_dict (dict):\n A dict of the data for training in the \"data\" field, with a list of \n lookback windows in the \"windows\" field and a list of the corresponding \n next values in the \"next_vals\" field.\n test_window_dict (dict):\n A dict of the data for testing in the \"data\" field, with a list of \n lookback windows in the \"windows\" field and a list of the corresponding \n next values in the \"next_vals\" field.\n \"\"\"\n\n # Load data into dataframe\n df = pd.DataFrame.from_dict(data)\n print(\"DataFrame head:\")\n print(df.head())\n\n dfsize = df.shape[0]\n\n # Splitting up dataset into Training and Testing datsets\n train_size = int(dfsize * train_frac)\n test_size = dfsize - train_size\n train, test = df.iloc[0:train_size], df.iloc[train_size:]\n\n print(\"Train and test set sizes: \", len(train), len(test))\n\n # Reshape to dimensions required by tensorflow: [samples, window_size, n_features]\n col = df.columns[1]\n train_windows, train_next_vals = create_dataset(train[col], train[col], window_size)\n test_windows, test_next_vals = create_dataset(test[col], test[col], window_size)\n\n # Save all 4 data sets to JSON serializable formats (dicts/lists)\n train_dict = {}\n train_dict[\"data\"] = train.to_dict()\n\n test_dict = {}\n test_dict[\"data\"] = test.to_dict()\n\n train_window_dict = {\"data\":{}}\n train_window_dict[\"data\"][\"windows\"] = train_windows.tolist()\n train_window_dict[\"data\"][\"next_vals\"] = train_next_vals.tolist()\n\n test_window_dict = {\"data\":{}}\n test_window_dict[\"data\"][\"windows\"] = test_windows.tolist()\n test_window_dict[\"data\"][\"next_vals\"] = test_next_vals.tolist()\n\n return train_dict, test_dict, train_window_dict, test_window_dict\n\n\ndef create_dataset(x: pd.Series, y: pd.Series, window_size:int=1) -> (np.ndarray, np.ndarray):\n \"\"\"\n A helper function of `preprocess_data` to split data into lookback windows \n and next values.\n\n Args:\n x (pd.Series):\n The data to make the lookback windows from\n y (pd.Series):\n The data to get the next values from\n window_size (int):\n The size of the lookback window.\n\n Returns:\n np.array(xs) (numpy.ndarray):\n An array of lookback windows.\n np.array(ys) (numpy.ndarray):\n An array of corresponding next values.\n \"\"\"\n\n xs, ys = [], []\n\n # Create pairs of a window of data and the next value after the window\n for i in range(len(x) - window_size):\n v = x.iloc[i:(i + window_size)].values\n xs.append(v)\n ys.append(y.iloc[i + window_size])\n\n return np.array(xs), np.array(ys)\n\n\ndef save_data(datas:list, filenames:list) -> None:\n \"\"\"\n Saves data as JSON.\n\n Args:\n datas (list):\n A list of dicts of data to save.\n filenames (list):\n A list of filenames corresponding to the data dicts to save the data in. 
\n These should have a '.json' extension.\n \"\"\"\n\n for i in range(len(datas)):\n data = datas[i]\n filename = filenames[i]\n\n try:\n data[\"schema\"] = \"orquestra-v1-data\"\n except KeyError as e:\n print(f'Error: Could not load schema key from {filename}')\n\n try:\n with open(filename,'w') as f:\n # Write data to file as this will serve as output artifact\n f.write(json.dumps(data, indent=2)) \n except IOError as e:\n print(f'Error: Could not open {filename}')\n\n\ndef load_data(filename:TextIO) -> dict:\n \"\"\"\n Loads data from JSON.\n\n Args:\n filename (TextIO):\n The file to load the data from.\n\n Returns:\n data (dict):\n The data that was loaded from the file.\n \"\"\"\n\n if isinstance(filename, str):\n try:\n with open(filename, 'r') as f:\n data = json.load(f)\n\n except IOError:\n print(f'Error: Could not open {filename}')\n\n else:\n data = json.load(filename)\n\n return data\n" ]
[ [ "numpy.transpose", "numpy.arange", "numpy.array", "numpy.sin", "pandas.DataFrame.from_dict" ] ]
gyes00205/Open3D
[ "2520323e2e143699dec28d8bd559eba326d4005d" ]
[ "examples/python/geometry/image_processing.py" ]
[ "# ----------------------------------------------------------------------------\n# - Open3D: www.open3d.org -\n# ----------------------------------------------------------------------------\n# The MIT License (MIT)\n#\n# Copyright (c) 2018-2021 www.open3d.org\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n# ----------------------------------------------------------------------------\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\nimport open3d as o3d\n#conda install pillow matplotlib\n\nif __name__ == \"__main__\":\n\n print(\"Testing image in open3d ...\")\n print(\"Convert an image to numpy\")\n sample_image = o3d.data.JuneauImage()\n x = o3d.io.read_image(sample_image.path)\n print(np.asarray(x))\n print(\n \"Convet a numpy image to o3d.geometry.Image and show it with DrawGeomtries().\"\n )\n y = mpimg.imread(sample_image.path)\n print(y.shape)\n yy = o3d.geometry.Image(y)\n print(yy)\n o3d.visualization.draw_geometries([yy])\n\n print(\"Render a channel of the previous image.\")\n z = np.array(y[:, :, 1])\n print(z.shape)\n print(z.strides)\n zz = o3d.geometry.Image(z)\n print(zz)\n o3d.visualization.draw_geometries([zz])\n\n print(\"Write the previous image to file.\")\n o3d.io.write_image(\"test.jpg\", zz, quality=100)\n\n print(\"Testing basic image processing module.\")\n sample_image = o3d.data.JuneauImage()\n im_raw = mpimg.imread(sample_image.path)\n im = o3d.geometry.Image(im_raw)\n im_g3 = im.filter(o3d.geometry.ImageFilterType.Gaussian3)\n im_g5 = im.filter(o3d.geometry.ImageFilterType.Gaussian5)\n im_g7 = im.filter(o3d.geometry.ImageFilterType.Gaussian7)\n im_gaussian = [im, im_g3, im_g5, im_g7]\n pyramid_levels = 4\n pyramid_with_gaussian_filter = True\n im_pyramid = im.create_pyramid(pyramid_levels, pyramid_with_gaussian_filter)\n im_dx = im.filter(o3d.geometry.ImageFilterType.Sobel3dx)\n im_dx_pyramid = o3d.geometry.Image.filter_pyramid(\n im_pyramid, o3d.geometry.ImageFilterType.Sobel3dx)\n im_dy = im.filter(o3d.geometry.ImageFilterType.Sobel3dy)\n im_dy_pyramid = o3d.geometry.Image.filter_pyramid(\n im_pyramid, o3d.geometry.ImageFilterType.Sobel3dy)\n switcher = {\n 0: im_gaussian,\n 1: im_pyramid,\n 2: im_dx_pyramid,\n 3: im_dy_pyramid,\n }\n for i in range(4):\n for j in range(pyramid_levels):\n plt.subplot(4, pyramid_levels, i * 4 + j + 1)\n plt.imshow(switcher.get(i)[j])\n plt.show()\n" ]
[ [ "numpy.asarray", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.array", "matplotlib.image.imread" ] ]
bryant1410/arxiv2018-bayesian-ensembles
[ "d97cf64270d34b2301903678e6fbfe170c4c2105" ]
[ "src/run_arg_experiments.py" ]
[ "import os\nimport sys\n\nfrom baselines.ibcc import IBCC\nfrom baselines.majority_voting import MajorityVoting\nfrom bsc.bsc import BSC\nfrom data import data_utils\nfrom evaluation.experiment import Experiment, calculate_scores\nimport data.load_data as load_data\nimport numpy as np\nimport pandas as pd\n\noutput_dir = '../../data/bayesian_sequence_combination/output/arg_LMU_corrected_gold_2/'\n\n# TODO try the simple BIO task as well as 5-class thing\n\ndef cap_number_of_workers(crowd, doc_start, max_workers_per_doc):\n print('Reducing number of workers per document to %i' % max_workers_per_doc)\n doc_start_idxs = np.where(doc_start)[0]\n for d, doc in enumerate(doc_start_idxs):\n valid_workers = crowd[doc] != -1\n worker_count = np.sum(valid_workers)\n if worker_count > max_workers_per_doc:\n valid_workers = np.argwhere(valid_workers).flatten()\n drop_workers = valid_workers[max_workers_per_doc:]\n # print('dropping workers %s' % str(drop_workers))\n if d+1 < len(doc_start_idxs):\n next_doc = doc_start_idxs[d + 1]\n crowd[doc:next_doc, drop_workers] = -1\n else:\n crowd[doc:, drop_workers] = -1\n\n used_workers = np.any(crowd != -1, 0)\n crowd = crowd[:, used_workers]\n\n return crowd\n\ndef split_dev_set(gt, crowd, text, doc_start):\n all_doc_starts = np.where(doc_start)[0]\n doc_starts = np.where(doc_start & (gt != -1))[0]\n dev_starts = doc_starts[100:]\n\n crowd_dev = []\n gt_dev = []\n text_dev = []\n doc_start_dev = []\n for dev_start in dev_starts:\n next_doc_start = np.where(all_doc_starts == dev_start)[0][0] + 1\n if next_doc_start < all_doc_starts.shape[0]:\n doc_end = all_doc_starts[next_doc_start]\n else:\n doc_end = all_doc_starts.shape[0]\n\n crowd_dev.append(crowd[dev_start:doc_end])\n\n gt_dev.append(np.copy(gt[dev_start:doc_end]))\n gt[dev_start:doc_end] = -1\n\n text_dev.append(text[dev_start:doc_end])\n doc_start_dev.append(doc_start[dev_start:doc_end])\n\n crowd_dev = np.concatenate(crowd_dev, axis=0)\n gt_dev = np.concatenate(gt_dev, axis=0)\n text_dev = np.concatenate(text_dev, axis=0)\n doc_start_dev = np.concatenate(doc_start_dev, axis=0)\n\n return gt, crowd_dev, gt_dev, doc_start_dev, text_dev\n\ndef load_arg_sentences(debug_size=0, regen_data=False, second_batch_workers_only=False, gold_labelled_only=False,\n max_workers_per_doc=5):\n data_dir = '../../data/bayesian_sequence_combination/data/argmin_LMU/'\n\n if not regen_data and os.path.exists(data_dir + 'evaluation_gold.csv'):\n #reload the data for the experiments from cache files\n gt = pd.read_csv(data_dir + 'evaluation_gold.csv', usecols=[1]).values.astype(int)\n crowd = pd.read_csv(data_dir + 'evaluation_crowd.csv').values[:, 1:].astype(int)\n doc_start = pd.read_csv(data_dir + 'evaluation_doc_start.csv', usecols=[1]).values.astype(int)\n text = pd.read_csv(data_dir + 'evaluation_text.csv', usecols=[1]).values\n\n if second_batch_workers_only:\n crowd = crowd[:, 26:]\n\n if gold_labelled_only:\n idxs = gt.flatten() != -1\n gt = gt[idxs]\n crowd = crowd[idxs, :]\n doc_start = doc_start[idxs]\n text = text[idxs]\n\n if max_workers_per_doc > 0:\n crowd = cap_number_of_workers(crowd, doc_start, max_workers_per_doc)\n\n # split dev set\n gt, crowd_dev, gt_dev, doc_start_dev, text_dev = split_dev_set(gt, crowd, text, doc_start)\n\n return gt, crowd, doc_start, text, crowd_dev, gt_dev, doc_start_dev, text_dev\n\n expert_file = data_dir + 'expert_corrected_disagreements.csv'\n\n gold_text = pd.read_csv(expert_file, sep=',', usecols=[6]).values\n gold_doc_start = pd.read_csv(expert_file, sep=',', 
usecols=[1]).values\n gold = pd.read_csv(expert_file, sep=',', usecols=[7]).values.astype(int).flatten()\n\n crowd_on_gold_file = data_dir + 'crowd_on_expert_labelled_sentences.csv'\n crowd_on_gold = pd.read_csv(crowd_on_gold_file, sep=',', usecols=range(2,28)).values\n\n crowd_no_gold_file = data_dir + 'crowd_on_sentences_with_no_experts.csv'\n crowd_without_gold = pd.read_csv(crowd_no_gold_file, sep=',', usecols=range(2,100)).values\n nogold_doc_start = pd.read_csv(crowd_no_gold_file, sep=',', usecols=[1]).values\n nogold_text = pd.read_csv(crowd_no_gold_file, sep=',', usecols=[100]).values\n\n print('Number of tokens = %i' % crowd_without_gold.shape[0])\n\n # some of these data points may have no annotations\n valididxs = np.any(crowd_without_gold != -1, axis=1)\n crowd_without_gold = crowd_without_gold[valididxs, :]\n doc_start = nogold_doc_start[valididxs]\n text = nogold_text[valididxs]\n\n print('Number of crowd-labelled tokens = %i' % crowd_without_gold.shape[0])\n\n N = crowd_without_gold.shape[0]\n\n # now line up the gold sentences with the complete set of crowd data\n crowd = np.zeros((N, crowd_on_gold.shape[1] + crowd_without_gold.shape[1]), dtype=int) - 1\n crowd[:, crowd_on_gold.shape[1]:] = crowd_without_gold\n\n # crowd_labels_present = np.any(crowd != -1, axis=1)\n # N_withcrowd = np.sum(crowd_labels_present)\n\n gt = np.zeros(N) - 1\n\n gold_docs = np.split(gold_text, np.where(gold_doc_start == 1)[0][1:], axis=0)\n gold_gold = np.split(gold, np.where(gold_doc_start == 1)[0][1:], axis=0)\n gold_crowd = np.split(crowd_on_gold, np.where(gold_doc_start == 1)[0][1:], axis=0)\n\n nogold_docs = np.split(text, np.where(nogold_doc_start == 1)[0][1:], axis=0)\n for d, doc in enumerate(gold_docs):\n\n print('matching gold doc %i of %i' % (d, len(gold_docs)))\n\n loc_in_nogold = 0\n\n for doc_nogold in nogold_docs:\n if np.all(doc == doc_nogold):\n len_doc_nogold = len(doc_nogold)\n break\n else:\n loc_in_nogold += len(doc_nogold)\n\n locs_in_nogold = np.arange(loc_in_nogold, len_doc_nogold+loc_in_nogold)\n gt[locs_in_nogold] = gold_gold[d]\n crowd[locs_in_nogold, :crowd_on_gold.shape[1]] = gold_crowd[d]\n\n # we need to flip 3 and 4 to fit our scheme here\n ICon_idxs = gt == 4\n BCon_idxs = gt == 3\n gt[ICon_idxs] = 3\n gt[BCon_idxs] = 4\n\n ICon_idxs = crowd == 4\n BCon_idxs = crowd == 3\n crowd[ICon_idxs] = 3\n crowd[BCon_idxs] = 4\n\n if debug_size:\n gt = gt[:debug_size]\n crowd = crowd[:debug_size]\n doc_start = doc_start[:debug_size]\n text = text[:debug_size]\n\n # save files for our experiments with the tag 'evaluation_'\n pd.DataFrame(gt).to_csv(data_dir + 'evaluation_gold.csv')\n pd.DataFrame(crowd).to_csv(data_dir + 'evaluation_crowd.csv')\n pd.DataFrame(doc_start).to_csv(data_dir + 'evaluation_doc_start.csv')\n pd.DataFrame(text).to_csv(data_dir + 'evaluation_text.csv')\n\n if second_batch_workers_only:\n crowd = crowd[:, 26:]\n\n if gold_labelled_only:\n idxs = gt.flatten() != -1\n gt = gt[idxs]\n crowd = crowd[idxs, :]\n doc_start = doc_start[idxs]\n text = text[idxs]\n\n gt = gt.astype(int)\n\n if max_workers_per_doc > 0:\n crowd = cap_number_of_workers(crowd, doc_start, max_workers_per_doc)\n\n gt, crowd_dev, gt_dev, doc_start_dev, text_dev = split_dev_set(gt, crowd, text, doc_start)\n\n return gt, crowd, doc_start, text, crowd_dev, gt_dev, doc_start_dev, text_dev\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n second_batch_workers_only = bool(int(sys.argv[1]))\n else:\n second_batch_workers_only = False\n\n if len(sys.argv) > 2:\n gold_labelled_only = 
bool(int(sys.argv[2]))\n else:\n gold_labelled_only = False\n\n if len(sys.argv) > 3:\n regen_data = bool(int(sys.argv[3]))\n else:\n regen_data = False\n\n print('Running ' + ('with' if second_batch_workers_only else 'without') + ' second-batch workers only.')\n\n N = 0 #4521 # set to 0 to use all\n gt, annos, doc_start, text, annos_dev, gt_dev, doc_start_dev, text_dev = load_arg_sentences(\n N, regen_data, second_batch_workers_only, gold_labelled_only)\n N = float(len(gt))\n\n valid_workers = np.any(annos != -1, axis=0)\n print('Valid workers for this subset are %s' % str(np.argwhere(valid_workers).flatten()))\n\n nclasses = 5\n\n # ------------------------------------------------------------------------------------------------------------------\n\n nu_factors = [0.1, 1, 10]\n diags = [0.1, 1, 10, 100]\n factors = [0.1, 1, 10, 100]\n\n # ------------------------------------------------------------------------------------------------\n #\n # exp = Experiment(None, nclasses, annos.shape[1], None, max_iter=20)\n #\n # methods_to_tune = [\n # 'ibcc',\n # 'bac_vec_integrateIF',\n # 'bac_seq_integrateIF',\n # # 'HMM_crowd',\n # 'bac_seq',\n # 'bac_seq_integrateIF_noHMM',\n # 'bac_ibcc',\n # 'bac_ibcc_integrateIF_noHMM',\n # # 'bac_ibcc_integrateIF',\n # # 'bac_acc_integrateIF',\n # # 'bac_mace_integrateIF',\n # ]\n #\n # for m, method in enumerate(methods_to_tune):\n # print('TUNING %s' % method)\n #\n # best_scores = exp.tune_alpha0(diags, factors, nu_factors, method, annos_dev, gt_dev, doc_start_dev,\n # output_dir, text_dev, metric_idx_to_optimise=8, new_data=regen_data)\n # best_idxs = best_scores[1:].astype(int)\n # exp.nu0_factor = nu_factors[best_idxs[0]]\n # exp.alpha0_diags = diags[best_idxs[1]]\n # exp.alpha0_factor = factors[best_idxs[2]]\n #\n # print('Best values for %s: %f, %f, %f' % (method, exp.nu0_factor, exp.alpha0_diags, exp.alpha0_factor))\n #\n # # this will run task 1 -- train on all crowdsourced data, test on the labelled portion thereof\n # exp.methods = [method]\n # exp.run_methods(annos, gt, doc_start, output_dir, text, rerun_all=True, return_model=True,\n # ground_truth_val=gt_dev, doc_start_val=doc_start_dev, text_val=text_dev,\n # new_data=regen_data\n # )\n\n # ------------------------------------------------------------------------------------------------------------------\n\n exp = Experiment(None, nclasses, annos.shape[1], None, max_iter=5)\n\n exp.save_results = True\n exp.opt_hyper = False #True\n\n # values obtained from tuning on dev:\n best_nu0factor = 1000#1\n best_diags = 1000\n best_factor = 0.1\n\n exp.nu0_factor = best_nu0factor\n exp.alpha0_diags = best_diags\n exp.alpha0_factor = best_factor\n\n exp.methods = [\n # 'bac_seq_integrateIF',\n # 'bac_seq',\n 'bac_seq_integrateIF_noHMM',\n ]\n\n exp.run_methods(annos, gt, doc_start, output_dir, text, rerun_all=True, test_no_crowd=False)\n #\n # # values obtained from tuning on dev:\n # best_nu0factor = 0.1\n # best_diags = 10\n # best_factor = 0.1\n #\n # exp.nu0_factor = best_nu0factor\n # exp.alpha0_diags = best_diags\n # exp.alpha0_factor = best_factor\n #\n # exp.methods = [\n # 'bac_vec_integrateIF',\n # ]\n #\n # exp.run_methods(annos, gt, doc_start, output_dir, text, rerun_all=True, test_no_crowd=False)\n #\n # best_nu0factor = 1\n # best_diags = 0.1\n # best_factor = 0.1\n #\n # exp.nu0_factor = best_nu0factor\n # exp.alpha0_diags = best_diags\n # exp.alpha0_factor = best_factor\n #\n # exp.methods = [\n # 'bac_mace_integrateIF',\n # 'bac_acc_integrateIF',\n # ]\n #\n # # exp.run_methods(annos, 
gt, doc_start, output_dir, text, rerun_all=True, test_no_crowd=False)\n #\n # # settings obtained by tuning on dev:\n # best_nu0factor = 0.1 # 1\n # best_diags = 100 # 0.1\n # best_factor = 0.1 # 0.1\n #\n # exp.nu0_factor = best_nu0factor\n # exp.alpha0_diags = best_diags\n # exp.alpha0_factor = best_factor\n #\n # exp.methods = [\n # # 'bac_ibcc_integrateIF',\n # 'bac_ibcc',\n # 'bac_ibcc_integrateIF_noHMM',\n # ]\n #\n # exp.run_methods(annos, gt, doc_start, output_dir, text, rerun_all=True, test_no_crowd=False)\n #\n # settings obtained from tuning on dev:\n # best_nu0factor = 1.0\n # best_diags = 1.0\n # best_factor = 5.0\n #\n # exp.nu0_factor = best_nu0factor\n # exp.alpha0_diags = best_diags\n # exp.alpha0_factor = best_factor\n #\n # exp.methods = [\n # # 'majority',\n # # 'mace',\n # # 'ds',\n # 'ibcc',\n # # 'best',\n # # 'worst',\n # # 'bac_seq_integrateIF_weakprior',\n # # 'bac_ibcc_integrateIF_weakprior',\n # # 'bac_vec_integrateIF_weakprior',\n # ]\n #\n # exp.run_methods(annos, gt, doc_start, output_dir, text, rerun_all=True, new_data=True)\n\n # # settings obtained from tuning on dev:\n # best_nu0factor = 0.1\n # best_diags = 0.1\n # best_factor = 0.1\n #\n # exp.nu0_factor = best_nu0factor\n # exp.alpha0_diags = best_diags\n # exp.alpha0_factor = best_factor\n #\n # exp.methods = [\n # # 'bac_vec_integrateIF',\n # 'HMM_crowd',\n # ]\n\n # exp.run_methods(annos, gt, doc_start, output_dir, text, rerun_all=True, new_data=True)" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.argwhere", "pandas.read_csv", "numpy.concatenate", "numpy.any", "pandas.DataFrame", "numpy.copy", "numpy.arange", "numpy.all", "numpy.where" ] ]
Stochastik-TU-Ilmenau/hospitalization-nowcast-hub
[ "df1b2f52060cfa5c275c8c25a0cf2d7b6ad5df0d" ]
[ "code/create_reporting_triangle.py" ]
[ "import pandas as pd\nfrom pathlib import Path\nfrom tqdm.auto import tqdm\ntqdm.pandas()\n\npath = Path('../data-truth/COVID-19/deconvoluted/')\nfiles = sorted([f.name for f in path.glob('**/*')])\ndates = [f[:10] for f in files]\n\ndfs = []\nfor f in files:\n date = f[:10]\n df_temp = pd.read_csv(path/f)\n df_temp = df_temp[df_temp.date == date]\n dfs.append(df_temp)\n\ndf = pd.concat(dfs)\ndf.date = pd.to_datetime(df.date)\ndates = pd.Series(df.date.unique())\ndf.rename(columns = {'value': 'value_0d'}, inplace = True)\n\nfor delay in tqdm(range(1, 81), total = 80):\n dfs_delayed = []\n for date in dates:\n date_delayed = (date + pd.Timedelta(days = delay)).date()\n if date_delayed <= max(dates):\n df_temp = pd.read_csv(path/f'{date_delayed}_COVID-19_hospitalization_deconvoluted.csv', parse_dates = ['date'])\n df_temp = df_temp[df_temp.date == date]\n dfs_delayed.append(df_temp)\n df_delayed = pd.concat(dfs_delayed)\n df_delayed.rename(columns = {'value': f'value_{delay}d'}, inplace = True)\n df = df.merge(df_delayed, how = 'left')\n \ndf_latest = pd.read_csv(path/files[-1], parse_dates = ['date'])\ndf_latest.rename(columns = {'value': f'value_>80d'}, inplace = True)\ndf = df.merge(df_latest, how = 'left')\n \ndf.iloc[:, 4:] = df.iloc[:, 3:].diff(axis=1).iloc[:, 1:]\n\nvalue_cols = [c for c in df.columns if 'value' in c]\nfor col in value_cols:\n df[col] = df[col].astype('Int64')\n \ndf.sort_values(['location', 'age_group', 'date'], inplace = True)\n\ndf.to_csv('../data-truth/COVID-19/COVID-19_hospitalizations.csv', index = False)\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.concat", "pandas.Timedelta" ] ]
zacario-li/Fast-SCNN_pytorch
[ "c7ff081e3ed626fcf7fc752696a38431f9a00942" ]
[ "models/fastscnn.py" ]
[ "'''\nfast scnn\n\nauthor: zacario li\ndate: 2020-03-27\n'''\nimport time\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass FastSCNN(nn.Module):\n def __init__(self, numClasses, aux=False, **kwargs):\n super(FastSCNN, self).__init__()\n # auxiliary, use to accelarate the convergence\n self.aux = aux\n \n # learning to down-sample (ph1)\n self.learningToDownSample = LearningToDownSample(32, 48, 64)\n # global feature extractor (ph2)\n self.globalFeatureExtractor = GlobalFeatureExtractor(64, [64, 96, 128], 128, 6, [3,3,3])\n # feature fusion (ph3)\n self.featureFusion = FeatureFusion(64,128, 128)\n # classifier (ph4)\n self.classifier = Classifier(128, numClasses)\n # for training only use\n if self.aux is not None:\n self.auxlayer = nn.Sequential(\n nn.Conv2d(64, 32, 3, padding=1, bias=False),\n nn.BatchNorm2d(32),\n nn.ReLU(True),\n nn.Dropout(0.1),\n nn.Conv2d(32, numClasses, 1)\n )\n\n def forward(self, x):\n inputSize = x.shape[2:]\n out = []\n # ph1\n ph1 = self.learningToDownSample(x)\n # ph2\n x = self.globalFeatureExtractor(ph1)\n # ph3\n x = self.featureFusion(ph1,x)\n # ph4\n x = self.classifier(x)\n # resize to input img size\n x = F.interpolate(x, inputSize, mode='bilinear', align_corners=True)\n out.append(x)\n # when training, use auxiliary\n if self.aux:\n auxout = self.auxlayer(ph1)\n auxout = F.interpolate(auxout, inputSize, mode='bilinear', align_corners=True)\n out.append(auxout)\n \n return out\n\n\n'''\ncommon used module in paper\nRed: Conv2D\nGray: DWConv\nBlue: DSConv\nGreen: Bottleneck\nPink: Pyramid Pooling\nYellow: Upsample\n'''\nclass _Conv2D(nn.Module):\n '''\n Red\n '''\n def __init__(self, inChannels, outChannels, kernel, stride=1, padding=0, **kwargs):\n super(_Conv2D, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(inChannels, outChannels, kernel, stride, padding, bias=False),\n nn.BatchNorm2d(outChannels),\n nn.ReLU(True)\n )\n \n def forward(self, x):\n x = self.conv(x)\n return x\n\nclass _DSConv(nn.Module):\n '''\n Blue\n '''\n def __init__(self, inChannels, outChannels, stride=1, **kwargs):\n super(_DSConv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(inChannels,inChannels, 3, stride, 1, groups=inChannels, bias=False),\n nn.BatchNorm2d(inChannels),\n nn.ReLU(True),\n nn.Conv2d(inChannels,outChannels,1,bias=False),\n nn.BatchNorm2d(outChannels),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n return self.conv(x)\n\nclass _DWConv(nn.Module):\n '''\n Gray\n '''\n def __init__(self, inChannels, outChannels, stride=1, **kwargs):\n super(_DWConv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(inChannels, outChannels, 3, stride, 1, groups=inChannels,bias=False),\n nn.BatchNorm2d(outChannels),\n nn.ReLU(True)\n )\n\n def forward(self, x):\n return self.conv(x)\n\nclass _Bottleneck(nn.Module):\n '''\n Green:\n Bottleneck\n '''\n def __init__(self, inChannels, outChannels, t=6, stride=2, **kwargs):\n super(_Bottleneck, self).__init__()\n self.shortcut = stride == 1 and inChannels == outChannels\n self.block = nn.Sequential(\n _Conv2D(inChannels, inChannels*t, 1),\n _DWConv(inChannels*t, inChannels*t, stride),\n #the last pointwise conv does not use non-linearity f. described in Table 2. 
Page 4\n nn.Conv2d(inChannels*t, outChannels, 1,bias=False),\n nn.BatchNorm2d(outChannels)\n )\n\n def forward(self, x):\n out = self.block(x)\n if self.shortcut:\n out = x + out\n return out\n\n\nclass _PPM(nn.Module):\n '''\n Pink\n '''\n def __init__(self,inChannels, outChannels, **kwargs):\n super(_PPM, self).__init__()\n # described in PSPNet paper(https://arxiv.org/pdf/1612.01105.pdf), 3.2, page 3\n tempChannel = int(inChannels/4)\n self.p1 = _Conv2D(inChannels, tempChannel, 1)\n self.p2 = _Conv2D(inChannels, tempChannel, 1)\n self.p3 = _Conv2D(inChannels, tempChannel, 1)\n self.p4 = _Conv2D(inChannels, tempChannel, 1)\n # why need conv2d here? There isn't any words about it in the paper\n self.cat = _Conv2D(inChannels*2, outChannels, 1)\n \n def featurePooling(self, x, size):\n avgp = nn.AdaptiveAvgPool2d(size)\n x = avgp(x)\n return x\n \n def upsample(self, x, size):\n return F.interpolate(x, size, mode='bilinear', align_corners=True)\n \n def forward(self, x):\n size = x.shape[2:]\n f1 = self.upsample(self.p1(self.featurePooling(x,1)),size)\n f2 = self.upsample(self.p2(self.featurePooling(x,2)),size)\n f3 = self.upsample(self.p3(self.featurePooling(x,3)),size)\n f4 = self.upsample(self.p4(self.featurePooling(x,6)),size)\n x = torch.cat([x, f1, f2, f3, f4],dim=1)\n x = self.cat(x)\n return x\n# ph1\nclass LearningToDownSample(nn.Module):\n '''\n ph1 has two dsconv, so wo need input these parameters\n '''\n def __init__(self, dsc1, dsc2, dsc2out, **kwargs):\n super(LearningToDownSample, self).__init__()\n # described in paper, Table 1, page 4\n self.conv = _Conv2D(3,dsc1, 3, 2)\n self.dsc1 = _DSConv(dsc1,dsc2,2)\n self.dsc2 = _DSConv(dsc2,dsc2out,2)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.dsc1(x)\n x = self.dsc2(x)\n return x\n\n# ph2\nclass GlobalFeatureExtractor(nn.Module):\n '''\n ph2\n '''\n def __init__(self, inChannels=64, btChannels=[64,96,128], \n outChannels=128, t=6, numBt=[3,3,3], **kwargs):\n super(GlobalFeatureExtractor, self).__init__()\n # described in paper, Figure 1, page 2, we have 3 different shape bottlenecks\n self.bt1 = self._make_btlayer(_Bottleneck, inChannels, btChannels[0],numBt[0],t,2)\n self.bt2 = self._make_btlayer(_Bottleneck, btChannels[0], btChannels[1],numBt[1],t,2)\n self.bt3 = self._make_btlayer(_Bottleneck, btChannels[1], btChannels[2],numBt[2],t,1)\n self.ppm = _PPM(btChannels[2],outChannels)\n\n def _make_btlayer(self, bt, inChannels, outChannels, numBlock, t=6, stride=1):\n layers = []\n layers.append(bt(inChannels, outChannels, t, stride))\n for i in range(1, numBlock):\n layers.append(bt(outChannels, outChannels, t, 1))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.bt1(x)\n x = self.bt2(x)\n x = self.bt3(x)\n x = self.ppm(x)\n return x\n\n\n# ph3\nclass FeatureFusion(nn.Module):\n def __init__(self, ph1InChannel, ph2InChannel, outChannels, scale=4, **kwargs):\n super(FeatureFusion, self).__init__()\n self.scale = scale\n self.dwconv = _DWConv(ph2InChannel,outChannels,1)\n self.upBranch = nn.Sequential(nn.Conv2d(outChannels, outChannels, 1),\n nn.BatchNorm2d(outChannels))\n self.downBranch = nn.Sequential(nn.Conv2d(ph1InChannel, outChannels, 1),\n nn.BatchNorm2d(outChannels))\n self.activation = nn.ReLU(True)\n \n\n def forward(self, ph1Feature, ph2Feature):\n xUp = F.interpolate(ph2Feature, size=ph1Feature.shape[2:], mode='bilinear', align_corners=True)\n xUp = self.dwconv(xUp)\n xUp = self.upBranch(xUp)\n \n xDown = self.downBranch(ph1Feature)\n \n out = xUp + xDown\n out = self.activation(out)\n 
return out\n \n# ph4\nclass Classifier(nn.Module):\n '''\n without upsample and softmax\n '''\n def __init__(self, inChannels, numClasses, stride=1):\n super(Classifier, self).__init__()\n # described in 3.2.4 Classifier, page 5\n self.dsconv1 = _DSConv(inChannels, inChannels, stride)\n self.dsconv2 = _DSConv(inChannels, inChannels, stride)\n self.conv = nn.Conv2d(inChannels, numClasses, 1)\n \n def forward(self, x):\n x = self.dsconv1(x)\n x = self.dsconv2(x)\n x = self.conv(x)\n return x\n \n \nif __name__ == '__main__':\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n ntimes = 100\n model = FastSCNN(4)\n model.cuda()\n model.eval()\n with torch.no_grad():\n x = torch.randn(1,3,320,320)\n x = x.cuda()\n # warmup\n out = model(x)\n start = time.time()\n for i in range(ntimes):\n model(x)\n print('fps is :', 1.0/((time.time() - start)/ntimes))" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.Dropout", "torch.randn", "torch.nn.AdaptiveAvgPool2d", "torch.no_grad", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU", "torch.cat", "torch.nn.functional.interpolate" ] ]
anhdang000/mmsegmentation
[ "3a189329ee919eb68a362361984c6f697aaf0788" ]
[ "tools/custom_test.py" ]
[ "import argparse\nimport os\nfrom os.path import join\n\nimport mmcv\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import (get_dist_info, init_dist, load_checkpoint,\n wrap_fp16_model)\nfrom mmcv.utils import DictAction\n\nfrom mmseg.apis import multi_gpu_test, single_gpu_test\nfrom mmseg.datasets import build_dataloader, build_dataset\nfrom mmseg.models import build_segmentor\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='mmseg test (and eval) a model')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file')\n parser.add_argument('--image-dir', help='Another image directory for evaluation')\n parser.add_argument('--mask', action='store_true', default=False, help='Save results as masks')\n parser.add_argument(\n '--aug-test', action='store_true', help='Use Flip and Multi scale aug')\n parser.add_argument('--out', help='output result file in pickle format')\n parser.add_argument(\n '--format-only',\n action='store_true',\n help='Format the output results without perform evaluation. It is'\n 'useful when you want to format the result to a specific format and '\n 'submit it to the test server')\n parser.add_argument(\n '--eval',\n type=str,\n nargs='+',\n help='evaluation metrics, which depends on the dataset, e.g., \"mIoU\"'\n ' for generic datasets, and \"cityscapes\" for Cityscapes')\n parser.add_argument('--show', action='store_true', help='show results')\n parser.add_argument(\n '--show-dir', help='directory where painted images will be saved')\n parser.add_argument(\n '--gpu-collect',\n action='store_true',\n help='whether to use gpu to collect results.')\n parser.add_argument(\n '--tmpdir',\n help='tmp directory used for collecting results from multiple '\n 'workers, available when gpu_collect is not specified')\n parser.add_argument(\n '--options', nargs='+', action=DictAction, help='custom options')\n parser.add_argument(\n '--eval-options',\n nargs='+',\n action=DictAction,\n help='custom options for evaluation')\n parser.add_argument(\n '--launcher',\n choices=['none', 'pytorch', 'slurm', 'mpi'],\n default='none',\n help='job launcher')\n parser.add_argument(\n '--opacity',\n type=float,\n default=0.5,\n help='Opacity of painted segmentation map. 
In (0, 1] range.')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n if 'LOCAL_RANK' not in os.environ:\n os.environ['LOCAL_RANK'] = str(args.local_rank)\n return args\n\n\ndef main():\n args = parse_args()\n\n assert args.out or args.eval or args.format_only or args.show \\\n or args.show_dir, \\\n ('Please specify at least one operation (save/eval/format/show the '\n 'results / save the results) with the argument \"--out\", \"--eval\"'\n ', \"--format-only\", \"--show\" or \"--show-dir\"')\n\n if args.eval and args.format_only:\n raise ValueError('--eval and --format_only cannot be both specified')\n\n if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n raise ValueError('The output file must be a pkl file.')\n\n cfg = mmcv.Config.fromfile(args.config)\n if args.options is not None:\n cfg.merge_from_dict(args.options)\n # set cudnn_benchmark\n if cfg.get('cudnn_benchmark', False):\n torch.backends.cudnn.benchmark = True\n if args.aug_test:\n # hard code index\n cfg.data.test.pipeline[1].img_ratios = [\n 0.5, 0.75, 1.0, 1.25, 1.5, 1.75\n ]\n cfg.data.test.pipeline[1].flip = True\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # init distributed env first, since logger depends on the dist info.\n if args.launcher == 'none':\n distributed = False\n else:\n distributed = True\n init_dist(args.launcher, **cfg.dist_params)\n\n # Modify config\n if not args.image_dir:\n cfg.data.test.data_root = '.'\n cfg.data.test.img_dir = 'test_images'\n else:\n cfg.data.test.data_root = '.'\n cfg.data.test.img_dir = args.image_dir\n\n cfg.data.test.split = 'test_list.txt'\n\n image_names = os.listdir(join(cfg.data.test.data_root, cfg.data.test.img_dir))\n image_ids = [img_name.split('.')[0] for img_name in image_names]\n\n with open(cfg.data.test.split, 'w') as f:\n f.write('\\n'.join(image_ids))\n \n # build the dataloader\n # TODO: support multiple images per gpu (only minor changes are needed)\n dataset = build_dataset(cfg.data.test)\n data_loader = build_dataloader(\n dataset,\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.workers_per_gpu,\n dist=distributed,\n shuffle=False)\n\n # build the model and load checkpoint\n cfg.model.train_cfg = None\n model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n wrap_fp16_model(model)\n checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n if 'CLASSES' in checkpoint.get('meta', {}):\n model.CLASSES = checkpoint['meta']['CLASSES']\n else:\n print('\"CLASSES\" not found in meta, use dataset.CLASSES instead')\n model.CLASSES = dataset.CLASSES\n if 'PALETTE' in checkpoint.get('meta', {}):\n model.PALETTE = checkpoint['meta']['PALETTE']\n else:\n print('\"PALETTE\" not found in meta, use dataset.PALETTE instead')\n model.PALETTE = dataset.PALETTE\n\n efficient_test = False\n if args.eval_options is not None:\n efficient_test = args.eval_options.get('efficient_test', False)\n\n if not distributed:\n model = MMDataParallel(model, device_ids=[0])\n outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,\n efficient_test, args.opacity)\n print(outputs[0].shape, set(outputs[0].flatten().tolist()))\n else:\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False)\n outputs = multi_gpu_test(model, data_loader, args.tmpdir,\n args.gpu_collect, efficient_test)\n\n rank, _ = get_dist_info()\n if rank == 0:\n if args.out:\n 
print(f'\\nwriting results to {args.out}')\n mmcv.dump(outputs, args.out)\n kwargs = {} if args.eval_options is None else args.eval_options\n if args.format_only:\n dataset.format_results(outputs, **kwargs)\n if args.eval:\n dataset.evaluate(outputs, args.eval, **kwargs)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.cuda.current_device" ] ]
mhamiltonj/EstimationPy3
[ "cdc0de3da05dd7a6b3a4d88ce76b477a1ff078c1" ]
[ "estimationpy/modelica/FmuExamples/Resources/Python-Scripts/addNoiseToData_StuckValve.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport getCSVdata\n\n# LINEAR CHARACTERISTIC - No DYNAMICS\n#inputFileName = '../data/SimulationData_StuckValve_lin_noDyn.csv'\n#outputFileName = '../data/NoisyData_StuckValve_lin_noDyn.csv'\n\n# LINEAR CHARACTERISTIC - DYNAMICS\n#inputFileName = '../data/SimulationData_StuckValve_lin_dyn.csv'\n#outputFileName = '../data/NoisyData_StuckValve_lin_dyn.csv'\n\n# QUADRATIC CHARACTERISTIC - No DYNAMICS\n#inputFileName = '../data/SimulationData_StuckValve_quad_noDyn.csv'\n#outputFileName = '../data/NoisyData_StuckValve_quad_noDyn.csv'\n\n# QUADRATIC CHARACTERISTIC - DYNAMICS\ninputFileName = '../data/SimulationData_StuckValve_quad_dyn.csv'\noutputFileName = '../data/NoisyData_StuckValve_quad_dyn.csv'\n\ndt = 2.0\n(DataMatrix, I, J, csv_writer) = getCSVdata.getCSVdata(inputFileName, outputFileName, dt)\n\n# the columns of the CSV file are\n# 0)Time,\n# 1)valveStuck.m_flow,\n# 2)valveStuck.dp,\n# 3)valveStuck.leakPosition,\n# 4)valveStuck.stuckPosition,\n# 5)valveStuck.valve.opening,\n# 6)valveStuck.cmd\n\n# define the amplitude of the noise for each column\n# the noise is uniform and of amplitude +/- Delta_*\nDelta_Dp = 10000.0\nDelta_mFlow = 0.05\n\n# compute the error vectors\nnoise_Dp = Delta_Dp*(2*np.random.random((I,)) - np.ones((I,)))\nnoise_mFlow = Delta_mFlow*(2*np.random.random((I,)) - np.ones((I,)))\n\n# create a copy of the original matrix and add the noise\nNoiseDataMatrix = DataMatrix.copy()\nNoiseDataMatrix[:,1] = NoiseDataMatrix[:,1] + noise_mFlow\nNoiseDataMatrix[:,2] = NoiseDataMatrix[:,2] + noise_Dp\nNoiseDataMatrix[:,3] = NoiseDataMatrix[:,3] \nNoiseDataMatrix[:,4] = NoiseDataMatrix[:,4]\nNoiseDataMatrix[:,6] = NoiseDataMatrix[:,6]\nprint(\"\\nComputed the noise to add...\")\n\n# write data to CSV file\nfor i in range(I):\n\tcsv_writer.writerow(NoiseDataMatrix[i,:])\nprint(\"Noise added\")\n\nprint(\"\\nPlotting...\")\n# plot the figures that show the difference between the simulation data\n# and the data corrupted by noise\nfig = plt.figure()\nax1 = fig.add_subplot(311)\nax1.plot(DataMatrix[:,0],DataMatrix[:,1],'b-', label='$m_{FLOW}$')\nax1.plot(DataMatrix[:,0],NoiseDataMatrix[:,1],'bo')\nax1.set_xlabel('Time [s]')\nax1.set_ylabel('Mass Flow Rate [kg/s]')\nax1.legend()\nax1.grid(True)\n\nax2 = fig.add_subplot(312)\nax2.plot(DataMatrix[:,0],DataMatrix[:,2],'b-', label='$\\Delta P$')\nax2.plot(DataMatrix[:,0],NoiseDataMatrix[:,2],'bo')\nax2.set_xlabel('Time [s]')\nax2.set_ylabel('Pressure difference [Pa]')\nax2.legend()\nax2.grid(True)\n\nax3 = fig.add_subplot(313)\nax3.plot(DataMatrix[:,0],DataMatrix[:,5],'g', label='$cmd$')\nax3.plot(DataMatrix[:,0],DataMatrix[:,6],'r', label='$position$')\nax3.set_xlabel('Time [s]')\nax3.set_ylabel('Actuator position [.]')\nax3.legend()\nax3.grid(True)\n\nplt.show()\n" ]
[ [ "numpy.random.random", "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "numpy.ones" ] ]
Ilyabasharov/made_mail.ru
[ "a81bfd874ab80eb8c7eaad8a4acf723f327f2f50" ]
[ "2_term/made_2021_ml_adv/homeworks/2/dataset.py" ]
[ "from datetime import (\n datetime,\n)\nfrom zipfile import (\n ZipFile,\n)\n\nimport pandas as pd\nimport pickle as pk\n\n\ndef question_rating_calc(\n tournament_name: dict,\n question_rating: list,\n dataset: dict,\n) -> pd.DataFrame:\n\n tournament_rating = {}\n\n questions_count = 0\n for tournament in dataset:\n n_questions = len(dataset[tournament]['teams'][0]['mask'])\n start_index, end_index = questions_count, questions_count + n_questions\n tournament_rating[tournament] = question_rating[start_index: end_index].mean()\n questions_count += n_questions\n\n tournament_rating = sorted(tournament_rating.items(), key=lambda x: x[1])\n df_tournament_rating = pd.DataFrame(tournament_rating, columns=['id', 'rating']).drop(columns=['rating'])\n df_tournament_rating['name'] = df_tournament_rating['id'].apply(lambda x: tournament_name[x])\n \n return df_tournament_rating\n\ndef read_dataset(\n path: str\n) -> dict:\n \n dataset = {}\n\n with ZipFile(path) as zipfile:\n for subfile in zipfile.namelist():\n with zipfile.open(subfile) as file:\n dataset[subfile.split('.')[0]] = pk.load(file)\n \n return dataset\n\ndef preprocess(\n dataset: dict,\n from_time: datetime,\n) -> tuple:\n \n all_data = {}\n tournament_names = {}\n \n for tournament_id in dataset['tournaments']:\n \n if not dataset['results'][tournament_id]:\n continue\n \n tournament_time = datetime.fromisoformat(\n dataset['tournaments'][tournament_id]['dateStart']\n ).replace(tzinfo=None)\n \n if tournament_time < from_time:\n continue\n \n questions_length = set()\n \n for team in dataset['results'][tournament_id]:\n if team.get('mask', None) is not None:\n questions_length.add(len(team['mask']))\n \n if len(questions_length) != 1:\n continue\n \n tournament_names[tournament_id] = dataset['tournaments'][tournament_id]['name']\n \n tournament = {\n 'time': tournament_time,\n 'teams': [],\n }\n \n for team in dataset['results'][tournament_id]:\n if team.get('mask', None) is None:\n continue\n \n tournament['teams'].append({\n 'id': team['team']['id'],\n 'mask': list(map(int, team['mask'].replace('X', '0').replace('?', '0'))),\n 'players': {\n player['player']['id']\n for player in team['teamMembers']\n }\n })\n \n if not tournament['teams']:\n continue\n \n all_data[tournament_id] = tournament\n \n return all_data, dataset['players'], tournament_names\n\ndef train_test_split(\n dataset: dict,\n train_time: datetime,\n test_time: datetime,\n) -> tuple:\n \n train, test = dict(), dict()\n \n for tournament_id in dataset:\n if dataset[tournament_id]['time'] >= test_time:\n test[tournament_id] = dataset[tournament_id]\n \n else:\n train[tournament_id] = dataset[tournament_id]\n \n return train, test" ]
[ [ "pandas.DataFrame" ] ]
VUB-HYDR/2020_Grant_etal
[ "59df2fe28857ce28a2f0e00b389d485786ab50a4" ]
[ "python/part2/plot/plot_p2.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 10 12:50:59 2020\n\n@author: Luke\n\"\"\"\n\n#==============================================================================\n# SUMMARY\n#==============================================================================\n\n\n# 18 May 2020\n\n# plots detection and attribution analysis\n\n\n#==============================================================================\n# IMPORT\n#==============================================================================\n\n\nimport os\nimport sys\nimport xarray as xr\nimport numpy as np\nimport matplotlib as mpl\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Rectangle\nfrom scipy import stats\nimport seaborn as sns\nfrom scipy.stats import norm\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\n#==============================================================================\n# FUNCTION\n#==============================================================================\n\ndef scale_take(array): #must take diff between b and sup/inf, store in separate lists\n b = array[1]\n b_inf = b - array[0]\n b_sup = array[2] - b\n p = array[3]\n return b,b_inf,b_sup,p\n\ndef plot_p2(outDIR,flag_svplt,endvariables,\\\n hist_mmm,pi_mmm,era5_obs,\\\n samples,mean,n,std,\\\n histmmm_obs_pcc,histmmm_obs_spcc,\\\n pi_histmmm_pcc,pi_histmmm_spcc,\\\n cc_99,cc_95,cc_90,\\\n var_fin):\n \n #==============================================================================\n # INITIALIZE\n #==============================================================================\n \n # figure size\n f = plt.figure(figsize=(10,14))\n \n #========== TEMPORAL ==========#\n \n # opt fing rect, rect=[left, bottom, right, top]\n t_left = 0.475\n t_bottom = 0.025\n t_right = 1\n t_top = 1.0\n t_rect = [t_left, t_bottom, t_right, t_top]\n \n # temporal - gs1; ax1,ax2,ax3, ax4\n gs1 = gridspec.GridSpec(4,1)\n ax1 = f.add_subplot(gs1[0]) \n ax2 = f.add_subplot(gs1[1]) \n ax3 = f.add_subplot(gs1[2]) \n ax4 = f.add_subplot(gs1[3]) \n gs1.tight_layout(figure=f, rect=t_rect, h_pad=5)\n \n temp_axes = [ax1,ax2,ax3,ax4]\n\n #========== CORRELATION ==========# \n \n gs5 = gridspec.GridSpec(4,1)\n \n # corr detection rect, rect=[left, bottom, right, top]\n c_left = 0\n c_bottom = 0.025\n c_right = 0.425\n c_top = 1.0\n c_rect = [c_left, c_bottom, c_right, c_top]\n ax13 = f.add_subplot(gs5[0])\n ax14 = f.add_subplot(gs5[1])\n ax15 = f.add_subplot(gs5[2])\n ax16 = f.add_subplot(gs5[3])\n corr_axes = [ax13,ax14,ax15,ax16]\n gs5.tight_layout(figure=f, rect=c_rect, h_pad=5)\n \n #==============================================================================\n # GENERAL TIMESERIES + SCALING SETTINGS\n #==============================================================================\n \n # list of figure panel ids\n letters = ['a', 'b', 'c',\\\n 'd', 'e', 'f',\\\n 'g', 'h', 'i',\\\n 'j', 'k', 'l']\n \n #========== LINE THICKNESS ==========#\n \n # mean line thickness\n lw_mean = 1.0\n \n # era5-land line thickness\n lw_era5 = 1.0\n \n #========== PLOT COLORS ==========#\n \n col_pimean = 'dodgerblue' # picontrol mean color\n col_pifill = '#a6bddb' # picontrol fill color\n col_PICmean = 'mediumblue' # PIC block mean color\n col_histmean = '0.3' # historical mean color\n col_histfill = '0.75' # historical fill color\n col_rcp26mean = 'darkgreen' # rcp26 mean color\n col_rcp26fill = '#adebad' # rcp26 
fill color\n col_rcp60mean = 'darkgoldenrod' # rcp60 mean color\n col_rcp60fill = '#ffec80' # rcp60 fill color\n col_rcp85mean = 'darkred' # rcp85 mean color\n col_rcp85fill = '#F08080' # rcp85 fill color\n col_ALLmean = 'red' # ALL block mean color \n ub_alpha = 0.5\n col_era5 = 'k'\n col_OBSmean = 'k' # OBS block mean color\n \n #========== AXII ==========#\n \n # ymin\n ymin_ice = -10 # ymin ice vars\n ymax_ice = 10 # ymax ice vars\n ymin_wat = -0.25 # ymin watertemp \n ymax_wat = 1 # ymax watertemp\n xmin = 1985 # xmin\n xmax = 2018 # xmax\n \n # y ticks; ice\n yticks_ice = np.arange(ymin_ice,ymax_ice+2.5,2.5)\n ytick_labels_ice = [-10,None,-5,None,0,None,5,None,10]\n \n # y ticks; watertemp\n yticks_wat = np.arange(ymin_wat,ymax_wat+0.25,0.25)\n ytick_labels_wat = [None,0,None,0.5,None,1.0]\n \n # x ticks; timeseries\n xticks_ts = np.arange(1990,2020,5)\n xtick_labels_ts = [1990,None,2000,None,2010,None]\n \n # x ticks temporal OF insets\n xticks_OF = [0.5]\n xtick_labels_OF = ['EXT']\n \n # y ticks temporal OF insets\n yticks_OF = np.arange(-0.5,2.5,0.5)\n ytick_labels_OF = [None, '0', None, '1', None, '2']\n \n #========== FONTS ==========#\n \n title_font = 14\n tick_font = 10\n axis_font = 11\n legend_font = 11\n inset_font = 9\n \n #==============================================================================\n # CORRELATION\n #==============================================================================\n \n # main plot x axis label\n xlabel = 'Spearman (rank) correlation coefficient'\n xlabel_xpos = 0.225\n xlabel_ypos = 0.0\n \n ylabel = 'Density [-]'\n \n #========== LEGEND ==========#\n \n # bbox\n le_x0 = 0.365\n le_y0 = 0.48\n le_xlen = 0.15\n le_ylen = 0.25\n \n # space between entries\n legend_entrypad = 0.5\n \n # length per entry\n legend_entrylen = 0.75\n \n # space between entries\n legend_spacing = 1.5\n \n yticks_corr = np.arange(0,3,0.5)\n ytick_labels_corr = [0,None,1.0,None,2.0,None,]\n \n xticks_corr = np.arange(-0.8,1.0,0.2)\n xtick_labels_corr = [-0.8,None,-0.4,None,0,None,0.4,None,0.8]\n \n count = 0\n \n for endvar,ax in zip(endvariables,corr_axes):\n \n count += 1\n \n sns.distplot(samples[endvar], bins=20, hist_kws={\"color\":\"0.4\"},fit=norm, fit_kws={\"color\":\"0.4\"},\\\n norm_hist=True, kde=False, label='PIC, EXT', ax=ax)\n ax.axvline(x=histmmm_obs_spcc[endvar], color='red', linewidth=2, label='EXT, ERA5-land')\n ax.vlines(x=cc_99[endvar], ymin=0, ymax=0.125, colors='blue', linewidth=2.5, linestyle='-', label='99%', zorder=0)\n ax.vlines(x=cc_95[endvar], ymin=0, ymax=0.125, colors='blue', linewidth=1, linestyle='-', label='95%', zorder=0)\n ax.tick_params(labelsize=tick_font)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.set_title(letters[count-1],loc='left',fontsize=title_font,fontweight='bold')\n ax.yaxis.set_ticks(yticks_corr)\n ax.yaxis.set_ticklabels(ytick_labels_corr)\n ax.set_ylabel(ylabel)\n ax.set_ylim(yticks_corr[0],yticks_corr[-1])\n ax.xaxis.set_ticks(xticks_corr)\n ax.xaxis.set_ticklabels(xtick_labels_corr)\n \n ax13.legend(frameon=False,bbox_to_anchor=(le_x0, le_y0, le_xlen, le_ylen),\\\n fontsize=legend_font,labelspacing=legend_spacing)\n \n f.text(xlabel_xpos, xlabel_ypos, xlabel, ha='center', fontsize=axis_font)\n \n #==============================================================================\n # OF\n #==============================================================================\n \n # main plot x axis label\n xlabel = 'Years'\n xlabel_xpos = 0.735\n xlabel_ypos = 0.0\n \n #========== 
LEGEND ==========#\n \n # bbox\n le_x0 = 0.75\n le_y0 = 0.975\n le_xlen = 0.2\n le_ylen = 0.1\n \n # space between marker and label\n legend_entrypad = 0.5\n \n # length per entry\n legend_entrylen = 0.75\n \n # space between entries\n legend_spacing = 2.2\n \n time_ice = np.arange(1981,2018)\n time_wt = np.arange(1981,2018)\n \n for endvar,ax in zip(endvariables,temp_axes):\n \n if endvar == 'watertemp':\n time = time_wt\n else:\n time = time_ice\n \n count += 1\n \n # OF data prep\n b85,b_inf85,b_sup85,p85 = scale_take(var_fin[endvar])\n \n \n infers = [b_inf85]\n supers = [b_sup85]\n err = np.stack([infers,supers],axis=0)\n x = [0.5]\n y = [b85]\n\n # timeseries\n ax.plot(time, pi_mmm[endvar][0], lw=lw_mean, color=col_pimean, label='PIC', zorder=1)\n ax.fill_between(time, (pi_mmm[endvar][0] + pi_mmm[endvar][1]),\\\n ((pi_mmm[endvar][0]) - pi_mmm[endvar][1]),\\\n lw=0.1, color=col_pifill, zorder=1)\n ax.plot(time, hist_mmm[endvar][0], lw=lw_mean, color=col_rcp85mean, label='EXT', zorder=1)\n ax.fill_between(time, (hist_mmm[endvar][0] + hist_mmm[endvar][1]),\\\n (hist_mmm[endvar][0] - hist_mmm[endvar][1]),\n lw=0.1, color=col_rcp85fill, zorder=1 ,alpha=ub_alpha)\n ax.plot(time, era5_obs[endvar], lw=lw_era5, color=col_era5, label='OBS', zorder=4)\n \n if count == 5:\n ax_ins = inset_axes(ax, width=\"20%\", height=\"35%\", loc=2, borderpad=3)\n elif count == 6:\n ax_ins = inset_axes(ax, width=\"20%\", height=\"35%\", loc=2, borderpad=3)\n elif count == 7:\n ax_ins = inset_axes(ax, width=\"20%\", height=\"35%\", loc=3, borderpad=3)\n elif count == 8:\n ax_ins = inset_axes(ax, width=\"20%\", height=\"35%\", loc=3, borderpad=3)\n \n ax_ins.errorbar(x=x,y=y,yerr=err,\n fmt='o',\n markersize=3,\n ecolor=col_rcp85mean,\n markerfacecolor=col_rcp85mean,\n mec=col_rcp85mean,\n capsize=5,\n elinewidth=2,\n markeredgewidth=1)\n \n ax_ins.set_ylim(-0.5,2)\n ax_ins.set_xlim(0,1)\n \n ax_ins.hlines(y=1,xmin=0,xmax=3,colors='k',linestyle='dashed',linewidth=1)\n ax_ins.hlines(y=0,xmin=0,xmax=3,colors='k',linestyle='solid',linewidth=0.25)\n \n \n ax_ins.xaxis.set_ticks(xticks_OF)\n ax_ins.xaxis.set_ticklabels(xtick_labels_OF,fontsize=inset_font)\n \n ax_ins.yaxis.set_ticks(yticks_OF)\n ax_ins.yaxis.set_ticklabels(ytick_labels_OF,fontsize=inset_font)\n \n # settings for timeseries plots\n if 'ice' in endvar:\n if count == 2:\n ax.set_ylim(ymin_ice+5,ymax_ice)\n ax.yaxis.set_ticks(yticks_ice[2:])\n ax.yaxis.set_ticklabels(ytick_labels_ice[2:])\n elif count == 3:\n ax.set_ylim(ymin_ice,ymax_ice-5)\n ax.yaxis.set_ticks(yticks_ice[:-2])\n ax.yaxis.set_ticklabels(ytick_labels_ice[:-2])\n elif count ==4:\n ax.set_ylim(ymin_ice,ymax_ice-5)\n ax.yaxis.set_ticks(yticks_ice[:-2])\n ax.yaxis.set_ticklabels(ytick_labels_ice[:-2])\n elif 'water' in endvar:\n ax.set_ylim(ymin_wat,ymax_wat)\n ax.yaxis.set_ticks(yticks_wat)\n ax.yaxis.set_ticklabels(ytick_labels_wat)\n \n ax.set_xlim(xmin,xmax) \n ax.xaxis.set_ticks(xticks_ts)\n ax.xaxis.set_ticklabels(xtick_labels_ts)\n ax.set_title(letters[count-1],loc='left',fontsize=title_font,fontweight='bold')\n ax.tick_params(labelsize=tick_font,axis=\"x\",direction=\"in\",labelleft=\"on\",left=True)\n ax.tick_params(labelsize=tick_font,axis=\"y\",direction=\"in\",labelbottom=\"on\",bottom=True,top=False,)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.yaxis.grid(color='0.8', linestyle='dashed', linewidth=0.5)\n ax.xaxis.grid(color='0.8', linestyle='dashed', linewidth=0.5)\n ax.set_axisbelow(True)\n \n # labels\n if endvar == 'icedur':\n ylabel = 
'Ice duration anomaly (days)'\n elif endvar == 'icestart':\n ylabel = 'Ice onset anomaly (days)'\n elif endvar == 'iceend':\n ylabel = 'Ice break-up anomaly (days)'\n elif endvar == 'watertemp':\n ylabel = 'Water temperature anomaly (°C)'\n \n ax.set_ylabel(ylabel, va='center', rotation='vertical', fontsize=axis_font, labelpad=10)\n\n \n labels = ['EXT', 'OBS', 'PIC']\n handles = [Rectangle((0,0),1,1,color=col_rcp85fill),\\\n Line2D([0],[0],linestyle='-',lw=2,color=col_era5),\\\n Rectangle((0,0),1,1,color=col_pifill)]\n \n f.legend(handles, labels, bbox_to_anchor=(le_x0, le_y0, le_xlen, le_ylen), loc=3,\n ncol=3, mode=\"expand\", borderaxespad=0.,\\\n frameon=False, columnspacing=0.1, handlelength=legend_entrylen,\\\n handletextpad=legend_entrypad,\\\n fontsize=legend_font,labelspacing=legend_spacing)\n \n f.text(xlabel_xpos, xlabel_ypos, xlabel, ha='center', fontsize=axis_font)\n \n plt.show()\n \n # save figure\n if flag_svplt == 0:\n None\n elif flag_svplt == 1:\n f.savefig(outDIR+'/f2.png',bbox_inches='tight',dpi=500)\n" ]
[ [ "matplotlib.lines.Line2D", "matplotlib.pyplot.figure", "numpy.arange", "matplotlib.pyplot.show", "matplotlib.patches.Rectangle", "numpy.stack", "matplotlib.gridspec.GridSpec" ] ]
hcc-test/test
[ "1d7aa18b7b19f8373420367c6f76b7db6da43e9c" ]
[ "infer_contrast.py" ]
[ "import argparse\nimport functools\n\nimport numpy as np\nimport torch\n\nfrom utils.reader import load_audio\nfrom utils.utility import add_arguments, print_arguments\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\nadd_arg('audio_path1', str, 'audio_db/李达康.wav', '预测第一个音频')\nadd_arg('audio_path2', str, 'audio_db/李达康.wav', '预测第二个音频')\nadd_arg('threshold', float, 0.71, '判断是否为同一个人的阈值')\nadd_arg('input_shape', str, '(1, 257, 257)', '数据输入的形状')\nadd_arg('model_path', str, 'models/resnet34.pth', '预测模型的路径')\nargs = parser.parse_args()\n\nprint_arguments(args)\n\n#device = torch.device(\"cuda\")\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n# 加载模型\nmodel = torch.jit.load(args.model_path)\nmodel.to(device)\n\nmodel.eval()\n\n\n# 预测音频\ndef infer(audio_path):\n input_shape = eval(args.input_shape)\n data = load_audio(audio_path, mode='infer', spec_len=input_shape[2])\n data = data[np.newaxis, :]\n data = torch.tensor(data, dtype=torch.float32, device=device)\n # 执行预测\n feature = model(data)\n return feature.data.cpu().numpy()\n\n\nif __name__ == '__main__':\n # 要预测的两个人的音频文件\n feature1 = infer(args.audio_path1)[0]\n feature2 = infer(args.audio_path2)[0]\n # 对角余弦值\n dist = np.dot(feature1, feature2) / (np.linalg.norm(feature1) * np.linalg.norm(feature2))\n if dist > args.threshold:\n print(\"%s 和 %s 为同一个人,相似度为:%f\" % (args.audio_path1, args.audio_path2, dist))\n else:\n print(\"%s 和 %s 不是同一个人,相似度为:%f\" % (args.audio_path1, args.audio_path2, dist))\n" ]
[ [ "torch.tensor", "torch.cuda.is_available", "torch.jit.load", "numpy.dot", "numpy.linalg.norm" ] ]
wweschen/Capstone
[ "52ed3209bd7da9bf5070ded5ecad32b61bbe84cf" ]
[ "coqa_lib.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Library to process data for Coqa 1.1 and Coqa 2.0.\"\"\"\n\n# pylint: disable=g-bad-import-order\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport copy\nimport json\nimport math\nimport six\n\nfrom absl import logging\nimport tensorflow as tf\n\nfrom bert import tokenization\n\nclass CoqaExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n story_id,\n turn_id,\n question_text,\n doc_tokens,\n gold_answer_text=None,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n qa_history_text = None):\n self.story_id = story_id\n self.turn_id=turn_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.gold_answer_text=gold_answer_text\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.qa_history_text = qa_history_text\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"\\nstory_id: %s\" % (tokenization.printable_text(self.story_id))\n s +=\"\\nturn id: %s\" % (self.turn_id)\n s += \"\\nquestion_text: %s\" % (\n tokenization.printable_text(self.question_text))\n s +=\"\\ngold_answer_text: %s\" % (self.gold_answer_text)\n s +=\"\\noriginal text: %s\" % (self.orig_answer_text)\n s += \"\\ndoc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n s += \"\\nqa_history_text: [%s]\" % (self.qa_history_text)\n if self.start_position:\n s += \"\\nstart_position: %d\" % (self.start_position)\n if self.start_position:\n s += \"\\nend_position: %d\" % (self.end_position)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n answer_ids=None,\n start_position=None,\n end_position=None ):\n self.unique_id = unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.answer_ids=answer_ids\n self.start_position = start_position\n self.end_position = end_position\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.compat.v1.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a 
InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n features[\"answer_ids\"] = create_int_feature(feature.answer_ids)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\n\ndef read_coqa_examples(input_file, is_training):\n \"\"\"Read a CoQA json file into a list of SquadExample.\"\"\"\n\n with tf.io.gfile.GFile(input_file, \"r\") as reader:\n input_data = json.load(reader)[\"data\"]\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n\n #for entry in input_data:\n for entry in input_data:\n paragraph_text = entry[\"story\"]\n story_id = entry[\"id\"]\n # print(story_id)\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n\n for c in paragraph_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n history_qas = []\n for i in range(len(entry[\"questions\"])):\n que = entry[\"questions\"][i]\n turn_id = int(que[\"turn_id\"])\n question_text = que[\"input_text\"]\n # print('turn id:',turn_id)\n answer = entry['answers'][i]\n start_position = None\n end_position = None\n orig_answer_text = \"\"\n qa_history_text = \"\"\n\n orig_answer_text = answer[\"span_text\"]\n gold_answer_text = answer[\"input_text\"]\n\n answer_offset = answer[\"span_start\"]\n answer_length = len(orig_answer_text)\n if answer_offset <0: #here we have bad data, we don't want to generate wrong start/end positions\n start_position = 0\n end_position = 0\n else:\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length - 1]\n\n history_qas.append('{} {}'.format(question_text, gold_answer_text))\n\n\n for j in range(i):\n qa_history_text=qa_history_text + '.' 
+ history_qas[j]\n\n\n if not is_training:\n start_position = -1\n end_position = -1\n orig_answer_text = \"\"\n gold_answer_text = \"\"\n\n example = CoqaExample(\n story_id=story_id,\n turn_id=turn_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n gold_answer_text=gold_answer_text,\n orig_answer_text=orig_answer_text,\n start_position=start_position,\n end_position=end_position,\n qa_history_text=qa_history_text\n )\n examples.append(example)\n\n return examples\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n base_id = 1000000000\n unique_id = base_id\n\n for (example_index, example) in enumerate(examples):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n max_qa_history = max_query_length - len(query_tokens)\n qa_history_tokens = tokenizer.tokenize(example.qa_history_text)\n\n qa_history_tokens = qa_history_tokens[ -1 * min(len(qa_history_tokens) , max_qa_history):]\n #fetch lastest QA up to the maximum qa history length\n\n\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n answer_tokens=[]\n if is_training:\n\n answer_tokens = tokenizer.tokenize(example.gold_answer_text)\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - len(qa_history_tokens)- 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n\n\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n 
tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n\n for token in qa_history_tokens:\n tokens.append(token)\n segment_ids.append(1)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n answer_ids=tokenizer.convert_tokens_to_ids(answer_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n\n\n if is_training:\n\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n if end_position <0 or start_position<0:\n end_position=0\n start_position=0\n\n if example_index < 20:\n logging.info(\"*** Example ***\")\n logging.info(\"unique_id: %s\" % (unique_id))\n logging.info(\"example_index: %s\" % (example_index))\n logging.info(\"doc_span_index: %s\" % (doc_span_index))\n logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n if is_training:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n logging.info(\"start_position: %d\" % (start_position))\n logging.info(\"end_position: %d\" % (end_position))\n logging.info(\n \"rationale: %s\" % (tokenization.printable_text(answer_text)))\n logging.info(\n \"span text: %s\" % (tokenization.printable_text(example.orig_answer_text)))\n\n logging.info(\n \"answer: %s\" % (tokenization.printable_text(example.gold_answer_text)))\n logging.info(\n \"answer_ids: %s\" % \" \".join([str(x) for x in answer_ids]))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n answer_ids=answer_ids,\n start_position=start_position,\n end_position=end_position)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n return unique_id - base_id\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The CoQA annotations are character based. 
We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in CoQA, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. 
We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n \n\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)): # pylint: disable=consider-using-enumerate\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\ndef generate_tf_record_from_json_file(input_file_path,\n vocab_file_path,\n output_path,\n max_seq_length=384,\n do_lower_case=True,\n max_query_length=128,\n doc_stride=128 ):\n \"\"\"Generates and saves training data into a tf record file.\"\"\"\n train_examples = read_coqa_examples(\n input_file=input_file_path,\n is_training=True )\n tokenizer = tokenization.FullTokenizer(\n vocab_file=vocab_file_path, do_lower_case=do_lower_case)\n train_writer = FeatureWriter(filename=output_path, is_training=True)\n number_of_examples = convert_examples_to_features(\n examples=train_examples,\n tokenizer=tokenizer,\n max_seq_length=max_seq_length,\n doc_stride=doc_stride,\n max_query_length=max_query_length,\n is_training=True,\n output_fn=train_writer.process_feature\n )\n train_writer.close()\n\n meta_data = {\n \"task_type\": \"bert_coqa\",\n \"train_data_size\": number_of_examples,\n \"max_seq_length\": max_seq_length,\n \"max_query_length\": max_query_length,\n \"doc_stride\": doc_stride,\n }\n\n return meta_data\n\n\ndef write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = 
collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n _Prediction = collections.namedtuple('Prediction',['id','turn_id','answer'])\n\n all_predictions = [] #collections.OrderedDict()\n\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n if feature.unique_id not in unique_id_to_result:\n logging.info('%s not found.' % (feature.unique_id))\n if feature.unique_id in unique_id_to_result:\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n 
end_logit=pred.end_logit))\n\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n\n all_predictions.append({\"id\":example.story_id,\"turn_id\":example.turn_id,\"answer\": nbest_json[0][\"text\"]})\n\n\n with tf.io.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the CoQA eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. 
If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text\n" ]
[ [ "tensorflow.compat.v1.python_io.TFRecordWriter", "tensorflow.train.Features", "tensorflow.io.gfile.GFile" ] ]
dxfg/2DVoxelmorph
[ "115f4f0776f9c6f94a7706983ed7f0dc926b80ea" ]
[ "ext/medipy-lib/medipy/metrics.py" ]
[ "'''\nmetrics\n\nContact: [email protected]\n'''\n\n# imports\nimport numpy as np\n\n\ndef dice(vol1, vol2, labels=None, nargout=1):\n '''\n Dice [1] volume overlap metric\n\n The default is to *not* return a measure for the background layer (label = 0)\n\n [1] Dice, Lee R. \"Measures of the amount of ecologic association between species.\"\n Ecology 26.3 (1945): 297-302.\n\n Parameters\n ----------\n vol1 : nd array. The first volume (e.g. predicted volume)\n vol2 : nd array. The second volume (e.g. \"true\" volume)\n labels : optional vector of labels on which to compute Dice.\n If this is not provided, Dice is computed on all non-background (non-0) labels\n nargout : optional control of output arguments. if 1, output Dice measure(s).\n if 2, output tuple of (Dice, labels)\n\n Output\n ------\n if nargout == 1 : dice : vector of dice measures for each labels\n if nargout == 2 : (dice, labels) : where labels is a vector of the labels on which\n dice was computed\n '''\n if labels is None:\n labels = np.unique(np.concatenate((vol1, vol2)))\n labels = np.delete(labels, np.where(labels == 0)) # remove background\n\n dicem = np.zeros(len(labels))\n for idx, lab in enumerate(labels):\n top = 2 * np.sum(np.logical_and(vol1 == lab, vol2 == lab))\n bottom = np.sum(vol1 == lab) + np.sum(vol2 == lab)\n bottom = np.maximum(bottom, np.finfo(float).eps) # add epsilon.\n dicem[idx] = top / bottom\n\n if nargout == 1:\n return dicem\n else:\n return (dicem, labels)\n" ]
[ [ "numpy.sum", "numpy.finfo", "numpy.concatenate", "numpy.logical_and", "numpy.where" ] ]
leondgarse/addons
[ "6c512e1ec49ae14ca61f1bbf0fd3403204ba21e2" ]
[ "tensorflow_addons/layers/tests/normalizations_test.py" ]
[ "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n\nimport pytest\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_addons.layers.normalizations import FilterResponseNormalization\nfrom tensorflow_addons.layers.normalizations import GroupNormalization\nfrom tensorflow_addons.layers.normalizations import InstanceNormalization\n\n\n# ------------Tests to ensure proper inheritance. If these suceed you can\n# test for Instance norm by setting Groupnorm groups = -1\ndef test_inheritance():\n assert issubclass(InstanceNormalization, GroupNormalization)\n assert InstanceNormalization.build == GroupNormalization.build\n assert InstanceNormalization.call == GroupNormalization.call\n\n\ndef test_groups_after_init():\n layers = InstanceNormalization()\n assert layers.groups == -1\n\n\ndef test_weights():\n # Check if weights get initialized correctly\n layer = GroupNormalization(groups=1, scale=False, center=False)\n layer.build((None, 3, 4))\n assert len(layer.trainable_weights) == 0\n assert len(layer.weights) == 0\n\n layer = InstanceNormalization()\n layer.build((None, 3, 4))\n assert len(layer.trainable_weights) == 2\n assert len(layer.weights) == 2\n\n\ndef test_apply_normalization():\n input_shape = (1, 4)\n reshaped_inputs = tf.constant([[[2.0, 2.0], [3.0, 3.0]]])\n layer = GroupNormalization(groups=2, axis=1, scale=False, center=False)\n normalized_input = layer._apply_normalization(reshaped_inputs, input_shape)\n np.testing.assert_equal(normalized_input, np.array([[[0.0, 0.0], [0.0, 0.0]]]))\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_reshape():\n def run_reshape_test(axis, group, input_shape, expected_shape):\n group_layer = GroupNormalization(groups=group, axis=axis)\n group_layer._set_number_of_groups_for_instance_norm(input_shape)\n\n inputs = np.ones(input_shape)\n tensor_input_shape = tf.convert_to_tensor(input_shape)\n reshaped_inputs, group_shape = group_layer._reshape_into_groups(\n inputs, (10, 10, 10), tensor_input_shape\n )\n for i in range(len(expected_shape)):\n assert group_shape[i] == expected_shape[i]\n\n input_shape = (10, 10, 10)\n expected_shape = [10, 10, 5, 2]\n run_reshape_test(2, 5, input_shape, expected_shape)\n\n input_shape = (10, 10, 10)\n expected_shape = [10, 2, 5, 10]\n run_reshape_test(1, 2, input_shape, expected_shape)\n\n input_shape = (10, 10, 10)\n expected_shape = [10, 10, 10]\n run_reshape_test(1, -1, input_shape, expected_shape)\n\n input_shape = (10, 10, 10)\n expected_shape = [10, 1, 10, 10]\n run_reshape_test(1, 1, input_shape, expected_shape)\n\n\[email protected](\"center\", [True, False])\[email protected](\"scale\", [True, False])\ndef test_feature_input(center, scale):\n shape = (10, 100)\n for groups in [-1, 1, 2, 5]:\n _test_random_shape_on_all_axis_except_batch(shape, groups, center, scale)\n\n\[email protected](\"center\", [True, False])\[email 
protected](\"scale\", [True, False])\ndef test_picture_input(center, scale):\n shape = (10, 30, 30, 3)\n for groups in [-1, 1, 3]:\n _test_random_shape_on_all_axis_except_batch(shape, groups, center, scale)\n\n\ndef _test_random_shape_on_all_axis_except_batch(shape, groups, center, scale):\n inputs = tf.random.normal(shape)\n for axis in range(1, len(shape)):\n _test_specific_layer(inputs, axis, groups, center, scale)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef _test_specific_layer(inputs, axis, groups, center, scale):\n\n input_shape = inputs.shape\n\n # Get Output from Keras model\n layer = GroupNormalization(axis=axis, groups=groups, center=center, scale=scale)\n model = tf.keras.models.Sequential()\n model.add(layer)\n outputs = model.predict(inputs, steps=1)\n assert not np.isnan(outputs).any()\n\n is_instance_norm = False\n # Create shapes\n if groups == -1:\n groups = input_shape[axis]\n if (input_shape[axis] // groups) == 1:\n is_instance_norm = True\n np_inputs = inputs\n reshaped_dims = list(np_inputs.shape)\n if not is_instance_norm:\n reshaped_dims[axis] = reshaped_dims[axis] // groups\n reshaped_dims.insert(axis, groups)\n reshaped_inputs = np.reshape(np_inputs, tuple(reshaped_dims))\n else:\n reshaped_inputs = np_inputs\n\n group_reduction_axes = list(range(1, len(reshaped_dims)))\n if not is_instance_norm:\n axis = -2 if axis == -1 else axis - 1\n else:\n axis = -1 if axis == -1 else axis - 1\n group_reduction_axes.pop(axis)\n\n # Calculate mean and variance\n mean = np.mean(reshaped_inputs, axis=tuple(group_reduction_axes), keepdims=True)\n variance = np.var(reshaped_inputs, axis=tuple(group_reduction_axes), keepdims=True)\n\n # Get gamma and beta initalized by layer\n gamma, beta = layer._get_reshaped_weights(input_shape)\n if gamma is None:\n gamma = 1.0\n if beta is None:\n beta = 0.0\n\n # Get ouput from Numpy\n zeroed = reshaped_inputs - mean\n rsqrt = 1 / np.sqrt(variance + 1e-5)\n output_test = gamma * zeroed * rsqrt + beta\n\n # compare outputs\n output_test = tf.reshape(output_test, input_shape)\n np.testing.assert_almost_equal(tf.reduce_mean(output_test - outputs), 0, decimal=7)\n\n\ndef _create_and_fit_sequential_model(layer, shape):\n # Helperfunction for quick evaluation\n np.random.seed(0x2020)\n model = tf.keras.models.Sequential()\n model.add(layer)\n model.add(tf.keras.layers.Dense(32))\n model.add(tf.keras.layers.Dense(1))\n\n model.compile(\n optimizer=tf.keras.optimizers.RMSprop(0.01), loss=\"categorical_crossentropy\"\n )\n layer_shape = (10,) + shape\n input_batch = np.random.rand(*layer_shape)\n output_batch = np.random.rand(*(10, 1))\n model.fit(x=input_batch, y=output_batch, epochs=1, batch_size=1)\n return model\n\n\ndef test_groupnorm_flat():\n # Check basic usage of groupnorm_flat\n # Testing for 1 == LayerNorm, 16 == GroupNorm, -1 == InstanceNorm\n\n groups = [-1, 16, 1]\n shape = (64,)\n for i in groups:\n model = _create_and_fit_sequential_model(GroupNormalization(groups=i), shape)\n assert hasattr(model.layers[0], \"gamma\")\n assert hasattr(model.layers[0], \"beta\")\n\n\ndef test_instancenorm_flat():\n # Check basic usage of instancenorm\n model = _create_and_fit_sequential_model(InstanceNormalization(), (64,))\n assert hasattr(model.layers[0], \"gamma\")\n assert hasattr(model.layers[0], \"beta\")\n\n\ndef test_initializer():\n # Check if the initializer for gamma and beta is working correctly\n layer = GroupNormalization(\n groups=32,\n beta_initializer=\"random_normal\",\n beta_constraint=\"NonNeg\",\n 
gamma_initializer=\"random_normal\",\n gamma_constraint=\"NonNeg\",\n )\n\n model = _create_and_fit_sequential_model(layer, (64,))\n\n weights = np.array(model.layers[0].get_weights())\n negativ = weights[weights < 0.0]\n assert len(negativ) == 0\n\n\ndef test_axis_error():\n with pytest.raises(ValueError):\n GroupNormalization(axis=0)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_groupnorm_conv():\n # Check if Axis is working for CONV nets\n # Testing for 1 == LayerNorm, 5 == GroupNorm, -1 == InstanceNorm\n np.random.seed(0x2020)\n groups = [-1, 5, 1]\n for i in groups:\n model = tf.keras.models.Sequential()\n model.add(GroupNormalization(axis=1, groups=i, input_shape=(20, 20, 3)))\n model.add(tf.keras.layers.Conv2D(5, (1, 1), padding=\"same\"))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(1, activation=\"softmax\"))\n model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01), loss=\"mse\")\n x = np.random.randint(1000, size=(10, 20, 20, 3))\n y = np.random.randint(1000, size=(10, 1))\n model.fit(x=x, y=y, epochs=1)\n assert hasattr(model.layers[0], \"gamma\")\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_regularizations():\n layer = GroupNormalization(\n gamma_regularizer=\"l1\", beta_regularizer=\"l1\", groups=4, axis=2\n )\n layer.build((None, 4, 4))\n assert len(layer.losses) == 2\n max_norm = tf.keras.constraints.max_norm\n layer = GroupNormalization(\n groups=2, gamma_constraint=max_norm, beta_constraint=max_norm\n )\n layer.build((None, 3, 4))\n assert layer.gamma.constraint == max_norm\n assert layer.beta.constraint == max_norm\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_groupnorm_correctness_1d():\n np.random.seed(0x2020)\n model = tf.keras.models.Sequential()\n norm = GroupNormalization(input_shape=(10,), groups=2)\n model.add(norm)\n model.compile(loss=\"mse\", optimizer=\"rmsprop\")\n\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))\n model.fit(x, x, epochs=5, verbose=0)\n out = model.predict(x)\n out -= norm.beta.numpy()\n out /= norm.gamma.numpy()\n\n np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1)\n np.testing.assert_allclose(out.std(), 1.0, atol=1e-1)\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_groupnorm_2d_different_groups():\n np.random.seed(0x2020)\n groups = [2, 1, 10]\n for i in groups:\n model = tf.keras.models.Sequential()\n norm = GroupNormalization(axis=1, groups=i, input_shape=(10, 3))\n model.add(norm)\n # centered and variance are 5.0 and 10.0, respectively\n model.compile(loss=\"mse\", optimizer=\"rmsprop\")\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10, 3))\n model.fit(x, x, epochs=5, verbose=0)\n out = model.predict(x)\n out -= np.reshape(norm.beta.numpy(), (1, 10, 1))\n out /= np.reshape(norm.gamma.numpy(), (1, 10, 1))\n\n np.testing.assert_allclose(\n out.mean(axis=(0, 1), dtype=np.float32), (0.0, 0.0, 0.0), atol=1e-1\n )\n np.testing.assert_allclose(\n out.std(axis=(0, 1), dtype=np.float32), (1.0, 1.0, 1.0), atol=1e-1\n )\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_groupnorm_convnet():\n np.random.seed(0x2020)\n model = tf.keras.models.Sequential()\n norm = GroupNormalization(axis=1, input_shape=(3, 4, 4), groups=3)\n model.add(norm)\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n\n # centered = 5.0, variance = 10.0\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))\n model.fit(x, x, epochs=4, verbose=0)\n out = model.predict(x)\n out -= np.reshape(norm.beta.numpy(), (1, 3, 1, 
1))\n out /= np.reshape(norm.gamma.numpy(), (1, 3, 1, 1))\n\n np.testing.assert_allclose(\n np.mean(out, axis=(0, 2, 3), dtype=np.float32), (0.0, 0.0, 0.0), atol=1e-1\n )\n np.testing.assert_allclose(\n np.std(out, axis=(0, 2, 3), dtype=np.float32), (1.0, 1.0, 1.0), atol=1e-1\n )\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_groupnorm_convnet_no_center_no_scale():\n np.random.seed(0x2020)\n model = tf.keras.models.Sequential()\n norm = GroupNormalization(\n axis=-1, groups=2, center=False, scale=False, input_shape=(3, 4, 4)\n )\n model.add(norm)\n model.compile(loss=\"mse\", optimizer=\"sgd\")\n # centered and variance are 5.0 and 10.0, respectively\n x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4))\n model.fit(x, x, epochs=4, verbose=0)\n out = model.predict(x)\n\n np.testing.assert_allclose(\n np.mean(out, axis=(0, 2, 3), dtype=np.float32), (0.0, 0.0, 0.0), atol=1e-1\n )\n np.testing.assert_allclose(\n np.std(out, axis=(0, 2, 3), dtype=np.float32), (1.0, 1.0, 1.0), atol=1e-1\n )\n\n\ndef calculate_frn(\n x, beta=0.2, gamma=1, eps=1e-6, learned_epsilon=False, dtype=np.float32\n):\n if learned_epsilon:\n eps = eps + 1e-4\n eps = tf.cast(eps, dtype=dtype)\n nu2 = tf.reduce_mean(tf.square(x), axis=[1, 2], keepdims=True)\n x = x * tf.math.rsqrt(nu2 + tf.abs(eps))\n return gamma * x + beta\n\n\ndef set_random_seed():\n seed = 0x2020\n np.random.seed(seed)\n tf.random.set_seed(seed)\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_with_beta(dtype):\n set_random_seed()\n inputs = np.random.rand(28, 28, 1).astype(dtype)\n inputs = np.expand_dims(inputs, axis=0)\n frn = FilterResponseNormalization(\n beta_initializer=\"ones\", gamma_initializer=\"ones\", dtype=dtype\n )\n frn.build((None, 28, 28, 1))\n observed = frn(inputs)\n expected = calculate_frn(inputs, beta=1, gamma=1, dtype=dtype)\n np.testing.assert_allclose(expected[0], observed[0])\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_with_gamma(dtype):\n set_random_seed()\n inputs = np.random.rand(28, 28, 1).astype(dtype)\n inputs = np.expand_dims(inputs, axis=0)\n frn = FilterResponseNormalization(\n beta_initializer=\"zeros\", gamma_initializer=\"ones\", dtype=dtype\n )\n frn.build((None, 28, 28, 1))\n observed = frn(inputs)\n expected = calculate_frn(inputs, beta=0, gamma=1, dtype=dtype)\n np.testing.assert_allclose(expected[0], observed[0])\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_with_epsilon(dtype):\n set_random_seed()\n inputs = np.random.rand(28, 28, 1).astype(dtype)\n inputs = np.expand_dims(inputs, axis=0)\n frn = FilterResponseNormalization(\n beta_initializer=tf.keras.initializers.Constant(0.5),\n gamma_initializer=\"ones\",\n learned_epsilon=True,\n dtype=dtype,\n )\n frn.build((None, 28, 28, 1))\n observed = frn(inputs)\n expected = calculate_frn(\n inputs, beta=0.5, gamma=1, learned_epsilon=True, dtype=dtype\n )\n np.testing.assert_allclose(expected[0], observed[0])\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_keras_model(dtype):\n set_random_seed()\n frn = FilterResponseNormalization(\n beta_initializer=\"ones\", gamma_initializer=\"ones\", dtype=dtype\n )\n random_inputs = np.random.rand(10, 32, 32, 3).astype(dtype)\n random_labels = 
np.random.randint(2, size=(10,)).astype(dtype)\n input_layer = tf.keras.layers.Input(shape=(32, 32, 3))\n x = frn(input_layer)\n x = tf.keras.layers.Flatten()(x)\n out = tf.keras.layers.Dense(1, activation=\"sigmoid\")(x)\n model = tf.keras.models.Model(input_layer, out)\n model.compile(loss=\"binary_crossentropy\", optimizer=\"sgd\")\n model.fit(random_inputs, random_labels, epochs=2)\n\n\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_serialization(dtype):\n frn = FilterResponseNormalization(\n beta_initializer=\"ones\", gamma_initializer=\"ones\", dtype=dtype\n )\n serialized_frn = tf.keras.layers.serialize(frn)\n new_layer = tf.keras.layers.deserialize(serialized_frn)\n assert frn.get_config() == new_layer.get_config()\n\n\[email protected](\"maybe_run_functions_eagerly\")\[email protected](\"dtype\", [np.float16, np.float32, np.float64])\ndef test_eps_gards(dtype):\n set_random_seed()\n random_inputs = np.random.rand(10, 32, 32, 3).astype(np.float32)\n random_labels = np.random.randint(2, size=(10,)).astype(np.float32)\n input_layer = tf.keras.layers.Input(shape=(32, 32, 3))\n frn = FilterResponseNormalization(\n beta_initializer=\"ones\", gamma_initializer=\"ones\", learned_epsilon=True\n )\n initial_eps_value = frn.eps_learned.numpy()[0]\n x = frn(input_layer)\n x = tf.keras.layers.Flatten()(x)\n out = tf.keras.layers.Dense(1, activation=\"sigmoid\")(x)\n model = tf.keras.models.Model(input_layer, out)\n model.compile(loss=\"binary_crossentropy\", optimizer=\"sgd\")\n model.fit(random_inputs, random_labels, epochs=1)\n final_eps_value = frn.eps_learned.numpy()[0]\n assert initial_eps_value != final_eps_value\n\n\[email protected](\"maybe_run_functions_eagerly\")\ndef test_filter_response_normalization_save(tmpdir):\n input_layer = tf.keras.layers.Input(shape=(32, 32, 3))\n frn = FilterResponseNormalization()(input_layer)\n model = tf.keras.Model(input_layer, frn)\n filepath = str(tmpdir / \"test.h5\")\n model.save(filepath, save_format=\"h5\")\n filepath = str(tmpdir / \"test\")\n model.save(filepath, save_format=\"tf\")\n" ]
[ [ "numpy.ones", "tensorflow.keras.layers.Flatten", "tensorflow.reshape", "numpy.random.seed", "tensorflow.keras.models.Model", "tensorflow.abs", "tensorflow.convert_to_tensor", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.initializers.Constant", "tensorflow.keras.layers.serialize", "numpy.expand_dims", "numpy.random.rand", "tensorflow.keras.layers.Dense", "numpy.isnan", "tensorflow.constant", "tensorflow.random.set_seed", "numpy.mean", "numpy.sqrt", "tensorflow.keras.layers.deserialize", "tensorflow.cast", "numpy.std", "numpy.array", "tensorflow.keras.models.Sequential", "tensorflow.keras.optimizers.RMSprop", "tensorflow.reduce_mean", "tensorflow.keras.Model", "tensorflow.square", "tensorflow.random.normal", "numpy.testing.assert_allclose", "numpy.random.normal", "numpy.random.randint", "tensorflow.keras.layers.Input" ] ]
asatk/improved_CcGAN
[ "29a58e6e2a03e56c2ad80ae1a2ebbd0710e026f3" ]
[ "UTKFace/UTKFace_64x64/cGAN-concat/models/ResNet_embed.py" ]
[ "'''\nResNet-based model to map an image from pixel space to a features space.\nNeed to be pretrained on the dataset.\n\nif isometric_map = True, there is an extra step (elf.classifier_1 = nn.Linear(512, 32*32*3)) to increase the dimension of the feature map from 512 to 32*32*3. This selection is for desity-ratio estimation in feature space.\n\ncodes are based on\n@article{\nzhang2018mixup,\ntitle={mixup: Beyond Empirical Risk Minimization},\nauthor={Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz},\njournal={International Conference on Learning Representations},\nyear={2018},\nurl={https://openreview.net/forum?id=r1Ddp1-Rb},\n}\n'''\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nNC = 3\nIMG_SIZE = 64\nDIM_EMBED = 128\n\n\n#------------------------------------------------------------------------------\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet_embed(nn.Module):\n def __init__(self, block, num_blocks, nc=NC, dim_embed=DIM_EMBED, ngpu = 1):\n super(ResNet_embed, self).__init__()\n self.in_planes = 64\n self.ngpu = ngpu\n\n self.main = nn.Sequential(\n nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False), # h=h\n # nn.Conv2d(nc, 64, kernel_size=4, stride=2, padding=1, bias=False), # h=h/2\n nn.BatchNorm2d(64),\n nn.ReLU(),\n # self._make_layer(block, 64, num_blocks[0], stride=1), # h=h\n self._make_layer(block, 64, num_blocks[0], stride=2), # h=h/2 32\n self._make_layer(block, 128, num_blocks[1], stride=2), # h=h/2 16\n self._make_layer(block, 256, num_blocks[2], stride=2), # h=h/2 8\n self._make_layer(block, 512, num_blocks[3], stride=2), # h=h/2 4\n # nn.AvgPool2d(kernel_size=4)\n nn.AdaptiveAvgPool2d((1, 1))\n )\n\n self.x2h_res = nn.Sequential(\n nn.Linear(512, 512),\n nn.BatchNorm1d(512),\n nn.ReLU(),\n\n 
nn.Linear(512, dim_embed),\n nn.BatchNorm1d(dim_embed),\n nn.ReLU(),\n )\n\n self.h2y = nn.Sequential(\n nn.Linear(dim_embed, 1),\n nn.ReLU()\n )\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n\n if x.is_cuda and self.ngpu > 1:\n features = nn.parallel.data_parallel(self.main, x, range(self.ngpu))\n features = features.view(features.size(0), -1)\n features = nn.parallel.data_parallel(self.x2h_res, features, range(self.ngpu))\n out = nn.parallel.data_parallel(self.h2y, features, range(self.ngpu))\n else:\n features = self.main(x)\n features = features.view(features.size(0), -1)\n features = self.x2h_res(features)\n out = self.h2y(features)\n\n return out, features\n\n\ndef ResNet18_embed(dim_embed=DIM_EMBED, ngpu = 1):\n return ResNet_embed(BasicBlock, [2,2,2,2], dim_embed=dim_embed, ngpu = ngpu)\n\ndef ResNet34_embed(dim_embed=DIM_EMBED, ngpu = 1):\n return ResNet_embed(BasicBlock, [3,4,6,3], dim_embed=dim_embed, ngpu = ngpu)\n\ndef ResNet50_embed(dim_embed=DIM_EMBED, ngpu = 1):\n return ResNet_embed(Bottleneck, [3,4,6,3], dim_embed=dim_embed, ngpu = ngpu)\n\n#------------------------------------------------------------------------------\n# map labels to the embedding space\nclass model_y2h(nn.Module):\n def __init__(self, dim_embed=DIM_EMBED):\n super(model_y2h, self).__init__()\n self.main = nn.Sequential(\n nn.Linear(1, dim_embed),\n # nn.BatchNorm1d(dim_embed),\n nn.GroupNorm(8, dim_embed),\n nn.ReLU(),\n\n nn.Linear(dim_embed, dim_embed),\n # nn.BatchNorm1d(dim_embed),\n nn.GroupNorm(8, dim_embed),\n nn.ReLU(),\n\n nn.Linear(dim_embed, dim_embed),\n # nn.BatchNorm1d(dim_embed),\n nn.GroupNorm(8, dim_embed),\n nn.ReLU(),\n\n nn.Linear(dim_embed, dim_embed),\n # nn.BatchNorm1d(dim_embed),\n nn.GroupNorm(8, dim_embed),\n nn.ReLU(),\n\n nn.Linear(dim_embed, dim_embed),\n nn.ReLU()\n )\n\n def forward(self, y):\n y = y.view(-1, 1) +1e-8\n # y = torch.exp(y.view(-1, 1))\n return self.main(y)\n\n\n\nif __name__ == \"__main__\":\n net = ResNet34_embed(ngpu = 1).cuda()\n x = torch.randn(16,NC,IMG_SIZE,IMG_SIZE).cuda()\n out, features = net(x)\n print(out.size())\n print(features.size())\n\n net_y2h = model_y2h()\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.GroupNorm", "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.randn", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.relu", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.nn.ReLU" ] ]
ryanbrand/mil
[ "6524047febe35fa59c356794f1649946332c4e7f" ]
[ "tf_utils.py" ]
[ "\"\"\" Utility functions for tensorflow. \"\"\"\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import array_ops\nimport numpy as np\n\ndef safe_get(name, *args, **kwargs):\n \"\"\" Same as tf.get_variable, except flips on reuse_variables automatically \"\"\"\n try:\n return tf.get_variable(name, *args, **kwargs)\n except ValueError:\n tf.get_variable_scope().reuse_variables()\n return tf.get_variable(name, *args, **kwargs)\n\ndef init_weights(shape, name=None):\n shape = tuple(shape)\n weights = np.random.normal(scale=0.01, size=shape).astype('f')\n return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)\n \ndef init_bias(shape, name=None):\n return safe_get(name, initializer=tf.zeros(shape, dtype=tf.float32))\n\ndef init_fc_weights_xavier(shape, name=None):\n fc_initializer = tf.contrib.layers.xavier_initializer(dtype=tf.float32)\n return safe_get(name, list(shape), initializer=fc_initializer, dtype=tf.float32)\n\ndef init_conv_weights_xavier(shape, name=None):\n conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=tf.float32)\n return safe_get(name, list(shape), initializer=conv_initializer, dtype=tf.float32)\n \ndef init_fc_weights_snn(shape, name=None):\n weights = np.random.normal(scale=np.sqrt(1.0/shape[0]), size=shape).astype('f')\n return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)\n\ndef init_conv_weights_snn(shape, name=None):\n weights = np.random.normal(scale=np.sqrt(1.0/(shape[0]*shape[1]*shape[2])), size=shape).astype('f')\n return safe_get(name, list(shape), initializer=tf.constant_initializer(weights), dtype=tf.float32)\n\ndef batched_matrix_vector_multiply(vector, matrix):\n \"\"\" computes x^T A in mini-batches. 
\"\"\"\n vector_batch_as_matricies = tf.expand_dims(vector, [1])\n mult_result = tf.matmul(vector_batch_as_matricies, matrix)\n squeezed_result = tf.squeeze(mult_result, [1])\n return squeezed_result\n\ndef euclidean_loss_layer(a, b, multiplier=100.0, use_l1=False, eps=0.01):\n \"\"\" Math: out = (action - mlp_out)'*precision*(action-mlp_out)\n = (u-uhat)'*A*(u-uhat)\"\"\"\n multiplier = tf.constant(multiplier, dtype='float') #for bc #10000\n uP =a*multiplier-b*multiplier\n if use_l1:\n return tf.reduce_mean(eps*tf.square(uP) + tf.abs(uP))\n return tf.reduce_mean(tf.square(uP))\n\ndef conv2d(img, w, b, strides=[1, 1, 1, 1], is_dilated=False):\n if is_dilated:\n layer = tf.nn.atrous_conv2d(img, w, rate=2, padding='SAME') + b\n else:\n layer = tf.nn.conv2d(img, w, strides=strides, padding='SAME') + b\n return layer\n \ndef dropout(layer, keep_prob=0.9, is_training=True, name=None, selu=False):\n if selu:\n return dropout_selu(layer, 1.0 - keep_prob, name=name, training=is_training)\n if is_training:\n return tf.nn.dropout(layer, keep_prob=keep_prob, name=name)\n else:\n return tf.add(layer, 0, name=name)\n\ndef norm(layer, norm_type='batch_norm', decay=0.9, id=0, is_training=True, activation_fn=tf.nn.relu, prefix='conv_'):\n if norm_type != 'batch_norm' and norm_type != 'layer_norm':\n return tf.nn.relu(layer)\n with tf.variable_scope('norm_layer_%s%d' % (prefix, id)) as vs:\n if norm_type == 'batch_norm':\n if is_training:\n try:\n layer = tf.contrib.layers.batch_norm(layer, is_training=True, center=True,\n scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs) # updates_collections=None\n except ValueError:\n layer = tf.contrib.layers.batch_norm(layer, is_training=True, center=True,\n scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs, reuse=True) # updates_collections=None\n else:\n layer = tf.contrib.layers.batch_norm(layer, is_training=False, center=True,\n scale=False, decay=decay, activation_fn=activation_fn, updates_collections=None, scope=vs, reuse=True) # updates_collections=None\n elif norm_type == 'layer_norm': # layer_norm\n # Take activation_fn out to apply lrelu\n try:\n layer = activation_fn(tf.contrib.layers.layer_norm(layer, center=True,\n scale=False, scope=vs)) # updates_collections=None\n \n except ValueError:\n layer = activation_fn(tf.contrib.layers.layer_norm(layer, center=True,\n scale=False, scope=vs, reuse=True))\n elif norm_type == 'selu':\n layer = selu(layer)\n else:\n raise NotImplementedError('Other types of norm not implemented.')\n return layer\n \nclass VBN(object):\n \"\"\"\n Virtual Batch Normalization\n \"\"\"\n\n def __init__(self, x, name, epsilon=1e-5):\n \"\"\"\n x is the reference batch\n \"\"\"\n assert isinstance(epsilon, float)\n\n shape = x.get_shape().as_list()\n with tf.variable_scope(name) as scope:\n self.epsilon = epsilon\n self.name = name\n self.mean = tf.reduce_mean(x, [0, 1, 2], keep_dims=True)\n self.mean_sq = tf.reduce_mean(tf.square(x), [0, 1, 2], keep_dims=True)\n self.batch_size = int(x.get_shape()[0])\n assert x is not None\n assert self.mean is not None\n assert self.mean_sq is not None\n out = tf.nn.relu(self._normalize(x, self.mean, self.mean_sq, \"reference\"))\n self.reference_output = out\n\n def __call__(self, x, update=False):\n with tf.variable_scope(self.name) as scope:\n if not update:\n new_coeff = 1. / (self.batch_size + 1.)\n old_coeff = 1. 
- new_coeff\n new_mean = tf.reduce_mean(x, [1, 2], keep_dims=True)\n new_mean_sq = tf.reduce_mean(tf.square(x), [1, 2], keep_dims=True)\n mean = new_coeff * new_mean + old_coeff * self.mean\n mean_sq = new_coeff * new_mean_sq + old_coeff * self.mean_sq\n out = tf.nn.relu(self._normalize(x, mean, mean_sq, \"live\"))\n # Update the mean and mean_sq when passing the reference data\n else:\n self.mean = tf.reduce_mean(x, [0, 1, 2], keep_dims=True)\n self.mean_sq = tf.reduce_mean(tf.square(x), [0, 1, 2], keep_dims=True)\n out = tf.nn.relu(self._normalize(x, self.mean, self.mean_sq, \"reference\"))\n return out\n\n def _normalize(self, x, mean, mean_sq, message):\n # make sure this is called with a variable scope\n shape = x.get_shape().as_list()\n assert len(shape) == 4\n self.gamma = safe_get(\"gamma\", [shape[-1]],\n initializer=tf.random_normal_initializer(1., 0.02))\n gamma = tf.reshape(self.gamma, [1, 1, 1, -1])\n self.beta = safe_get(\"beta\", [shape[-1]],\n initializer=tf.constant_initializer(0.))\n beta = tf.reshape(self.beta, [1, 1, 1, -1])\n assert self.epsilon is not None\n assert mean_sq is not None\n assert mean is not None\n std = tf.sqrt(self.epsilon + mean_sq - tf.square(mean))\n out = x - mean\n out = out / std\n out = out * gamma\n out = out + beta\n return out\n\ndef max_pool(img, k):\n return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n\n\n# Consider stride size when using xavier for fp network\ndef get_xavier_weights(filter_shape, poolsize=(2, 2), name=None):\n fan_in = np.prod(filter_shape[1:])\n fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) //\n np.prod(poolsize))\n\n low = -4*np.sqrt(6.0/(fan_in + fan_out)) # use 4 for sigmoid, 1 for tanh activation\n high = 4*np.sqrt(6.0/(fan_in + fan_out))\n weights = np.random.uniform(low=low, high=high, size=filter_shape)\n return safe_get(name, filter_shape, initializer=tf.constant_initializer(weights))\n\ndef get_he_weights(filter_shape, name=None):\n fan_in = np.prod(filter_shape[1:])\n\n stddev = np.sqrt(2.6/fan_in)\n weights = stddev * np.random.randn(filter_shape[0], filter_shape[1], filter_shape[2], filter_shape[3])\n return safe_get(name, filter_shape, initializer=tf.constant_initializer(weights))\n" ]
[ [ "tensorflow.reshape", "tensorflow.contrib.layers.batch_norm", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.squeeze", "tensorflow.contrib.layers.xavier_initializer_conv2d", "tensorflow.abs", "tensorflow.get_variable_scope", "tensorflow.contrib.layers.layer_norm", "tensorflow.nn.dropout", "tensorflow.nn.max_pool", "tensorflow.nn.atrous_conv2d", "tensorflow.random_normal_initializer", "tensorflow.constant", "numpy.random.uniform", "tensorflow.constant_initializer", "tensorflow.expand_dims", "numpy.random.normal", "numpy.prod", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.zeros", "numpy.random.randn", "tensorflow.add", "tensorflow.reduce_mean", "tensorflow.nn.conv2d", "tensorflow.square", "numpy.sqrt", "tensorflow.nn.relu", "tensorflow.get_variable" ] ]
vrnanshuman/fpl-simulator
[ "1d78f8a74f69f740870fcbb729cb19ba4620c6fa" ]
[ "scout.py" ]
[ "from simulator import *\nimport numpy as np\n\n\n'''\n\tNotes : Use the env variable and its helper functions to \n\t1. get the points for a set of player ids\n\t2. get the cost for a set of player ids\n'''\n\nprofiles = [{'cols': ['stats.minutes'],\n 'order': [False],\n 'prob_dist': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]},\n {'cols': ['stats.own_goals', 'stats.yellow_cards', 'stats.red_cards'],\n 'order': [True, True, True],\n 'prob_dist': [0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0]},\n {'cols': ['stats.ict_index'],\n 'order': [False],\n 'prob_dist': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]},\n {'cols': ['selected_by_percent'],\n 'order': [False],\n 'prob_dist': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]},\n {'cols': ['saves_goal_conceded_ratio',\n 'stats.saves',\n 'stats.clean_sheets',\n 'stats.penalties_saved',\n 'stats.penalties_missed'],\n 'order': [False, False, False, False, True],\n 'prob_dist': [1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0]}]\n\n\nclass Scout():\n\n\tdef __init__(self, env:FPLSimulator, week_idx:int, min_balance:float, current_balance:float, k:int=15, multi_transfer=False):\n\t\t# multi transfer allows multiple transfers to happen in a game week\n\t\tself.env = env\n\t\tself.week_idx = week_idx\n\t\tself.min_balance = min_balance\n\t\tself.multi_transfer = multi_transfer\n\t\tself.current_balance = current_balance\n\t\tself.k = k\n\n\tdef find_transfer_out_candidates(self):\n\t\t'''\n\t\t\tThis functions finds the candidates in our FPL manager team to transfer out in that particular game week\n\t\t\tParameters:\n\t\t\t-----------\n\t\t\tk : the top K players who should be transferred out\n\t\t\t-----------\n\t\t\treturns ndarray of players_ids . shape :(k,)\n\n\t\t'''\n\t\tk = self.k\n\t\tassert(k <= self.env.running_player_ids.shape[0])\n\t\tROI = self.env.running_player_points/self.env.running_player_cost\n\t\tindices = np.argsort(ROI, 0)\n\t\tsorted_ids_based_on_ROI = self.env.running_player_ids[:,self.week_idx][indices[:,self.week_idx]][:k]\n\t\treturn sorted_ids_based_on_ROI\n \n\tdef find_transfer_in_candidates(self, player_profile_idx:int, transfer_out_candidates:np.ndarray):\n\t\t'''\n\t\t\tThis functions finds the candidates in our FPL manager team to transfer out in that particular game week\n\t\t\tParameters:\n\t\t\t-----------\n\t\t\tplayer_profile_idx : int : index of profile from profiles variable\n\t\t\t-----------\n\t\t\treturns ndarray of players_ids . shape :(k,)\n\n\t\t'''\n\t\t# print(self.env.all_week_data[self.week_idx].columns)\n\t\tk = self.k\n\t\tassert(k <= self.env.running_player_ids.shape[0])\n\t\tprofile = profiles[player_profile_idx]\n\t\t# in_ids = np.setdiff1d(self.env.all_week_data[self.week_idx].sort_values(by=profile['cols'], ascending=profile['order']).index,transfer_out_candidates)[:k]\n\t\trankings_table = self.env.all_week_data[self.week_idx].sort_values(by=profile['cols'], ascending=profile['order']).index\n\t\tin_ids = rankings_table[~np.in1d(rankings_table,transfer_out_candidates)][:k]\n\t\treturn in_ids\n\n\n\tdef get_transfer_in_out_players(self, transfer_in_candidates:np.ndarray, transfer_out_candidates:np.ndarray):\n\t\t'''\n\t\t This function takes two sets of player candidates and uses their (cost, type, roi) for that game week to find the perfect pair of players to be transferred in and out. \n\t\t The pair of players (p_in, p_out). roi = (points / cost)\n\t\t Parameters:\n\t\t -----------\n\t\t\tbalance : int . The current remaining balance for the manager\n\t\t\ttransfer_in_candidates:np.ndarray . 
shape : (k,) : the ids of players returned from the find_transfer_in_candidates function\n\t\t\ttransfer_out_candidates:np.ndarray . shape : (k,): the ids of players returned from the find_transfer_out_candidates function\n\t\t -----------\n\n\t\treturns : ndarray : shape (N_t,15,10) . transfer in out matrix where N_t is the number of transfers\n\t\t\t\t balance : the readjusted balance of the FPL team\n\t\t'''\n\t\t#print(transfer_in_candidates.shape , transfer_out_candidates.shape)\n\t\tbalance = self.current_balance\n\n\t\tassert(transfer_in_candidates.shape == transfer_out_candidates.shape)\n\t\ttransfer_in_candidates = np.broadcast_to(transfer_in_candidates[:,np.newaxis] , (transfer_in_candidates.shape[0], self.env.current_week)) # (K,10)\n\t\ttransfer_out_candidates = np.broadcast_to(transfer_out_candidates[:,np.newaxis] , (transfer_out_candidates.shape[0], self.env.current_week)) # (K,10)\n\n\t\tall_player_types = self.env.all_player_other_data[self.env.all_player_other_data_cols.index(\"element_type\")] #(620,10)\n\n\t\ttransfer_in_candidates_cost = self.env.get_player_info_matrix(self.env.all_player_cost, transfer_in_candidates)[:,self.week_idx] # (K,)\n\t\ttransfer_in_candidates_types = self.env.get_player_info_matrix(all_player_types, transfer_in_candidates)[:,self.week_idx] # (K,)\n\t\ttransfer_out_candidates_cost = self.env.get_player_info_matrix(self.env.all_player_cost, transfer_out_candidates)[:,self.week_idx] # (K,)\n\t\ttransfer_out_candidates_types = self.env.get_player_info_matrix(all_player_types, transfer_out_candidates)[:,self.week_idx] # (K,)\n\n\t\tin_out_type_match_mask = transfer_in_candidates_types[:,np.newaxis] == transfer_out_candidates_types[np.newaxis, :] #(K,K)\n\n\t\ttransfer_out_candidates_balance_after_out = transfer_out_candidates_cost + balance - self.min_balance #(K,)\n\t\tin_out_cost_diff = transfer_in_candidates_cost[np.newaxis,:] - transfer_out_candidates_balance_after_out[:,np.newaxis] # (K,K)\n\t\tin_out_cost_match_mask = in_out_cost_diff < 0 # (K,K)\n\n\t\t#print(in_out_type_match_mask.shape, in_out_cost_match_mask.shape)\n\t\tp_in_idxs, p_out_idxs = np.where(in_out_type_match_mask & in_out_cost_match_mask > 0)\n\t\tp_in_ids = transfer_in_candidates[:,self.week_idx][p_in_idxs] # (k_,)\n\t\tp_out_ids = transfer_out_candidates[:,self.week_idx][p_out_idxs] # (k_,)\n\n\t\tassert(p_in_ids.shape == p_out_ids.shape)\n\t\t#print(p_in_ids, p_out_ids)\n\t\t#print(in_out_cost_diff)\n\t\t\n\t\tif not self.multi_transfer:\n\t\t\ttransfer_in_out_mat = np.zeros((1,)+self.env.running_player_ids.shape)\n\t\t\ttransfer_in_out_mat[0,self.env.running_player_ids[:,self.week_idx] == p_out_ids[0], self.week_idx] = p_in_ids[0]\n\n\t\t\tremaining_balance = np.abs(in_out_cost_diff[p_in_idxs[0], p_out_idxs[0]]) + self.min_balance\n\t\t\t#assert(remaining_balance >= self.min_balance)\n\n\t\treturn transfer_in_out_mat, remaining_balance" ]
[ [ "numpy.zeros", "numpy.abs", "numpy.argsort", "numpy.in1d", "numpy.broadcast_to", "numpy.where" ] ]
ObinnaObeleagu/evalml
[ "3b5bf62b08a5a5bc6485ba5387a08c32e1857473" ]
[ "evalml/tests/component_tests/test_arima_regressor.py" ]
[ "from unittest.mock import patch\n\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pytest import importorskip\n\nfrom evalml.model_family import ModelFamily\nfrom evalml.pipelines.components import ARIMARegressor\nfrom evalml.problem_types import ProblemTypes\n\nsktime_arima = importorskip(\n \"sktime.forecasting.arima\", reason=\"Skipping test because sktime not installed\"\n)\nforecasting = importorskip(\n \"sktime.forecasting.base\", reason=\"Skipping test because sktime not installed\"\n)\n\n\ndef test_model_family():\n assert ARIMARegressor.model_family == ModelFamily.ARIMA\n\n\ndef test_problem_types():\n assert set(ARIMARegressor.supported_problem_types) == {\n ProblemTypes.TIME_SERIES_REGRESSION\n }\n\n\ndef test_model_instance(ts_data):\n X, y = ts_data\n clf = ARIMARegressor()\n fitted = clf.fit(X, y)\n assert isinstance(fitted, ARIMARegressor)\n\n\ndef test_get_dates_fit_and_predict(ts_data):\n X, y = ts_data\n clf = ARIMARegressor()\n date_col, X_ = clf._get_dates(X, y)\n assert isinstance(date_col, pd.DatetimeIndex)\n assert X_.equals(X)\n\n\ndef test_match_indices(ts_data):\n X, y = ts_data\n date_index = pd.date_range(\"2020-10-02\", \"2020-11-01\")\n clf = ARIMARegressor()\n X_, y_ = clf._match_indices(X, y, date_index)\n assert isinstance(X_.index, pd.DatetimeIndex)\n assert isinstance(y_.index, pd.DatetimeIndex)\n assert X_.index.equals(y_.index)\n assert X_.index.equals(date_index)\n\n\[email protected](\"predict\", [True, False])\[email protected](\"dates_shape\", [0, 1, 2])\ndef test_format_dates(predict, dates_shape, ts_data):\n X, y = ts_data\n date_index = pd.date_range(\"2020-10-02\", \"2020-11-01\")\n if dates_shape == 1:\n date_index = pd.DataFrame(date_index)\n elif dates_shape == 2:\n date_index = pd.DataFrame(data={\"a\": date_index, \"b\": date_index})\n\n clf = ARIMARegressor()\n\n if predict:\n if dates_shape != 2:\n X_, y_, fh_ = clf._format_dates(date_index, X, y, True)\n assert X_.index.equals(y_.index)\n assert isinstance(fh_, forecasting.ForecastingHorizon)\n elif dates_shape == 2:\n with pytest.raises(ValueError, match=\"Found 2 columns\"):\n clf._format_dates(date_index, X, y, True)\n else:\n if dates_shape != 2:\n X_, y_, _ = clf._format_dates(date_index, X, y, False)\n assert X_.index.equals(y_.index)\n assert _ is None\n elif dates_shape == 2:\n with pytest.raises(ValueError, match=\"Found 2 columns\"):\n clf._format_dates(date_index, X, y, False)\n\n\ndef test_feature_importance(ts_data):\n X, y = ts_data\n clf = ARIMARegressor()\n with patch.object(clf, \"_component_obj\"):\n clf.fit(X, y)\n assert clf.feature_importance == np.zeros(1)\n\n\ndef test_fit_predict_ts_with_datetime_in_X_column(\n ts_data_seasonal_train, ts_data_seasonal_test\n):\n X, y = ts_data_seasonal_train\n X_test, _ = ts_data_seasonal_test\n assert isinstance(X.index, pd.DatetimeIndex)\n assert isinstance(y.index, pd.DatetimeIndex)\n\n m_clf = ARIMARegressor(d=None)\n m_clf.fit(X=X, y=y)\n y_pred = m_clf.predict(X=X_test)\n\n X[\"Sample\"] = pd.date_range(start=\"1/1/2016\", periods=25)\n\n dt_clf = ARIMARegressor(d=None)\n dt_clf.fit(X=X, y=y)\n y_pred_dt = dt_clf.predict(X=X_test)\n\n assert isinstance(y_pred_dt, pd.Series)\n pd.testing.assert_series_equal(y_pred, y_pred_dt)\n\n\ndef test_fit_predict_ts_with_only_datetime_column_in_X(\n ts_data_seasonal_train, ts_data_seasonal_test\n):\n X, y = ts_data_seasonal_train\n X_test, y_test = ts_data_seasonal_test\n assert isinstance(X.index, pd.DatetimeIndex)\n assert isinstance(y.index, pd.DatetimeIndex)\n\n fh_ = 
forecasting.ForecastingHorizon(y_test.index, is_relative=False)\n\n a_clf = sktime_arima.AutoARIMA()\n clf = a_clf.fit(y=y)\n y_pred_sk = clf.predict(fh=fh_)\n\n X = X.drop([\"features\"], axis=1)\n\n m_clf = ARIMARegressor(d=None)\n m_clf.fit(X=X, y=y)\n y_pred = m_clf.predict(X=X_test)\n\n assert (y_pred_sk.to_period(\"D\") == y_pred).all()\n\n\ndef test_fit_predict_ts_with_X_and_y_index_out_of_sample(\n ts_data_seasonal_train, ts_data_seasonal_test\n):\n X, y = ts_data_seasonal_train\n X_test, y_test = ts_data_seasonal_test\n assert isinstance(X.index, pd.DatetimeIndex)\n assert isinstance(y.index, pd.DatetimeIndex)\n\n fh_ = forecasting.ForecastingHorizon(y_test.index, is_relative=False)\n\n a_clf = sktime_arima.AutoARIMA()\n clf = a_clf.fit(X=X, y=y)\n y_pred_sk = clf.predict(fh=fh_, X=X_test)\n\n m_clf = ARIMARegressor(d=None)\n m_clf.fit(X=X, y=y)\n y_pred = m_clf.predict(X=X_test)\n\n assert (y_pred_sk.to_period(\"D\") == y_pred).all()\n\n\n@patch(\n \"evalml.pipelines.components.estimators.regressors.arima_regressor.ARIMARegressor._format_dates\"\n)\n@patch(\n \"evalml.pipelines.components.estimators.regressors.arima_regressor.ARIMARegressor._get_dates\"\n)\ndef test_fit_predict_ts_with_X_and_y_index(\n mock_get_dates,\n mock_format_dates,\n ts_data_seasonal_train,\n):\n X, y = ts_data_seasonal_train\n assert isinstance(X.index, pd.DatetimeIndex)\n assert isinstance(y.index, pd.DatetimeIndex)\n\n mock_get_dates.return_value = (X.index, X)\n mock_format_dates.return_value = (X, y, None)\n\n fh_ = forecasting.ForecastingHorizon(y.index, is_relative=False)\n\n a_clf = sktime_arima.AutoARIMA()\n clf = a_clf.fit(X=X, y=y)\n y_pred_sk = clf.predict(fh=fh_, X=X)\n\n m_clf = ARIMARegressor(d=None)\n m_clf.fit(X=X, y=y)\n mock_format_dates.return_value = (X, y, fh_)\n y_pred = m_clf.predict(X=X)\n\n assert (y_pred_sk == y_pred).all()\n\n\n@patch(\n \"evalml.pipelines.components.estimators.regressors.arima_regressor.ARIMARegressor._format_dates\"\n)\n@patch(\n \"evalml.pipelines.components.estimators.regressors.arima_regressor.ARIMARegressor._get_dates\"\n)\ndef test_fit_predict_ts_with_X_not_y_index(\n mock_get_dates, mock_format_dates, ts_data_seasonal_train\n):\n X, y = ts_data_seasonal_train\n assert isinstance(X.index, pd.DatetimeIndex)\n assert isinstance(y.index, pd.DatetimeIndex)\n\n mock_get_dates.return_value = (X.index, X)\n mock_format_dates.return_value = (X, y, None)\n\n fh_ = forecasting.ForecastingHorizon(y.index, is_relative=False)\n\n a_clf = sktime_arima.AutoARIMA()\n clf = a_clf.fit(X=X, y=y)\n y_pred_sk = clf.predict(fh=fh_, X=X)\n\n y = y.reset_index(drop=True)\n assert not isinstance(y.index, pd.DatetimeIndex)\n\n m_clf = ARIMARegressor(d=None)\n clf_ = m_clf.fit(X=X, y=y)\n mock_format_dates.return_value = (X, y, fh_)\n y_pred = clf_.predict(X=X)\n\n assert (y_pred_sk == y_pred).all()\n\n\n@patch(\n \"evalml.pipelines.components.estimators.regressors.arima_regressor.ARIMARegressor._format_dates\"\n)\n@patch(\n \"evalml.pipelines.components.estimators.regressors.arima_regressor.ARIMARegressor._get_dates\"\n)\ndef test_fit_predict_ts_with_y_not_X_index(\n mock_get_dates, mock_format_dates, ts_data_seasonal_train\n):\n X, y = ts_data_seasonal_train\n\n mock_get_dates.return_value = (y.index, X)\n mock_format_dates.return_value = (X, y, None)\n\n fh_ = forecasting.ForecastingHorizon(y.index, is_relative=False)\n\n a_clf = sktime_arima.AutoARIMA()\n clf = a_clf.fit(X=X, y=y)\n y_pred_sk = clf.predict(fh=fh_, X=X)\n\n X_no_ind = X.reset_index(drop=True)\n assert 
isinstance(y.index, pd.DatetimeIndex)\n assert not isinstance(X_no_ind.index, pd.DatetimeIndex)\n\n m_clf = ARIMARegressor(d=None)\n clf_ = m_clf.fit(X=X_no_ind, y=y)\n mock_format_dates.return_value = (X, y, fh_)\n y_pred = clf_.predict(X=X, y=y)\n\n assert (y_pred_sk == y_pred).all()\n\n\ndef test_predict_ts_without_X_error(ts_data):\n X, y = ts_data\n\n m_clf = ARIMARegressor()\n clf_ = m_clf.fit(X=X, y=y)\n with pytest.raises(\n ValueError, match=\"If X was passed to the fit method of the ARIMARegressor\"\n ):\n clf_.predict(y=y)\n\n\n@patch(\"sktime.forecasting.base._sktime.BaseForecaster.predict\")\n@patch(\"sktime.forecasting.base._sktime.BaseForecaster.fit\")\ndef test_predict_ts_X_error(mock_sktime_fit, mock_sktime_predict, ts_data):\n X, y = ts_data\n\n mock_sktime_predict.side_effect = ValueError(\"Sktime value error\")\n\n m_clf = ARIMARegressor()\n clf_ = m_clf.fit(X=X, y=y)\n with pytest.raises(ValueError, match=\"Sktime value error\"):\n clf_.predict(y=y)\n\n\ndef test_fit_ts_with_not_X_not_y_index(ts_data):\n X, y = ts_data\n X = X.reset_index(drop=True)\n y = y.reset_index(drop=True)\n assert not isinstance(y.index, pd.DatetimeIndex)\n assert not isinstance(X.index, pd.DatetimeIndex)\n\n clf = ARIMARegressor()\n with pytest.raises(\n ValueError,\n match=\"If not it will look for the datetime column in the index of X or y.\",\n ):\n clf.fit(X=X, y=y)\n\n\ndef test_predict_ts_with_not_X_index(ts_data):\n X, y = ts_data\n X = X.reset_index(drop=True)\n assert not isinstance(X.index, pd.DatetimeIndex)\n\n m_clf = ARIMARegressor()\n clf_ = m_clf.fit(X=X, y=y)\n with pytest.raises(\n ValueError,\n match=\"If not it will look for the datetime column in the index of X.\",\n ):\n clf_.predict(X)\n\n\ndef test_fit_ts_without_y(ts_data):\n X, y = ts_data\n\n clf = ARIMARegressor()\n with pytest.raises(ValueError, match=\"ARIMA Regressor requires y as input.\"):\n clf.fit(X=X)\n\n\ndef test_fit_predict_ts_no_X_out_of_sample(\n ts_data_seasonal_train, ts_data_seasonal_test\n):\n X, y = ts_data_seasonal_train\n X_test, y_test = ts_data_seasonal_test\n\n fh_ = forecasting.ForecastingHorizon(y_test.index, is_relative=False)\n\n a_clf = sktime_arima.AutoARIMA()\n a_clf.fit(y=y)\n y_pred_sk = a_clf.predict(fh=fh_)\n\n m_clf = ARIMARegressor(d=None)\n m_clf.fit(X=None, y=y)\n y_pred = m_clf.predict(X=None, y=y_test)\n\n assert (y_pred_sk.to_period(\"D\") == y_pred).all()\n\n\[email protected](\"X_none\", [True, False])\ndef test_fit_predict_date_index_named_out_of_sample(\n X_none, ts_data_seasonal_train, ts_data_seasonal_test\n):\n X, y = ts_data_seasonal_train\n X_test, y_test = ts_data_seasonal_test\n\n fh_ = forecasting.ForecastingHorizon(y_test.index, is_relative=False)\n\n a_clf = sktime_arima.AutoARIMA()\n if X_none:\n clf = a_clf.fit(y=y)\n y_pred_sk = clf.predict(fh=fh_)\n else:\n clf = a_clf.fit(X=X, y=y)\n y_pred_sk = clf.predict(fh=fh_, X=X_test)\n\n X = X.reset_index()\n assert not isinstance(X.index, pd.DatetimeIndex)\n m_clf = ARIMARegressor(date_index=\"index\", d=None)\n if X_none:\n m_clf.fit(X=None, y=y)\n y_pred = m_clf.predict(X=None, y=y_test)\n else:\n m_clf.fit(X=X, y=y)\n y_pred = m_clf.predict(X=X_test, y=y_test)\n\n assert (y_pred_sk.to_period(\"D\") == y_pred).all()\n" ]
[ [ "pandas.testing.assert_series_equal", "pandas.DataFrame", "pandas.date_range", "numpy.zeros" ] ]
dcmvdbekerom/exojax
[ "9b9305f8e383c73bdb97c1cfb0e276ddafcd75de" ]
[ "examples/LUH16A/COMP/compmodit.py" ]
[ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport tqdm\n\nimport jax.numpy as jnp\nfrom jax import random\nfrom jax import vmap, jit\n\nfrom exojax.spec import rtransfer as rt\nfrom exojax.spec import planck, moldb, contdb, response, molinfo\nfrom exojax.spec.lpf import xsmatrix\nfrom exojax.spec.exomol import gamma_exomol\nfrom exojax.spec.hitran import SijT, doppler_sigma, gamma_natural, gamma_hitran\nfrom exojax.spec.hitrancia import read_cia, logacia \nfrom exojax.spec.rtransfer import rtrun, dtauM, dtauCIA, nugrid\nfrom exojax.plot.atmplot import plottau, plotcf, plot_maxpoint\nfrom exojax.utils.afunc import getjov_logg\nfrom exojax.utils.constants import RJ, pc, Rs, c\nfrom exojax.spec.evalline import mask_weakline\n\nfrom exojax.spec import dit, modit\n\n#reference pressure for a T-P model\nPref=1.0 #bar\n\n#FLUX reference\nFabs_REF2=2.7e-12 #absolute flux (i.e. flux@10pc) erg/s/cm2/um Burgasser+ 1303.7283 @2.29um\nfac0=RJ**2/((10.0*pc)**2) #nomralize by RJ\nFref=(2.29**2)*Fabs_REF2/fac0/1.e4 #erg/cm2/s/cm-1 @ 2.3um\n\n#loading spectrum\ndat=pd.read_csv(\"../data/luhman16a_spectra_detector1.csv\",delimiter=\",\")\nwavd=(dat[\"wavelength_micron\"].values)*1.e4 #AA\nnusd=1.e8/wavd[::-1]\nfobs=(dat[\"normalized_flux\"].values)[::-1]\nerr=(dat[\"err_normalized_flux\"].values)[::-1]\n\n#ATMOSPHERE\nNP=100\nParr, dParr, k=rt.pressure_layer(NP=NP)\nmmw=2.33 #mean molecular weight\nR=100000.\nbeta=c/(2.0*np.sqrt(2.0*np.log(2.0))*R) #IP sigma need check\nONEARR=np.ones_like(Parr) #ones_array for MMR\nmolmassCO=molinfo.molmass(\"CO\") #molecular mass (CO)\nmolmassH2O=molinfo.molmass(\"H2O\") #molecular mass (H2O)\n\n#LOADING CIA\nmmrH2=0.74\nmmrHe=0.25\nmolmassH2=molinfo.molmass(\"H2\")\nmolmassHe=molinfo.molmass(\"He\")\nvmrH2=(mmrH2*mmw/molmassH2)\nvmrHe=(mmrHe*mmw/molmassHe)\n\n#LINES\ng=10**(5.0)\nT0c=1700.0\nTarr = T0c*np.ones_like(Parr) \nmaxMMR_CO=0.01\nmaxMMR_H2O=0.005\n\n\n###########################################################\n#Loading Molecular datanase and Reducing Molecular Lines\n###########################################################\nNx=3000\nws=22876.0\nwe=23010.0\nmask=(ws<wavd[::-1])*(wavd[::-1]<we)\n#additional mask to remove a strong telluric\nmask=mask*((22898.5>wavd[::-1])+(wavd[::-1]>22899.5)) \nfobsx=fobs[mask]\nnusdx=nusd[mask]\nwavdx=1.e8/nusdx[::-1]\nerrx=err[mask]\n\nprint(\"data masked\",len(nusd),\"->\",len(nusdx))\n\nnus,wav,res=nugrid(ws-5.0,we+5.0,Nx,unit=\"AA\",xsmode=\"modit\")\n#loading molecular database \nmdbCO=moldb.MdbExomol('.database/CO/12C-16O/Li2015',nus) \nmdbH2O=moldb.MdbExomol('.database/H2O/1H2-16O/POKAZATEL',nus,crit=1.e-46) \n#LOADING CIA\ncdbH2H2=contdb.CdbCIA('.database/H2-H2_2011.cia',nus)\ncdbH2He=contdb.CdbCIA('.database/H2-He_2011.cia',nus)\n\n### MODIT settings\nfrom exojax.spec import initspec\nfrom exojax.spec.modit import minmax_dgmatrix\n\ncnu_CO, indexnu_CO, R_CO, pmarray_CO=initspec.init_modit(mdbCO.nu_lines,nus)\ncnu_H2O, indexnu_H2O, R_H2O, pmarray_H2O=initspec.init_modit(mdbH2O.nu_lines,nus)\n\n# Precomputing gdm_ngammaL \nfrom exojax.spec.modit import setdgm_exomol\nfrom jax import jit, vmap\n\nfT = lambda T0,alpha: 
T0[:,None]*(Parr[None,:]/Pref)**alpha[:,None]\nT0_test=np.array([1000.0,1700.0,1000.0,1700.0])\nalpha_test=np.array([0.15,0.15,0.05,0.05])\nres=0.05\ndgm_ngammaL_CO=setdgm_exomol(mdbCO,fT,Parr,R_CO,molmassCO,res,T0_test,alpha_test)\ndgm_ngammaL_H2O=setdgm_exomol(mdbH2O,fT,Parr,R_H2O,molmassH2O,res,T0_test,alpha_test)\n\n#######################################################\n#HMC-NUTS FITTING PART\n#######################################################\nimport numpyro.distributions as dist\nimport numpyro\nfrom numpyro.infer import MCMC, NUTS\nfrom numpyro.infer import Predictive\nfrom numpyro.diagnostics import hpdi\nfrom exojax.spec.modit import exomol,xsmatrix\n\nbaseline=1.07 #(baseline for a CIA photosphere in the observed (normaized) spectrum)\n# Model\ndef predmod(nu1,y1,e1): \n Rp = 0.88\n Mp = 33.2\n RV = 28.07\n sigma=0.0135\n MMR_CO = 0.0059\n MMR_H2O = 0.0023\n T0 = 1295\n alpha = 0.097\n vsini = 16.0\n q1 = 0.5\n q2 = 0.5\n sqrtq1=jnp.sqrt(q1)\n u1=2.0*sqrtq1*q2\n u2=sqrtq1*(1.0-2.0*q2)\n \n g=2478.57730044555*Mp/Rp**2 #gravity\n \n #T-P model//\n Tarr = T0*(Parr/Pref)**alpha \n \n #line computation CO\n qt_CO=vmap(mdbCO.qr_interp)(Tarr)\n qt_H2O=vmap(mdbH2O.qr_interp)(Tarr)\n \n def obyo(y,tag,nusdx,nus,mdbCO,mdbH2O,cdbH2H2,cdbH2He):\n #CO\n SijM_CO,ngammaLM_CO,nsigmaDl_CO=exomol(mdbCO,Tarr,Parr,R_CO,molmassCO)\n xsm_CO=xsmatrix(cnu_CO,indexnu_CO,R_CO,pmarray_CO,nsigmaDl_CO,ngammaLM_CO,SijM_CO,nus,dgm_ngammaL_CO)\n dtaumCO=dtauM(dParr,jnp.abs(xsm_CO),MMR_CO*ONEARR,molmassCO,g)\n \n #H2O\n SijM_H2O,ngammaLM_H2O,nsigmaDl_H2O=exomol(mdbH2O,Tarr,Parr,R_H2O,molmassH2O)\n xsm_H2O=xsmatrix(cnu_H2O,indexnu_H2O,R_H2O,pmarray_H2O,nsigmaDl_H2O,ngammaLM_H2O,SijM_H2O,nus,dgm_ngammaL_H2O)\n dtaumH2O=dtauM(dParr,jnp.abs(xsm_H2O),MMR_H2O*ONEARR,molmassH2O,g)\n\n #CIA\n dtaucH2H2=dtauCIA(nus,Tarr,Parr,dParr,vmrH2,vmrH2,\\\n mmw,g,cdbH2H2.nucia,cdbH2H2.tcia,cdbH2H2.logac)\n dtaucH2He=dtauCIA(nus,Tarr,Parr,dParr,vmrH2,vmrHe,\\\n mmw,g,cdbH2He.nucia,cdbH2He.tcia,cdbH2He.logac)\n \n dtau=dtaumCO+dtaumH2O+dtaucH2H2+dtaucH2He \n sourcef = planck.piBarr(Tarr,nus)\n\n Ftoa=Fref/Rp**2\n F0=rtrun(dtau,sourcef)/baseline/Ftoa\n \n Frot=response.rigidrot(nus,F0,vsini,u1,u2)\n mu=response.ipgauss_sampling(nusdx,nus,Frot,beta,RV)\n return mu\n\n mu=obyo(y1,\"y1\",nusdx,nus,mdbCO,mdbH2O,cdbH2H2,cdbH2He)\n return mu\n \n#mcmc.run(rng_key_, nu1=nusd1, y1=fobs1, e1=err1)\nmu=predmod(nusdx,fobsx,errx)\nprint(mu)\nnp.savez(\"cmodit.npz\",[nusdx,mu])\n" ]
[ [ "pandas.read_csv", "numpy.savez", "numpy.ones_like", "numpy.log", "numpy.array" ] ]
AhmedArslan/arcsv
[ "596cbc30df6a213a6dc95fae1bcac865e2793dd9" ]
[ "arcsv/convex_diploid.py" ]
[ "import cvxpy as cvx\nimport numpy as np\n\ndef convex_diploid(lhs, ncs, pi_robust):\n n = len(lhs)\n w = cvx.Variable(n)\n print('building objective. . .')\n f = ((1-pi_robust) * np.asarray(lhs) + pi_robust) / np.asarray(ncs)\n print(f.shape)\n for i in range(f.shape[0]):\n print(np.sum(np.log(f[i,:])))\n obj = sum(cvx.log(f.T * w))\n # for i in range(len(lhs[0])):\n # obj_sum += cvx.log(w.T * [(pi_robust + (1-pi_robust)*lh[i])/nc[i] for (lh, nc) in zip(lhs, ncs)])\n objective = cvx.Maximize(obj)\n\n constraints = [w >= 0,\n sum(w) == 1]\n problem = cvx.Problem(objective, constraints)\n print('solving. . .')\n problem.solve()\n print(problem.status)\n return w.value, objective.value\n" ]
[ [ "numpy.log", "numpy.asarray" ] ]
batmancn/Tensorflow-Tutorial
[ "946abac1b3723fff5525758c12bda827a93f9d2b" ]
[ "tutorial-contents/203_variable.py" ]
[ "\"\"\"\nKnow more, visit my Python tutorial page: https://morvanzhou.github.io/tutorials/\nMy Youtube Channel: https://www.youtube.com/user/MorvanZhou\n\nDependencies:\ntensorflow: 1.1.0\n\"\"\"\nimport tensorflow as tf\n\nvar = tf.Variable(0) # our first variable in the \"global_variable\" set\n\nadd_operation = tf.add(var, 1)\nupdate_operation = tf.assign(var, add_operation)\n\nwith tf.Session() as sess:\n # once define variables, you have to initialize them by doing this\n sess.run(tf.global_variables_initializer())\n for _ in range(3):\n sess.run(update_operation)\n print(sess.run(var))" ]
[ [ "tensorflow.global_variables_initializer", "tensorflow.add", "tensorflow.assign", "tensorflow.Session", "tensorflow.Variable" ] ]
takecore/blueoil
[ "ae3001f4c5cdf1023371fa065898adbc6fcd1d33" ]
[ "lmnet/tests/lmnet_tests/test_pre_processor.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright 2018 The Blueoil Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nimport numpy as np\nimport pytest\n\nfrom lmnet.pre_processor import (\n ResizeWithJoints,\n JointsToGaussianHeatmap\n)\n\n# Apply reset_default_graph() in conftest.py to all tests in this file.\npytestmark = pytest.mark.usefixtures(\"reset_default_graph\")\n\n\ndef test_resize_with_joints():\n\n image = np.zeros(shape=(5, 5, 3), dtype=np.uint8)\n joints = np.ones(shape=(1, 3))\n\n # x\n joints[0, 0] = 2\n # y\n joints[0, 1] = 3\n\n image_size = (10, 10)\n resizer_10x10 = ResizeWithJoints(image_size)\n\n # No joints will be provided on inference time.\n resized = resizer_10x10(image=image)\n resized_image = resized[\"image\"]\n\n assert resized_image.shape[0] == 10\n assert resized_image.shape[1] == 10\n assert resized_image.shape[2] == 3\n\n resized = resizer_10x10(image=image, joints=joints)\n resized_image = resized[\"image\"]\n resized_joints = resized[\"joints\"]\n\n assert isinstance(resized_image, np.ndarray)\n assert isinstance(resized_joints, np.ndarray)\n\n assert resized_image.shape[0] == 10\n assert resized_image.shape[1] == 10\n assert resized_image.shape[2] == 3\n\n assert resized_joints[0, 0] == 4\n assert resized_joints[0, 1] == 6\n assert resized_joints[0, 2] == joints[0, 2]\n\n # joints should not be changed in-place.\n assert resized_joints is not joints\n\n image_size = (10, 20)\n resizer_10x20 = ResizeWithJoints(image_size)\n\n # No joints will be provided on inference time.\n resized = resizer_10x20(image=image, joints=joints)\n resized_image = resized[\"image\"]\n resized_joints = resized[\"joints\"]\n\n assert resized_image.shape[0] == 10\n assert resized_image.shape[1] == 20\n assert resized_image.shape[2] == 3\n\n assert resized_joints[0, 0] == 8\n assert resized_joints[0, 1] == 6\n assert resized_joints[0, 2] == joints[0, 2]\n\n\ndef test_joints_to_gaussian_heatmap():\n\n image_size = (256, 320)\n\n stride = 2\n num_joints = 17\n\n input_joints = np.array([[1, 1, 1],\n [2, 2, 1],\n [3, 3, 1],\n [4, 4, 1],\n [5, 5, 1],\n [6, 6, 1],\n [7, 7, 1],\n [8, 8, 1],\n [9, 9, 1],\n [10, 10, 1],\n [11, 11, 1],\n [12, 12, 1],\n [13, 13, 1],\n [14, 14, 1],\n [15, 15, 1],\n [16, 16, 1],\n [17, 17, 0]])\n\n pre_process = JointsToGaussianHeatmap(image_size, num_joints=num_joints,\n stride=stride, sigma=2, max_value=10)\n\n heatmap = pre_process(joints=input_joints)[\"heatmap\"]\n\n # It is hard to test semantic correctness of a gaussian heatmap manually.\n # That part will be tested jointly with FormatJoints() in test_post_processor.py.\n assert isinstance(heatmap, np.ndarray)\n assert heatmap.shape[0] == image_size[0] // stride\n assert heatmap.shape[1] == image_size[1] // stride\n assert heatmap.shape[2] == 17\n assert np.max(heatmap) == 10\n" ]
[ [ "numpy.array", "numpy.ones", "numpy.max", "numpy.zeros" ] ]
bryanmr/pytorch
[ "129e99fbce8694f5694ec85c40ffa8e32ea674ed" ]
[ "test/distributed/test_c10d_common.py" ]
[ "# Owner(s): [\"oncall: distributed\"]\n\nimport copy\nimport os\nimport sys\nimport tempfile\nimport threading\nimport time\nimport unittest\nfrom datetime import timedelta\nfrom itertools import product\nfrom sys import platform\n\nimport torch\nimport torch.distributed as dist\n\nif not dist.is_available():\n print(\"distributed package not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\nimport torch.distributed.distributed_c10d as c10d\nimport torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD\nimport torch.nn.functional as F\nimport torch.testing._internal.common_utils as common\nfrom torch import nn\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.testing._internal.common_distributed import (\n MultiProcessTestCase,\n)\nfrom torch.testing._internal.common_utils import (\n TestCase,\n load_tests,\n run_tests,\n TEST_WITH_DEV_DBG_ASAN,\n)\n\nif TEST_WITH_DEV_DBG_ASAN:\n print(\"Multiprocessing spawn is not compatible with dev/dbg asan\", file=sys.stderr)\n sys.exit(0)\n\n# load_tests from common_utils is used to automatically filter tests for\n# sharding on sandcastle. This line silences flake warnings\nload_tests = load_tests\n\nif platform == \"darwin\":\n LOOPBACK = \"lo0\"\nelse:\n LOOPBACK = \"lo\"\n\ntorch.backends.cuda.matmul.allow_tf32 = False\n\n\ndef gpus_for_rank(world_size):\n \"\"\"Multigpu tests are designed to simulate the multi nodes with multi\n GPUs on each node. Nccl backend requires equal #GPUs in each process.\n On a single node, all visible GPUs are evenly\n divided to subsets, each process only uses a subset.\n \"\"\"\n visible_devices = list(range(torch.cuda.device_count()))\n gpus_per_process = torch.cuda.device_count() // world_size\n gpus_for_rank = []\n for rank in range(world_size):\n gpus_for_rank.append(\n visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]\n )\n return gpus_for_rank\n\n\nclass AbstractTimeoutTest(object):\n def _test_store_timeout(self, backend, init_method, c2p):\n try:\n dist.init_process_group(\n backend=backend,\n init_method=init_method,\n world_size=1,\n rank=0,\n timeout=timedelta(seconds=1),\n )\n default_store = c10d._get_default_store()\n tik = time.time()\n with self.assertRaisesRegex(RuntimeError, \"Timeout\"):\n default_store.get(\"nonexistent key\")\n tok = time.time()\n dist.destroy_process_group()\n c2p.append(float(tok - tik))\n except RuntimeError as e:\n # catch \"Address already in use\" error and report it to the main\n # thread\n c2p.append(e)\n\n def _init_methods(self):\n f = tempfile.NamedTemporaryFile(delete=False)\n if sys.platform == \"win32\":\n yield \"file:///%s\" % f.name.replace(\"\\\\\", \"/\")\n f.close()\n else:\n yield \"file://%s\" % f.name\n f.close()\n yield \"tcp://127.0.0.1:%d\" % common.find_free_port()\n\n def _test_default_store_timeout(self, backend):\n for init_method in self._init_methods():\n c2p = []\n t = threading.Thread(\n target=self._test_store_timeout, args=(backend, init_method, c2p)\n )\n t.daemon = True\n t.start()\n t.join(5)\n\n self.assertEqual(1, len(c2p))\n if isinstance(c2p[0], float):\n # waiting time should be 1s, use 3s to rule out false alarm\n self.assertGreater(3, c2p[0])\n elif isinstance(c2p[0], RuntimeError):\n # let @retry_on_connect_failures handle the error\n raise c2p[0]\n else:\n raise RuntimeError(\"Unexpected type {}\".format(type(c2p[0])))\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = 
nn.Linear(10, 50, bias=False)\n self.fc3 = nn.Linear(50, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n x = self.fc3(x)\n return F.softmax(x, dim=1)\n\n\nclass DoubleGpuNet(nn.Module):\n def __init__(self, gpus):\n super(DoubleGpuNet, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])\n self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])\n self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])\n self.relu = nn.ReLU()\n self.no_grad_param = nn.Parameter(\n torch.tensor([2, 2]).long(), requires_grad=False\n ).to(gpus[0])\n\n def forward(self, x):\n dev0 = self.fc1.weight.device\n dev1 = self.fc2.weight.device\n x = self.relu(self.fc1(x.to(dev0)))\n x = self.relu(self.fc2(x.to(dev1)))\n x = self.fc3(x)\n return F.softmax(x, dim=1).to(dev0)\n\n\nclass QuadraGpuNet(nn.Module):\n def __init__(self, gpus):\n super(QuadraGpuNet, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])\n self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])\n self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])\n self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])\n self.relu = nn.ReLU()\n self.no_grad_param = nn.Parameter(\n torch.tensor([2, 2]).long(), requires_grad=False\n ).to(gpus[0])\n\n def forward(self, x):\n dev0 = self.fc1.weight.device\n dev1 = self.fc2.weight.device\n dev2 = self.fc3.weight.device\n dev3 = self.fc4.weight.device\n x = self.relu(self.fc1(x.to(dev0)))\n x = self.relu(self.fc2(x.to(dev1)))\n x = self.relu(self.fc3(x.to(dev2)))\n x = self.fc4(x.to(dev3))\n return F.softmax(x, dim=1).to(dev0)\n\n\nclass ConvNet(nn.Module):\n def __init__(self, gpus, layouts, dtypes):\n super(ConvNet, self).__init__()\n self.dtypes = dtypes\n if isinstance(gpus, list):\n self.layer_gpus = gpus\n else:\n gpus = [gpus] * 4\n self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(\n device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]\n )\n self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(\n device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]\n )\n self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(\n device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]\n )\n self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(\n device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]\n )\n\n def forward(self, x):\n x = x.to(self.dtypes[0])\n # Could say\n # x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])\n # etc. 
But I don't want to appeal to the weights' devices directly, because part of this test's purpose\n # is to verify weights are where expected if the model gets replicated.\n gpus = self.layer_gpus if hasattr(self, \"layer_gpus\") else [x.device] * 4\n x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])\n x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])\n x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])\n return self.conv3(x)\n\n\nclass Task(nn.Module):\n def __init__(self):\n super().__init__()\n self.p = nn.Parameter(torch.ones(2, 2))\n\n def forward(self, x):\n return self.p + x\n\n\nclass ModuleForDdpCommHook(nn.Module):\n def __init__(self):\n super().__init__()\n self.t0 = Task()\n\n def forward(self, x, rank):\n return self.t0(x + rank)\n\n\nclass SparseGradientModule(nn.Module):\n def __init__(self):\n super(SparseGradientModule, self).__init__()\n self.embedding = nn.EmbeddingBag(10, 10, sparse=True)\n\n def forward(self, x):\n return F.softmax(self.embedding(x), dim=1)\n\n\nclass AbstractDistributedDataParallelTest(object):\n def tearDown(self):\n # DistributedDataParallel test doesn't seem to call FileStore destructor\n # TODO: investigate this test and the test is known to have issues\n # Use this hack to remove files for that test\n try:\n os.remove(self.file_name)\n except OSError:\n pass\n\n @property\n def world_size(self):\n return 2\n\n def _prepare_single_device_module(\n self,\n process_group,\n devices,\n device_ids,\n global_batch_size,\n gradient_as_bucket_view=False,\n ):\n model = Net()\n device = devices[0] if devices else torch.device(\"cuda:%d\" % self.rank)\n ddp_model = DistributedDataParallel(\n copy.deepcopy(model).to(device),\n device_ids=device_ids,\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n model.to(device)\n\n input = torch.randn(global_batch_size, 2).to(device)\n target = torch.randn(global_batch_size, 4).to(device)\n\n return model, ddp_model, input, target\n\n def _prepare_multi_device_module(\n self,\n process_group,\n devices,\n device_ids,\n global_batch_size,\n gradient_as_bucket_view=False,\n ):\n self.assertTrue(\n len(devices) == 2 or len(devices) == 4,\n \"unexpected devices for ddp tests {}\".format(devices),\n )\n if len(devices) == 2:\n model = DoubleGpuNet(devices)\n elif len(devices) == 4:\n model = QuadraGpuNet(devices)\n\n ddp_model = DistributedDataParallel(\n copy.deepcopy(model),\n device_ids=device_ids,\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n input = torch.randn(global_batch_size, 2).cuda(devices[0])\n target = torch.randn(global_batch_size, 4)\n\n return model, ddp_model, input, target\n\n def _test_ddp_with_process_group(\n self,\n process_group,\n devices,\n device_ids,\n multi_device=False,\n gradient_as_bucket_view=False,\n ):\n \"\"\"\n Note: we pass down `device_ids` all the way to DistributedDataParallel\n as part of the test. 
Below you find tests that either use a list of\n integers, a list of `torch.Device` instances, or an empty list.\n The `devices` argument is used to control placement of the model and\n must always be specified as list of `torch.Device` instances.\n \"\"\"\n local_batch_size = 1 if devices is None else len(devices)\n global_batch_size = self.world_size * local_batch_size\n\n if multi_device:\n model, ddp_model, input, target = self._prepare_multi_device_module(\n process_group,\n devices,\n device_ids,\n global_batch_size,\n gradient_as_bucket_view,\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n self.assertTrue(ddp_logging_data.get(\"is_multi_device_module\"))\n else:\n model, ddp_model, input, target = self._prepare_single_device_module(\n process_group,\n devices,\n device_ids,\n global_batch_size,\n gradient_as_bucket_view,\n )\n ddp_logging_data = ddp_model._get_ddp_logging_data()\n self.assertFalse(ddp_logging_data.get(\"is_multi_device_module\"))\n\n def step_model(model, input, target):\n model.train()\n output = model(input)\n loss = F.mse_loss(output, target.to(output.device))\n loss.backward()\n\n def update_parameters(model):\n for param in model.parameters():\n with torch.no_grad():\n param -= param.grad\n param.grad = None\n\n # check two model parameters over 2 iterations\n for iteration in range(2):\n # single cpu/gpu training\n step_model(model, input, target)\n\n # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs\n step_model(\n ddp_model,\n input[\n self.rank * local_batch_size : (self.rank + 1) * local_batch_size\n ],\n target[\n self.rank * local_batch_size : (self.rank + 1) * local_batch_size\n ],\n )\n\n # Update weights and run a second iteration to shake out errors\n update_parameters(model)\n update_parameters(ddp_model)\n self.assertEqual(\n len(list(model.parameters())), len(list(ddp_model.parameters()))\n )\n for i, j in zip(model.parameters(), ddp_model.parameters()):\n self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)\n\n # Shuffle the input so that DDP input is different\n torch.manual_seed(1337 + iteration)\n input = input[torch.randperm(global_batch_size)]\n\n def _gpu_model_with_ddp_comm_hook(\n self, process_group, hook=None, gradient_as_bucket_view=False, state=None\n ):\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n gpu_model = DistributedDataParallel(\n ModuleForDdpCommHook().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n # Register a DDP communication hook if any.\n if hook is not None:\n gpu_model.register_comm_hook(state, hook)\n\n return gpu_model\n\n def _gpu_model_with_builtin_ddp_comm_hook(\n self, process_group, hook=None, gradient_as_bucket_view=False\n ):\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n gpu_model = DistributedDataParallel(\n ModuleForDdpCommHook().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n # Register a built-in DDP communication hook if defined\n if hook is not None:\n gpu_model._register_builtin_comm_hook(hook)\n\n return gpu_model\n\n def _run_and_verify_hook(self, model, input, expected_grad):\n # Run forward\n output = model(input, self.rank)\n\n # Run backward\n output.mean().backward()\n\n [self.assertEqual(p.grad, expected_grad) for p in model.parameters()]\n\n def _simple_hook(\n self, state: object, bucket: dist.GradBucket\n ) -> torch.futures.Future[torch.Tensor]:\n fut = torch.futures.Future()\n 
fut.set_result(torch.ones_like(bucket.buffer()))\n\n def fut_then(fut):\n # Add ones to fut's result.\n t = fut.value()\n return t + torch.ones_like(t)\n\n return fut.then(fut_then)\n\n\nclass DistributedDataParallelTest(\n AbstractDistributedDataParallelTest, MultiProcessTestCase\n):\n def setUp(self):\n super(DistributedDataParallelTest, self).setUp()\n self._spawn_processes()\n\n def test_invalid_powerSGD_state(self):\n for start_powerSGD_iter, use_error_feedback, warm_start in product(\n [0, 1], [True, False], [True, False]\n ):\n if not use_error_feedback and not warm_start:\n continue\n with self.assertRaisesRegex(\n ValueError,\n \"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, \"\n \"because PowerSGD can only be applied after the first two iterations in DDP.\",\n ):\n state = powerSGD.PowerSGDState(\n process_group=None,\n matrix_approximation_rank=1,\n start_powerSGD_iter=start_powerSGD_iter,\n use_error_feedback=use_error_feedback,\n warm_start=warm_start,\n )\n\n\nclass ComputeBucketAssignmentTest(TestCase):\n def test_single_limit_single_dtype(self):\n tensors = [\n torch.empty([100], dtype=torch.float),\n torch.empty([200], dtype=torch.float),\n torch.empty([100], dtype=torch.float),\n torch.empty([50], dtype=torch.float),\n ]\n result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(\n tensors, [400]\n )\n self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))\n self.assertEqual([[0], [1], [2], [3]], result)\n\n def test_single_limit_multi_dtype(self):\n tensors = [\n torch.empty([50], dtype=torch.float),\n torch.empty([25], dtype=torch.double),\n torch.empty([50], dtype=torch.float),\n torch.empty([25], dtype=torch.double),\n torch.empty([50], dtype=torch.float),\n torch.empty([25], dtype=torch.double),\n ]\n result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(\n tensors, [400]\n )\n self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))\n self.assertEqual([[0, 2], [1, 3], [4], [5]], result)\n\n def test_multi_limit_single_dtype(self):\n tensors = [\n torch.empty([10], dtype=torch.float),\n torch.empty([10], dtype=torch.float),\n torch.empty([10], dtype=torch.float),\n torch.empty([10], dtype=torch.float),\n ]\n result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(\n tensors, [40, 80]\n )\n self.assertEqual(per_bucket_size_limits, [40, 80, 80])\n self.assertEqual([[0], [1, 2], [3]], result)\n\n def test_multi_limit_multi_dtype(self):\n tensors = [\n torch.empty([50], dtype=torch.float),\n torch.empty([25], dtype=torch.double),\n torch.empty([50], dtype=torch.float),\n torch.empty([25], dtype=torch.double),\n torch.empty([50], dtype=torch.float),\n torch.empty([25], dtype=torch.double),\n ]\n result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(\n tensors, [200, 400]\n )\n self.assertEqual([[0], [1], [2, 4], [3, 5]], result)\n self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])\n\n\nclass AbstractCommTest(object):\n @property\n def op_timeout_sec(self):\n return 1\n\n @property\n def world_size(self):\n return 2\n\n def _verify_sequence_number_across_pg(self, pg, verify_pg):\n\n seq_num = pg._get_sequence_number_for_group()\n obj_list = [None for _ in range(dist.get_world_size(verify_pg))]\n # We use a separate pg to verify the sequence numbers, otherwise these\n # collectives will themselves increment the sequence number.\n dist.all_gather_object(obj_list, seq_num, group=verify_pg)\n 
self.assertEqual(len(set(obj_list)), 1)\n return obj_list[0]\n\n def _test_sequence_num_incremented(self, process_group, ranks):\n # verify initial sequence numbers. Use a distinct process group for\n # verification to keep counts as expected with respect to process_group.\n verify_pg = dist.new_group(\n ranks=ranks,\n backend=\"gloo\",\n )\n assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)\n\n initial_num = (\n self._verify_sequence_number_across_pg(\n pg=process_group, verify_pg=verify_pg\n )\n if not c10d._rank_not_in_group(process_group)\n else -1\n )\n\n # Verify sequence numbers are appropriately incremented\n for i in range(10):\n t = torch.ones(1, device=torch.cuda.current_device())\n dist.all_reduce(t, group=process_group)\n if not c10d._rank_not_in_group(process_group):\n seq_num = self._verify_sequence_number_across_pg(\n pg=process_group,\n verify_pg=verify_pg,\n )\n self.assertEqual(initial_num + i + 1, seq_num)\n\n if dist.get_world_size(process_group) > 2:\n # Test when certain ranks don't call collectives\n if dist.get_rank(process_group) not in [0, 2]:\n dist.all_reduce(t, group=process_group, async_op=True)\n # Now ranks 0 and 2 should be lagging by 1.\n if not c10d._rank_not_in_group(process_group):\n seq_num = process_group._get_sequence_number_for_group()\n rank = dist.get_rank(process_group)\n obj_list = [None for _ in range(dist.get_world_size(verify_pg))]\n dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)\n rank_to_seq_num = {rank: num for (rank, num) in obj_list}\n self.assertEqual(len(set(rank_to_seq_num.values())), 2)\n self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])\n expected_same = {\n rank_to_seq_num[i]\n for i in rank_to_seq_num.keys()\n if i not in [0, 2]\n }\n self.assertEqual(len(expected_same), 1)\n self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])\n\n def _test_sequence_num_incremented_default_group(self, backend_name):\n torch.cuda.set_device(self.rank)\n store = dist.FileStore(self.file_name, self.world_size)\n dist.init_process_group(\n backend_name,\n world_size=self.world_size,\n rank=self.rank,\n store=store,\n )\n self._test_sequence_num_incremented(\n c10d._get_default_group(),\n ranks=list(i for i in range(dist.get_world_size())),\n )\n\n def _test_sequence_num_incremented_subgroup(self, backend_name):\n torch.cuda.set_device(self.rank)\n store = dist.FileStore(self.file_name, self.world_size)\n dist.init_process_group(\n backend_name,\n world_size=self.world_size,\n rank=self.rank,\n store=store,\n )\n subgroup_ranks = [0, 1, 2]\n subgroup = dist.new_group(subgroup_ranks)\n self._test_sequence_num_incremented(subgroup, subgroup_ranks)\n\n def _test_sequence_num_set_default_pg(self, backend):\n store = dist.FileStore(self.file_name, self.world_size)\n dist.init_process_group(\n backend,\n world_size=self.world_size,\n rank=self.rank,\n store=store,\n )\n\n default_pg = c10d._get_default_group()\n seq_num = default_pg._get_sequence_number_for_group()\n obj_list = [None for _ in range(dist.get_world_size())]\n dist.all_gather_object(obj_list, seq_num)\n self.assertEqual(len(set(obj_list)), 1)\n\n def _test_sequence_num_set_new_group(self, backend):\n store = dist.FileStore(self.file_name, self.world_size)\n dist.init_process_group(\n backend,\n world_size=self.world_size,\n rank=self.rank,\n store=store,\n )\n\n subgroup = dist.new_group([0, 1])\n\n if not c10d._rank_not_in_group(subgroup):\n subgroup_seq = subgroup._get_sequence_number_for_group()\n obj_list = [None for _ in 
range(dist.get_world_size(subgroup))]\n dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)\n self.assertEqual(len(set(obj_list)), 1)\n\nclass CommTest(AbstractCommTest, MultiProcessTestCase):\n def setUp(self):\n super(CommTest, self).setUp()\n self._spawn_processes()\n\n def tearDown(self):\n super(CommTest, self).tearDown()\n try:\n os.remove(self.file_name)\n except OSError:\n pass\n\n def test_distributed_debug_mode(self):\n # Default should be off\n default_debug_mode = dist._get_debug_mode()\n self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF)\n mapping = {\n \"OFF\": dist._DistributedDebugLevel.OFF,\n \"INFO\": dist._DistributedDebugLevel.INFO,\n \"DETAIL\": dist._DistributedDebugLevel.DETAIL,\n }\n invalid_debug_modes = [\"foo\", 0, 1, -1]\n\n for mode in mapping.keys():\n os.environ[\"TORCH_DISTRIBUTED_DEBUG\"] = str(mode)\n set_debug_mode = dist._get_debug_mode()\n self.assertEqual(\n set_debug_mode,\n mapping[mode],\n f\"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}\",\n )\n\n for mode in invalid_debug_modes:\n os.environ[\"TORCH_DISTRIBUTED_DEBUG\"] = str(mode)\n with self.assertRaisesRegex(RuntimeError, \"to be one of\"):\n dist._get_debug_mode()\n\n\nclass DummyWork(dist._Work):\n def wait(self, timeout=5.0):\n if torch.cuda.is_available():\n torch.cuda.current_stream().synchronize()\n return True\n\n\nclass DummyProcessGroup(dist.ProcessGroup):\n def getBackendName(self):\n return \"Dummy\"\n\n def allgather(self, output_tensor_lists, input_tensor_list, opts=None):\n for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):\n for output_tensor in output_tensor_list:\n output_tensor.copy_(input_tensor)\n\n return DummyWork()\n\n def allreduce(self, tensor_list, opts=None):\n for tensor in tensor_list:\n tensor.add_(2)\n\n return DummyWork()\n\n def broadcast(self, tensor_list, opts=None):\n for tensor in tensor_list:\n tensor.add_(1)\n\n return DummyWork()\n\n def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):\n for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):\n output_tensor.copy_(input_tensor_list[self.rank()])\n\n return DummyWork()\n\n def send(self, tensor_list, dst, tag=0):\n for tensor in tensor_list:\n tensor.add_(1)\n\n return DummyWork()\n\n def recv(self, tensor_list, src, tag=0):\n for tensor in tensor_list:\n tensor.add_(2)\n\n return DummyWork()\n\n\nclass PythonProcessGroupTest(MultiProcessTestCase):\n def setUp(self):\n super(PythonProcessGroupTest, self).setUp()\n self._spawn_processes()\n\n def tearDown(self):\n super(PythonProcessGroupTest, self).tearDown()\n try:\n os.remove(self.file_name)\n except OSError:\n pass\n\n def test_get_backend_name(self):\n dpg = DummyProcessGroup(0, 1)\n self.assertEqual(\"Dummy\", dpg.name())\n\n def test_backend_class_attr(self):\n dist.Backend.register_backend(\n \"dummy\",\n PythonProcessGroupTest.create_dummy\n )\n self.assertEqual(dist.Backend.DUMMY, \"DUMMY\")\n self.assertEqual(\n dist.Backend._plugins[\"DUMMY\"],\n PythonProcessGroupTest.create_dummy\n )\n\n @staticmethod\n def create_dummy(store, rank, size, timeout):\n return DummyProcessGroup(rank, size)\n\n @unittest.skipIf(\n common.IS_MACOS,\n \"Python c10d extension is not yet supported on MacOS\"\n )\n def test_collectives(self):\n dist.Backend.register_backend(\"dummy\", PythonProcessGroupTest.create_dummy)\n\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '6789'\n 
dist.init_process_group(\"dummy\", rank=self.rank, world_size=self.world_size)\n\n # test all_gather\n input_tensor = torch.ones(2, 2) * 7\n output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]\n dist.all_gather(output_tensor_list, input_tensor)\n\n for tensor in output_tensor_list:\n self.assertEqual(tensor, input_tensor)\n\n # test all_reduce\n input_tensor = torch.ones(2, 2) * 7\n dist.all_reduce(input_tensor)\n self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)\n\n # test broadcast\n input_tensor = torch.zeros(2, 2)\n dist.broadcast(input_tensor, 0, async_op=True).wait()\n self.assertEqual(torch.ones(2, 2), input_tensor)\n\n # test reduce_scatter\n output_tensor = torch.zeros(2, 2)\n input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]\n dist.reduce_scatter(output_tensor, input_tensor_list)\n self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)\n\n dist.destroy_process_group()\n\n @unittest.skipIf(\n common.IS_MACOS,\n \"Python c10d extension is not yet supported on MacOS\"\n )\n def test_send_recv(self):\n dist.Backend.register_backend(\"dummy\", PythonProcessGroupTest.create_dummy)\n\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = '6789'\n dist.init_process_group(\"dummy\", rank=self.rank, world_size=self.world_size)\n\n # test send\n input_tensor = torch.zeros(2, 2)\n dist.send(input_tensor, (self.rank + 1) % self.world_size)\n self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)\n\n # test recv\n input_tensor = torch.zeros(2, 2)\n dist.recv(input_tensor, (self.rank + 1) % self.world_size)\n self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)\n\n # intentionally not calling into `destroy_process_group` as not all\n # user applications would explicitly that.\n\n\nif __name__ == \"__main__\":\n assert (\n not torch.cuda._initialized\n ), \"test_distributed must not have initialized CUDA context on main process\"\n\n run_tests()\n" ]
[ [ "torch.distributed.distributed_c10d._get_default_group", "torch.empty", "torch.testing._internal.common_utils.run_tests", "torch.nn.functional.softmax", "torch.no_grad", "torch.distributed.FileStore", "torch.distributed.new_group", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.testing._internal.common_utils.find_free_port", "torch.nn.EmbeddingBag", "torch.distributed.distributed_c10d._rank_not_in_group", "torch.distributed.broadcast", "torch.randn", "torch.distributed.is_available", "torch.distributed.init_process_group", "torch.distributed._get_debug_mode", "torch.cuda.device_count", "torch.distributed.Backend.register_backend", "torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook.PowerSGDState", "torch.cuda.current_stream", "torch.device", "torch.cuda.set_device", "torch.ones_like", "torch.distributed.all_gather", "torch.ones", "torch.distributed.get_world_size", "torch.distributed.get_rank", "torch.manual_seed", "torch.tensor", "torch.futures.Future", "torch.distributed.send", "torch.distributed._compute_bucket_assignment_by_size", "torch.cuda.current_device", "torch.distributed.all_reduce", "torch.distributed.recv", "torch.nn.Linear", "torch.distributed.reduce_scatter", "torch.distributed.distributed_c10d._get_default_store", "torch.distributed.all_gather_object", "torch.randperm", "torch.zeros", "torch.distributed.destroy_process_group", "torch.nn.ReLU" ] ]
AIM3-RUC/ABAW2022
[ "f1d25dc9914cc6768e58c14cea893c8e00b541bd" ]
[ "models/networks/lstm_encoder.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\nfrom .fc_encoder import FcEncoder\n\nclass BiLSTMEncoder(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(BiLSTMEncoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,\n bidirectional=True, num_layers=1)\n \n def forward(self, x, states=None):\n '''\n Parameters:\n ------------------------\n x: input feature seqences\n states: (h_0, c_0)\n '''\n r_out, (h_n, h_c) = self.rnn(x, states)\n return r_out, (h_n, h_c)\n\nclass AttentiveLSTMEncoder(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(AttentiveLSTMEncoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,\n bidirectional=True, num_layers=1)\n \n self.layer_norm = nn.LayerNorm(hidden_size * 2)\n self.se = nn.Sequential(\n nn.Conv1d(hidden_size*2, hidden_size // 2, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.Conv1d(hidden_size // 2, hidden_size // 2, kernel_size=3, padding=1),\n nn.ReLU(inplace=True),\n nn.Conv1d(hidden_size // 2, hidden_size*2, kernel_size=1),\n nn.Sigmoid()\n )\n \n self.out_cnn = nn.Sequential(\n nn.Conv1d(hidden_size*2, hidden_size*2, kernel_size=5, padding=2),\n nn.ReLU(inplace=True),\n nn.Conv1d(hidden_size*2, hidden_size*2, kernel_size=3, padding=1),\n nn.ReLU(inplace=True)\n )\n \n def forward(self, x, states=None):\n '''\n Parameters:\n ------------------------\n x: input feature seqences\n states: (h_0, c_0)\n '''\n r_out, (h_n, h_c) = self.rnn(x, states)\n # attn = self.se(r_out.transpose(1, 2))\n # attn = attn.transpose(1, 2)\n # return r_out * attn, (h_n, h_c)\n return r_out, (h_n, h_c)\n\nclass LSTMEncoder(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(LSTMEncoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,\n num_layers=1)\n \n def forward(self, x, states):\n '''\n Parameters:\n ------------------------\n x: input feature seqences\n states: (h_0, c_0)\n '''\n r_out, (h_n, h_c) = self.rnn(x, states)\n return r_out, (h_n, h_c)\n\nclass BiLSTM_official_Encoder(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(BiLSTMEncoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,\n bidirectional=True, num_layers=1)\n \n def forward(self, x):\n '''\n Parameters:\n ------------------------\n x: input feature seqences\n '''\n r_out, (h_n, h_c) = self.rnn(x)\n return r_out, (h_n, h_c)\n\nclass LSTM_official_Encoder(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(LSTM_official_Encoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,\n num_layers=1)\n \n def forward(self, x):\n '''\n Parameters:\n ------------------------\n x: input feature seqences\n '''\n r_out, (h_n, h_c) = self.rnn(x)\n return r_out, (h_n, h_c)\n\nclass FcLstmEncoder(nn.Module):\n def __init__(self, input_size, hidden_size, bidirectional=False):\n super(FcLstmEncoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.fc = 
FcEncoder(input_size, [hidden_size, hidden_size], dropout=0.1, dropout_input=False)\n self.rnn = nn.LSTM(self.hidden_size, self.hidden_size, batch_first=True,\n num_layers=1, bidirectional=bidirectional)\n \n def forward(self, x, states):\n x = self.fc(x)\n r_out, (h_n, h_c) = self.rnn(x, states)\n return r_out, (h_n, h_c)\n\nclass AttentionFusionNet(nn.Module):\n def __init__(self, a_dim, v_dim, l_dim, hidden_size):\n super(AttentionFusionNet, self).__init__()\n self.a_dim = a_dim\n self.v_dim = v_dim\n self.l_dim = l_dim\n self.hidden_size = hidden_size\n self.mapping = nn.Linear(self.hidden_size, self.hidden_size)\n self.modality_context = nn.Parameter(torch.Tensor(self.hidden_size, 1))\n self.modality_context.data.normal_(0, 0.05)\n self.A_conv = nn.Conv1d(a_dim, hidden_size, kernel_size=1, padding=0)\n self.V_conv = nn.Conv1d(v_dim, hidden_size, kernel_size=1, padding=0)\n self.L_conv = nn.Conv1d(l_dim, hidden_size, kernel_size=1, padding=0)\n self.rnn = self.rnn = nn.LSTM(self.hidden_size, self.hidden_size, batch_first=True, )\n \n def atten_embd(self, a_input, v_input, l_input):\n a_input = a_input.unsqueeze(-2) # [batch_size, seq_len, 1, embd_dim]\n v_input = v_input.unsqueeze(-2)\n l_input = l_input.unsqueeze(-2)\n data = torch.cat([a_input, v_input, l_input], dim=-2) # [batch_size, seq_len, 3, embd_dim]\n batch_size, seq_len, _, embd_dim = data.size()\n proj_data = torch.tanh(self.mapping(data)) # [batch_size, seq_len, 3, hidden_size]\n weight = F.softmax(data @ self.modality_context, dim=-2) # [batch_size, seq_len, 3, 1]\n fusion = torch.sum(data * weight, dim=-2)\n return fusion\n\n def forward(self, a_input, v_input, l_input, states):\n '''\n Input size [batch_size, seq_len, embd_dim]\n '''\n a_input = self.A_conv(a_input.transpose(1, 2)).permute(0, 2, 1)\n v_input = self.V_conv(v_input.transpose(1, 2)).permute(0, 2, 1)\n l_input = self.L_conv(l_input.transpose(1, 2)).permute(0, 2, 1)\n fusion = self.atten_embd(a_input, v_input, l_input) # [batch_size, seq_len, embd_dim]\n r_out, (h_n, h_c) = self.rnn(fusion, states)\n return r_out, (h_n, h_c)\n\nclass AttentionFusionNet2(nn.Module):\n def __init__(self, a_dim, v_dim, l_dim, hidden_size):\n super(AttentionFusionNet2, self).__init__()\n self.a_dim = a_dim\n self.v_dim = v_dim\n self.l_dim = l_dim\n self.hidden_size = hidden_size\n self.mapping = nn.Linear(self.hidden_size, self.hidden_size)\n self.A_conv = nn.Conv1d(a_dim, hidden_size, kernel_size=1, padding=0)\n self.V_conv = nn.Conv1d(v_dim, hidden_size, kernel_size=1, padding=0)\n self.L_conv = nn.Conv1d(l_dim, hidden_size, kernel_size=1, padding=0)\n self.context_proj = nn.Linear(3 * hidden_size, hidden_size)\n self.rnn = self.rnn = nn.LSTM(self.hidden_size, self.hidden_size, batch_first=True, )\n \n def atten_embd(self, a_input, v_input, l_input):\n batch_size, seq_len, embd_dim = a_input.size()\n context = torch.cat([a_input, v_input, l_input], dim=-1)\n context = torch.tanh(self.context_proj(context)).view(-1, self.hidden_size, 1) # [batch_size * seq_len, hidden_size, 1]\n _a_input = a_input.contiguous().view(batch_size * seq_len, 1, self.hidden_size) # [batch_size * seq_len, 1, hidden_size]\n _v_input = v_input.contiguous().view(batch_size * seq_len, 1, self.hidden_size) # [batch_size * seq_len, 1, hidden_size]\n _l_input = l_input.contiguous().view(batch_size * seq_len, 1, self.hidden_size) # [batch_size * seq_len, 1, hidden_size]\n a_weight = torch.bmm(_a_input, context).view(batch_size, -1, 1) # [batch_size, seq_len, 1]\n v_weight = torch.bmm(_v_input, 
context).view(batch_size, -1, 1)\n l_weight = torch.bmm(_l_input, context).view(batch_size, -1, 1)\n weight = torch.cat([a_weight, v_weight, l_weight], dim=-1) # [batch_size, seq_len, 3]\n weight = F.softmax(weight, dim=-1).unsqueeze(-1)\n data = torch.cat([a_input.unsqueeze(-2), v_input.unsqueeze(-2), l_input.unsqueeze(-2)], dim=-2)\n fusion = torch.sum(data * weight, dim=-2)\n return fusion\n\n def forward(self, a_input, v_input, l_input, states):\n '''\n Input size [batch_size, seq_len, embd_dim]\n '''\n a_input = self.A_conv(a_input.transpose(1, 2)).permute(0, 2, 1)\n v_input = self.V_conv(v_input.transpose(1, 2)).permute(0, 2, 1)\n l_input = self.L_conv(l_input.transpose(1, 2)).permute(0, 2, 1)\n fusion = self.atten_embd(a_input, v_input, l_input) # [batch_size, seq_len, embd_dim]\n r_out, (h_n, h_c) = self.rnn(fusion, states)\n return r_out, (h_n, h_c)\n\n\n\"\"\"\nclass BiLSTMEncoder(nn.Module):\n ''' LSTM encoder\n '''\n def __init__(self, input_size, hidden_size, embd_size):\n super(LSTMEncoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.embd_size = embd_size\n self.rnn = nn.LSTM(self.input_size, self.hidden_size, bidirectional=True, batch_first=True)\n self.fc = nn.Sequential(\n nn.Linear(self.hidden_size*2, self.embd_size),\n nn.ReLU(),\n )\n\n def forward(self, x, length):\n batch_size = x.size(0)\n # x = pack_padded_sequence(x, length, batch_first=True, enforce_sorted=False)\n r_out, (h_n, h_c) = self.rnn(x)\n h_n = h_n.contiguous().view(batch_size, -1)\n embd = self.fc(h_n)\n return embd\n\nclass LSTMEncoder(nn.Module):\n ''' one directional LSTM encoder\n '''\n def __init__(self, input_size, hidden_size, embd_method='last'):\n super(LSTMEncoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True)\n assert embd_method in ['maxpool', 'attention', 'last']\n self.embd_method = embd_method\n\n if self.embd_method == 'maxpool':\n self.maxpool = nn.MaxPool1d(self.hidden_size)\n \n elif self.embd_method == 'attention':\n self.attention_vector_weight = nn.Parameter(torch.Tensor(hidden_size, 1))\n self.attention_layer = nn.Sequential(\n nn.Linear(self.hidden_size, self.hidden_size),\n nn.Tanh(),\n )\n self.softmax = nn.Softmax(dim=-1)\n\n def embd_attention(self, r_out, h_n):\n ''''\n 参考这篇博客的实现:\n https://blog.csdn.net/dendi_hust/article/details/94435919\n https://blog.csdn.net/fkyyly/article/details/82501126\n 论文:Hierarchical Attention Networks for Document Classification\n formulation: lstm_output*softmax(u * tanh(W*lstm_output + Bias)\n W and Bias 是映射函数,其中 Bias 可加可不加\n u 是 attention vector 大小等于 hidden size\n '''\n hidden_reps = self.attention_layer(r_out) # [batch_size, seq_len, hidden_size]\n atten_weight = (hidden_reps @ self.attention_vector_weight) # [batch_size, seq_len, 1]\n atten_weight = self.softmax(atten_weight) # [batch_size, seq_len, 1]\n # [batch_size, seq_len, hidden_size] * [batch_size, seq_len, 1] = [batch_size, seq_len, hidden_size]\n sentence_vector = torch.sum(r_out * atten_weight, dim=1) # [batch_size, hidden_size]\n return sentence_vector\n\n def embd_maxpool(self, r_out, h_n):\n embd = self.maxpool(r_out.transpose(1,2)) # r_out.size()=>[batch_size, seq_len, hidden_size]\n # r_out.transpose(1, 2) => [batch_size, hidden_size, seq_len]\n return embd.squeeze()\n\n def embd_last(self, r_out, h_n):\n #Just for one layer and single direction\n return h_n.squeeze()\n\n def forward(self, x):\n '''\n r_out shape: seq_len, 
batch, num_directions * hidden_size\n hn and hc shape: num_layers * num_directions, batch, hidden_size\n '''\n r_out, (h_n, h_c) = self.rnn(x)\n embd = getattr(self, 'embd_'+self.embd_method)(r_out, h_n)\n return embd\n\"\"\"\n\nif __name__ == '__main__':\n # model = AttentionFusionNet2(100, 200, 300, 128)\n # a_input = torch.rand(12, 30, 100)\n # v_input = torch.rand(12, 30, 200)\n # l_input = torch.rand(12, 30, 300)\n # state = (torch.zeros(1, 12, 128), torch.zeros(1, 12, 128))\n # r_out, (h_n, h_c) = model(a_input, v_input, l_input, state)\n # print(r_out.shape)\n\n model = AttentiveLSTMEncoder(345, 256)\n input = torch.rand(32, 300, 345)\n out, _ = model(input)\n print(out.shape)" ]
[ [ "torch.sum", "torch.nn.LSTM", "torch.nn.Linear", "torch.nn.functional.softmax", "torch.rand", "torch.bmm", "torch.nn.Conv1d", "torch.nn.ReLU", "torch.nn.LayerNorm", "torch.nn.Sigmoid", "torch.cat", "torch.Tensor" ] ]