Columns: repo_name (string, length 6-130), hexsha (sequence), file_path (sequence), code (sequence), apis (sequence), possible_versions (list)
rubenandrebarreiro/fct-nova-machine-learning-labs
[ "3ad34c4f49d7acfef04c757dc3317da6c717c8c1" ]
[ "tutorials/tutorial-3/files/t3_aux.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nAuxiliary code for tutorial 3\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\n\ndef poly_16features(X):\n \"\"\"Expand data polynomially\n \"\"\"\n X_exp = np.zeros((X.shape[0],X.shape[1]+14))\n X_exp[:,:2] = X \n X_exp[:,2] = X[:,0]*X[:,1]\n X_exp[:,3] = X[:,0]**2\n X_exp[:,4] = X[:,1]**2\n X_exp[:,5] = X[:,0]**3\n X_exp[:,6] = X[:,1]**3\n X_exp[:,7] = X[:,0]**2*X[:,1]\n X_exp[:,8] = X[:,1]**2*X[:,0]\n X_exp[:,9] = X[:,0]**4\n X_exp[:,10] = X[:,1]**4\n X_exp[:,11] = X[:,0]**3*X[:,1]\n X_exp[:,12] = X[:,1]**3*X[:,0]\n X_exp[:,13] = X[:,0]**2*X[:,1]**2\n X_exp[:,14] = X[:,0]**5\n X_exp[:,15] = X[:,1]**5 \n return X_exp\n\ndef poly_mat(reg,X_data,feats,ax_lims):\n \"\"\"create score matrix for contour\n \"\"\"\n Z = np.zeros((200,200))\n xs = np.linspace(ax_lims[0],ax_lims[1],200)\n ys = np.linspace(ax_lims[2],ax_lims[3],200)\n X,Y = np.meshgrid(xs,ys)\n points = np.zeros((200,2))\n points[:,0] = xs\n for ix in range(len(ys)):\n points[:,1] = ys[ix]\n x_points=poly_16features(points)[:,:feats]\n Z[ix,:] = reg.decision_function(x_points)\n return (X,Y,Z)\n\ndef create_plot(plt, ax_lims, alpha_factor, X_r, Y_r, X_t, Y_t, feats, best_c):\n \"\"\"create image with plot for best classifier\"\"\"\n reg = LogisticRegression(C=best_c, tol=1e-10)\n reg.fit(X_r[:,:feats],Y_r)\n plotX,plotY,Z = poly_mat(reg,X_r,feats,ax_lims)\n plt.contourf(plotX,plotY,Z,[-1e16,0,1e16], colors = ('b', 'r'),alpha=alpha_factor)\n plt.contour(plotX,plotY,Z,[0], colors = ('k'))\n plt.plot(X_r[Y_r>0,0],X_r[Y_r>0,1],'or')\n plt.plot(X_r[Y_r<=0,0],X_r[Y_r<=0,1],'ob')\n plt.plot(X_t[Y_t>0,0],X_t[Y_t>0,1],'or',mew=2)\n plt.plot(X_t[Y_t<=0,0],X_t[Y_t<=0,1],'ob',mew=2)\n " ]
[ [ "matplotlib.pyplot.contourf", "sklearn.linear_model.LogisticRegression", "numpy.linspace", "matplotlib.pyplot.plot", "matplotlib.pyplot.contour", "numpy.meshgrid", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Shihara-Dilshan/SinhalaCharacterRecogizer
[ "0662cdd8b8ea8911fc77bba3fea131bd02c22f0f" ]
[ "researchData/createCsv/images/read.py" ]
[ "import numpy as np\nimport pandas as pd\n\ndata = pd.read_csv(\"data.csv\", header=None, sep=\"\\t\")\nprint(data.shape)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
pzharrington/ExaGAN
[ "b86ad31d1581d3bc474fe98d7838bed5af0e8b62" ]
[ "networks/GANbuild.py" ]
[ "import numpy as np\nimport keras\nfrom keras.layers import *\nfrom keras.activations import relu\nfrom keras.models import Model, Sequential\nfrom keras.models import load_model\nimport keras.backend as K\nimport time\nimport sys\nsys.path.append('./utils')\nsys.path.append('./networks')\nimport logging\nimport logging_utils\nfrom parameters import load_params\nimport plots\nimport tboard\nfrom DeconvLayer_output_shape import MyConv2DTranspose\n\n\n\n\nclass DCGAN:\n \n def __init__(self, configtag, expDir):\n\n # Load hyperparmeters\n self.configtag = configtag\n logging.info('Parameters:')\n self.init_params(configtag)\n self.expDir = expDir\n self.inits = {'dense':keras.initializers.RandomNormal(mean=0.0, stddev=0.02),\n 'conv':keras.initializers.TruncatedNormal(mean=0.0, stddev=0.02),\n 'tconv':keras.initializers.RandomNormal(mean=0.0, stddev=0.02)}\n\n # Import slices\n self.real_imgs = np.load('./data/'+self.dataname+'_train.npy')\n self.val_imgs = np.load('./data/'+self.dataname+'_val.npy')\n self.n_imgs = self.real_imgs.shape[0]\n if self.datafmt == 'channels_first':\n self.real_imgs = np.moveaxis(self.real_imgs, -1, 1)\n self.val_imgs = np.moveaxis(self.val_imgs, -1, 1)\n self.set_transf_funcs()\n self.set_transf_funcs_tensor()\n self.real_imgs = self.transform(self.real_imgs)\n \n\n\n # Build networks\n self.discrim = self.build_discriminator()\n self.genrtor = self.build_generator()\n\n # Custom loss/metrics\n def mean_prob(y_true, y_pred):\n '''metric to measure mean probability of D predictions (0=fake, 1=real)'''\n return K.mean(K.sigmoid(y_pred))\n def crossentropy_from_logits(y_true, y_pred):\n '''crossentropy loss from logits (circumvents default Keras crossentropy loss, which is unstable)'''\n return K.mean(K.binary_crossentropy(y_true, y_pred, from_logits=True), axis=-1)\n \n # Compile discriminator so it can be trained separately\n self.discrim.compile(loss=crossentropy_from_logits, \n optimizer=keras.optimizers.Adam(lr=self.D_lr, beta_1=0.5),\n metrics=[mean_prob])\n\n # Stack generator and discriminator networks together and compile\n z = Input(shape=(1,self.noise_vect_len))\n genimg = self.genrtor(z)\n self.discrim.trainable = False\n decision = self.discrim(genimg)\n self.stacked = Model(z, decision)\n self.stacked.compile(loss=crossentropy_from_logits, optimizer=keras.optimizers.Adam(lr=self.G_lr, beta_1=0.5))\n\n # Setup tensorboard stuff\n self.TB_genimg = tboard.TboardImg('genimg')\n self.TB_pixhist = tboard.TboardImg('pixhist')\n self.TB_pspect = tboard.TboardImg('pspect')\n self.TB_scalars = tboard.TboardScalars()\n\n\n def build_discriminator(self):\n \n dmodel = Sequential()\n for lyrIdx in range(self.nlayers):\n if lyrIdx==0:\n convlayer = Conv2D(filters=self.nconvfilters[lyrIdx], kernel_size=self.convkern, \n strides=self.convstride, padding='same', input_shape=self.imshape, \n data_format=self.datafmt, kernel_initializer=self.inits['conv'])\n else:\n convlayer = Conv2D(filters=self.nconvfilters[lyrIdx], kernel_size=self.convkern, \n strides=self.convstride, padding='same', data_format=self.datafmt, \n kernel_initializer=self.inits['conv'])\n dmodel.add(convlayer)\n dmodel.add(BatchNormalization(epsilon=1e-5, momentum=0.9, axis=self.C_axis))\n dmodel.add(LeakyReLU(alpha=self.alpha))\n dmodel.add(Flatten(data_format=self.datafmt))\n dmodel.add(Dense(1, kernel_initializer=self.inits['dense']))\n dmodel.summary(print_fn=logging_utils.print_func)\n img = Input(shape=self.imshape)\n return Model(img, dmodel(img))\n\n\n def build_generator(self):\n fmapsize = 
self.img_dim//int(2**self.nlayers)\n gmodel = Sequential()\n gmodel.add(Dense(fmapsize*fmapsize*2*self.ndeconvfilters[0], input_shape=(1,self.noise_vect_len),\n kernel_initializer=self.inits['dense']))\n gmodel.add(BatchNormalization(epsilon=1e-5, momentum=0.9, axis=self.C_axis))\n gmodel.add(ReLU())\n if self.datafmt == 'channels_last':\n fmapshape = (fmapsize, fmapsize, 2*self.ndeconvfilters[0])\n else:\n fmapshape = (2*self.ndeconvfilters[0], fmapsize, fmapsize)\n gmodel.add(Reshape(fmapshape))\n for lyrIdx in range(self.nlayers):\n gmodel.add(MyConv2DTranspose(self.ndeconvfilters[lyrIdx], self.convkern, strides=self.convstride,\n padding='same', data_format=self.datafmt, kernel_initializer=self.inits['tconv'])) \n if lyrIdx == self.nlayers - 1:\n gmodel.add(Activation('tanh')) # last layer is deconv+tanh\n else:\n gmodel.add(BatchNormalization(epsilon=1e-5, momentum=0.9, axis=self.C_axis)) # hidden layers are deconv+batchnorm+relu\n gmodel.add(ReLU())\n gmodel.summary(print_fn=logging_utils.print_func)\n noise = Input(shape=(1,self.noise_vect_len)) \n if self.multichannel:\n # multi-channel re-scaling (currently just one extra channel)\n CH1 = gmodel(noise)\n raw_rescale = Lambda(lambda inp: self.invtransform_tensor(inp)/self.linear_scaler)(CH1)\n CH2 = Activation('tanh')(raw_rescale)\n result = Concatenate(axis=self.C_axis)([CH1, CH2])\n return Model(noise, result)\n else:\n return Model(noise, gmodel(noise))\n\n\n def init_params(self,configtag):\n \n params = load_params('./config.yaml', configtag)\n logging.info(str(params))\n self.dataname = params['dataname']\n self.img_dim = params['img_dim']\n self.noise_vect_len = params['noise_vect_len']\n self.nlayers = params['nlayers']\n self.convkern = params['convkern']\n self.convstride = params['convstride']\n self.nconvfilters = params['nconvfilters']\n self.ndeconvfilters = params['ndeconvfilters']\n self.label_flip = params['label_flip']\n self.batchsize = params['batchsize'] \n self.print_batch = params['print_batch']\n self.checkpt_batch = params['checkpt_batch']\n self.cscale = params['cscale']\n self.datascale = params['datascale']\n self.G_lr, self.D_lr = params['learn_rate']\n self.DG_update_ratio = params['DG_update_ratio']\n self.Nepochs = params['Nepochs']\n self.datafmt = params['datafmt']\n nchannels = 1\n self.multichannel = params['multichannel']\n if self.multichannel:\n self.linear_scaler = 1000.\n nchannels = 2\n self.alpha = 0.2\n self.start = 0\n self.bestchi = np.inf\n self.bestchi_pspect = np.inf\n if self.datafmt == 'channels_last':\n self.C_axis = -1\n self.imshape = (self.img_dim, self.img_dim, nchannels)\n else:\n self.C_axis = 1\n self.imshape = (nchannels, self.img_dim, self.img_dim)\n\n def train_epoch(self, shuffler, num_batches, epochIdx):\n \n d_losses = []\n d_real_losses = []\n d_fake_losses = []\n g_losses = []\n\n for batch in range(num_batches):\n iternum = (epochIdx*num_batches + batch)\n t1 = time.time()\n\n real_img_batch = self.real_imgs[shuffler[batch*self.batchsize:(batch+1)*self.batchsize]]\n if self.multichannel:\n CH2 = np.tanh(self.invtransform(real_img_batch)/self.linear_scaler)\n real_img_batch = np.concatenate((real_img_batch, CH2), axis=self.C_axis)\n noise_vects = np.random.normal(loc=0.0, size=(self.batchsize, 1, self.noise_vect_len))\n fake_img_batch = self.genrtor.predict(noise_vects)\n reals = np.ones((self.batchsize,1))\n fakes = np.zeros((self.batchsize,1))\n for i in range(reals.shape[0]):\n if np.random.uniform(low=0., high=1.0) < self.label_flip:\n reals[i,0] = 0\n 
fakes[i,0] = 1\n\n # train discriminator\n for iters in range(self.DG_update_ratio//2):\n discr_real_loss = self.discrim.train_on_batch(real_img_batch, reals)\n discr_fake_loss = self.discrim.train_on_batch(fake_img_batch, fakes)\n discr_loss = 0.5*(discr_real_loss[0]+discr_fake_loss[0])\n d_losses.append(discr_loss)\n d_real_losses.append(discr_real_loss[0])\n d_fake_losses.append(discr_fake_loss[0]) \n\n # train generator via stacked model\n genrtr_loss = self.stacked.train_on_batch(noise_vects, np.ones((self.batchsize,1)))\n t2 = time.time()\n\n g_losses.append(genrtr_loss)\n\n \n t2 = time.time()\n\n if batch%self.print_batch == 0:\n logging.info(\"| --- batch %d of %d --- |\"%(batch + 1, num_batches))\n logging.info(\"|Discr real pred=%f, fake pred=%f\"%(discr_real_loss[1], discr_fake_loss[1]))\n logging.info(\"|Discriminator: loss=%f\"%(discr_loss))\n logging.info(\"|Generator: loss=%f\"%(genrtr_loss))\n logging.info(\"|Time: %f\"%(t2-t1))\n if iternum%self.checkpt_batch == 0:\n # Tensorboard monitoring\n iternum = iternum/self.checkpt_batch\n self.TB_genimg.on_epoch_end(iternum, self)\n chisq = self.TB_pixhist.on_epoch_end(iternum, self)\n chisq_pspect = self.TB_pspect.on_epoch_end(iternum, self)\n scalars = {'d_loss':np.mean(d_losses), 'd_real_loss':np.mean(d_real_losses),\n 'd_fake_loss':np.mean(d_fake_losses), 'g_loss':np.mean(g_losses), \n 'chisq':chisq, 'chisq_pspect':chisq_pspect}\n self.TB_scalars.on_epoch_end(self, iternum, scalars)\n\n d_losses = []\n d_real_losses = []\n d_fake_losses = []\n g_losses = []\n \n if chisq<self.bestchi and iternum>30:\n # update best chi-square score and save\n self.bestchi = chisq\n self.genrtor.save_weights(self.expDir+'models/g_cosmo_best.h5')\n self.discrim.save_weights(self.expDir+'models/d_cosmo_best.h5')\n logging.info(\"BEST saved at %d, chi=%f\"%(iternum, chisq))\n if chisq_pspect < self.bestchi_pspect and iternum>30:\n self.bestchi_pspect = chisq_pspect\n self.genrtor.save_weights(self.expDir+'models/g_cosmo_best_pspect.h5')\n self.discrim.save_weights(self.expDir+'models/d_cosmo_best_pspect.h5')\n logging.info(\"BEST_PSPECT saved at %d, chi=%f\"%(iternum, chisq_pspect))\n\n\n def set_transf_funcs(self):\n def transform(x):\n return np.divide(2.*x, x + self.datascale) - 1.\n def invtransform(s):\n return self.datascale*np.divide(1. + s, 1. - s)\n self.transform = transform\n self.invtransform = invtransform\n\n def set_transf_funcs_tensor(self):\n def transform(x):\n return 2.*x/(x + self.datascale) - 1.\n def invtransform(s):\n return self.datascale*(1. + s)/(1. - s)\n self.transform_tensor = transform\n self.invtransform_tensor = invtransform\n\n\n\n\n\n" ]
[ [ "numpy.ones", "numpy.concatenate", "numpy.random.uniform", "numpy.random.normal", "numpy.mean", "numpy.moveaxis", "numpy.load", "numpy.zeros", "numpy.divide" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GaniAliguzhinov/tensorMultBenchmarks
[ "352e5d73e784ce346be0c48ca1174821164ac1b2" ]
[ "allocMult.py" ]
[ "import numpy\nimport timeit\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport tensorflow as tf\n\n\n\"\"\"\nhttps://sanjayasubedi.com.np/python/efficient-matrix-multiplication-in-python/\n\"\"\"\nglobal A, B\n\nglobal n\n\ndef allocate(n):\n global A\n global B\n A = numpy.random.rand(n, n).astype(numpy.float32)\n B = numpy.random.rand(n, n).astype(numpy.float32)\n\ndef matmul1(a, b):\n n = a.shape[0]\n res = numpy.empty(a.shape)\n for i in range(n):\n for j in range(n):\n res[i,j] = numpy.dot(a[i,:], b[:,j])\n return res\n\ndef matmul2(a,b):\n res = numpy.empty(a.shape)\n n = a.shape[0]\n for i in range(n):\n res[i] = numpy.dot(a[i], b)\n\ndef matmul3(a,b):\n return tf.matmul(a,b)\n\nfor i in range(100):\n n = 100+i*10\n timer = timeit.Timer(\"allocate(n)\", \"import numpy; from __main__ import allocate, n\")\n t = 1000.0*timer.repeat(1,1)[0]\n timer = timeit.Timer(\"matmul3(A, B)\", \"import tensorflow as tf; from __main__ import A,B,matmul3\")\n print(\"{:.3f} \".format(t+1000.0*numpy.min(timer.repeat(10, 1))), end='')\n" ]
[ [ "numpy.dot", "tensorflow.matmul", "numpy.random.rand", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
duongkstn/my-tod
[ "4ac82405d22b79e6eb9d3680b4e607c8f25c0c68" ]
[ "utils/utils_schema.py" ]
[ "import json\nimport ast\nimport collections\nimport os\n\nimport pandas as pd\n\nfrom .utils_function import get_input_example\n\n\ndef read_langs_turn(args, dial_files, max_line=None, ds_name=\"\"):\n print((\"Reading from {} for read_langs_turn\".format(ds_name)))\n\n data = []\n\n cnt_lin = 1\n need_translate = []\n for dial_file in dial_files:\n\n f_dials = open(dial_file, \"r\")\n\n dials = json.load(f_dials)\n\n turn_sys = \"\"\n turn_usr = \"\"\n\n for dial_dict in dials:\n dialog_history = []\n for ti, turn in enumerate(dial_dict[\"turns\"]):\n need_translate.append(turn[\"utterance\"].strip())\n if turn[\"speaker\"] == \"USER\":\n turn_usr = turn[\"utterance\"].lower().strip()\n data_detail = get_input_example(\"turn\")\n data_detail[\"ID\"] = \"{}-{}\".format(ds_name, cnt_lin)\n data_detail[\"turn_id\"] = ti % 2\n data_detail[\"turn_usr\"] = turn_usr\n data_detail[\"turn_sys\"] = turn_sys\n data_detail[\"dialog_history\"] = list(dialog_history)\n\n if not args[\"only_last_turn\"]:\n data.append(data_detail)\n\n dialog_history.append(turn_sys)\n dialog_history.append(turn_usr)\n\n elif turn[\"speaker\"] == \"SYSTEM\":\n turn_sys = turn[\"utterance\"].lower().strip()\n\n if args[\"only_last_turn\"]:\n data.append(data_detail)\n\n cnt_lin += 1\n if max_line and cnt_lin >= max_line:\n break\n need_translate = list(set(need_translate))\n need_translate = sorted(need_translate)\n pd.DataFrame({'text': need_translate}).to_excel(\"/media/user/Data/ToD-BERT/TODBERT_dialog_datasets/dialog_datasets/Translated/train_dumpdata_tod_schema.xlsx\", index=False)\n exit(0)\n return data\n\n\ndef read_langs_dial(file_name, ontology, dialog_act, max_line=None, domain_act_flag=False):\n print((\"Reading from {} for read_langs_dial\".format(file_name)))\n raise NotImplementedError\n\n\ndef prepare_data_schema(args):\n ds_name = \"Schema\"\n\n example_type = args[\"example_type\"]\n max_line = args[\"max_line\"]\n\n onlyfiles_trn = [\n os.path.join(args[\"data_path\"], \"dstc8-schema-guided-dialogue/train/{}\".format(f))\n for f in os.listdir(os.path.join(args[\"data_path\"], \"dstc8-schema-guided-dialogue/train/\"))\n if \"dialogues\" in f\n ]\n onlyfiles_dev = [\n os.path.join(args[\"data_path\"], \"dstc8-schema-guided-dialogue/dev/{}\".format(f))\n for f in os.listdir(os.path.join(args[\"data_path\"], \"dstc8-schema-guided-dialogue/dev/\"))\n if \"dialogues\" in f\n ]\n onlyfiles_tst = [\n os.path.join(args[\"data_path\"], \"dstc8-schema-guided-dialogue/test/{}\".format(f))\n for f in os.listdir(os.path.join(args[\"data_path\"], \"dstc8-schema-guided-dialogue/test/\"))\n if \"dialogues\" in f\n ]\n\n _example_type = \"dial\" if \"dial\" in example_type else example_type\n pair_trn = globals()[\"read_langs_{}\".format(_example_type)](args, onlyfiles_trn, max_line, ds_name)\n pair_dev = globals()[\"read_langs_{}\".format(_example_type)](args, onlyfiles_dev, max_line, ds_name)\n pair_tst = globals()[\"read_langs_{}\".format(_example_type)](args, onlyfiles_tst, max_line, ds_name)\n\n print(\"Read {} pairs train from {}\".format(len(pair_trn), ds_name))\n print(\"Read {} pairs valid from {}\".format(len(pair_dev), ds_name))\n print(\"Read {} pairs test from {}\".format(len(pair_tst), ds_name))\n\n meta_data = {\"num_labels\": 0}\n\n return pair_trn, pair_dev, pair_tst, meta_data\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
paolotron/Drop-Geo
[ "140badaba61ee33eb31ba92609739cb9b68a0d0f" ]
[ "source/util.py" ]
[ "import torch\nimport shutil\nfrom os.path import join\n\n\ndef save_checkpoint(args, state, is_best, filename):\n model_path = join(args.output_folder, filename)\n torch.save(state, model_path)\n if is_best:\n shutil.copyfile(model_path, join(args.output_folder, \"best_model.pth\"))\n" ]
[ [ "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
UKPLab/acl2018-msr-workshop-binlin
[ "9b8021dfa14a8bc131df117fa9985699fc8cedea" ]
[ "components/nlgen/morph_rnn_algo.py" ]
[ "from components.utils.graph import dg_from_tokens\nfrom components.utils.readers import ConllFileReader, UDConllDataProcessor\nfrom components.data.base_morph_data import BaseMorphData\nfrom components.utils.tensors import cuda_if_gpu\nfrom torch.autograd import Variable\nimport torch\nfrom components.nlgen.base_algo import BaseAlgo\nimport logging\nfrom components.constants import MORPH_OUTPUT_CLASSES_START\nfrom components.utils.serialization import save_txt\nfrom components.constants import EOS_ID\nfrom components.data.base_data import pad_seq\n\nlogger = logging.getLogger('main')\n\nclass MorphRNNAlgo(BaseAlgo):\n\n def to_be_copied_verbatim(self, lemma, model, vocab):\n if (len(lemma) > model.max_src_len):\n return True\n\n for ch in lemma:\n if ch not in vocab.tok2id:\n return True\n\n return False\n\n def predict_from_raw_data(self, model, raw_data, vocab):\n\n model.eval()\n\n self.predictions = []\n self.targets = []\n self.error_analysis_data = []\n\n lemmas, forms, feat_dicts = BaseMorphData.extract_lemmas_forms_feature_dicts(raw_data)\n logger.info('Number of raw instances: %d', len(lemmas))\n\n for idx, lemma in enumerate(lemmas):\n feat_d = feat_dicts[idx]\n target = forms[idx]\n\n if self.to_be_copied_verbatim(lemma, model, vocab):\n predicted_form = lemma\n else:\n predicted_form = self.predict_one(model, (lemma, feat_d), vocab)\n\n self.targets.append(target)\n self.predictions.append(predicted_form)\n\n if predicted_form.lower() != target.lower():\n self.error_analysis_data.append((lemma, target, predicted_form))\n\n return self.predictions\n\n def predict_one(self, model, datum, vocab):\n \"\"\"\n\n :param model:\n :param datum: a (lemma chars)\n :param vocab:\n :return:\n \"\"\"\n\n lemma, feats = datum\n\n x_enc_inputs = BaseMorphData.lemma_chars_to_ids(lemma, vocab)\n x_feature_ids = BaseMorphData.feat_d_to_ids(feats, vocab)\n\n # TODO: this works only with batch=1; consider batching and transposition instead of viewing\n x_enc_inputs_v = cuda_if_gpu(Variable(torch.LongTensor(x_enc_inputs)).view(-1,1))\n x_feature_ids_v = cuda_if_gpu(Variable(torch.LongTensor(x_feature_ids)).view(-1,1))\n\n pred_form_char_ids = model.predict_one_instance((x_enc_inputs_v, x_feature_ids_v))\n pred_form_chars = BaseMorphData.unvectorize_seq(pred_form_char_ids, vocab)\n pred_form = ''.join([ch for ch in pred_form_chars if not ch in MORPH_OUTPUT_CLASSES_START])\n\n return pred_form\n\n def predict_from_dgnode(self, model, vocab, dg, node_id):\n node_attrs = dg.node[node_id]\n lemma = node_attrs['LEMMA']\n feat_d = BaseMorphData.extract_feature_dict(node_attrs)\n\n if self.to_be_copied_verbatim(lemma, model, vocab):\n predicted_form = lemma\n else:\n predicted_form = self.predict_one(model, (lemma, feat_d), vocab)\n\n return predicted_form\n\n def save_predictions(self, predictions, fname):\n\n assert type(predictions[0]) == str, \\\n 'Predictions are not strings -- consider re-implementing the method!'\n\n logger.debug('Saving Morph predictions to --> %s', fname)\n save_txt(predictions, fname)\n\n def save_dev_references(self, fname):\n logger.debug('Saving Morph references to --> %s', fname)\n save_txt(self.targets, fname)\n\ncomponent=MorphRNNAlgo" ]
[ [ "torch.LongTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zemarchezi/pyMilne
[ "62567c4d843227cc8de4ad1c3eddd485edd094f2" ]
[ "MilneEddington.py" ]
[ "import numpy as np\nimport pyMilne\n\n\nclass MilneEddington:\n \"\"\"\n MilneEddington class\n\n Purpose: Implementation of a parallel Milne-Eddington solver with analytical response functions\n Coded in C++/python by J. de la Cruz Rodriguez (ISP-SU, 2020)\n \n References:\n Landi Degl'Innocenti & Landolfi (2004)\n Orozco Suarez & del Toro Iniesta (2007)\n\n \"\"\"\n\n # *************************************************************************************************\n\n def _initLine(self, label, anomalous, dw, precision):\n if(precision == 'float64'):\n if(label == 6301):\n return pyMilne.pyLines(j1 = 2.0, j2 = 2.0, g1 = 1.84, g2 = 1.50, cw = 6301.4995, gf = 10.**-0.718, anomalous = anomalous, dw = dw)\n elif(label == 6302):\n return pyMilne.pyLines(j1 = 1.0, j2 = 0.0, g1 = 2.49, g2 = 0.00, cw = 6302.4931, gf = 10.**-0.968, anomalous = anomalous, dw = dw)\n elif(label == 6173):\n return pyMilne.pyLines(j1 = 1.0, j2 = 0.0, g1 = 2.50, g2 = 0.00, cw = 6173.3340, gf = 10.**-2.880, anomalous = anomalous, dw = dw)\n else:\n print(\"pyLines::setLine: Error line with label {0 } is not implented\".format(label))\n return pyMilne.pyLines()\n else:\n if(label == 6301):\n return pyMilne.pyLinesf(j1 = 2.0, j2 = 2.0, g1 = 1.84, g2 = 1.50, cw = 6301.4995, gf = 10.**-0.718, anomalous = anomalous, dw = dw)\n elif(label == 6302):\n return pyMilne.pyLinesf(j1 = 1.0, j2 = 0.0, g1 = 2.49, g2 = 0.00, cw = 6302.4931, gf = 10.**-0.968, anomalous = anomalous, dw = dw)\n elif(label == 6173):\n return pyMilne.pyLinesf(j1 = 1.0, j2 = 0.0, g1 = 2.50, g2 = 0.00, cw = 6173.3340, gf = 10.**-2.880, anomalous = anomalous, dw = dw)\n else:\n print(\"pyLines::setLine: Error line with label {0 } is not implented\".format(label))\n return pyMilne.pyLinesf()\n \n # *************************************************************************************************\n\n def _get_dtype(self):\n num = self.Me.get_dtype()\n\n if(num == 4): return 'float32'\n else: return 'float64'\n \n # *************************************************************************************************\n\n def _getLines(self, labels, anomalous, dw, precision):\n\n nLines = len(labels)\n lines = [None]*nLines\n\n for ii in range(nLines):\n lines[ii] = self._initLine(labels[ii], anomalous, dw, precision)\n\n return lines\n \n # *************************************************************************************************\n\n def __init__(self, regions, lines, anomalous=True, dw_lines = 20, nthreads=1, precision = 'float32'):\n \"\"\"\n __init__ method\n \n Arguments:\n regions: it is a list that contains lists with region information [[wav1, psf1], [wav2, psf2]]\n where wav1, wav2, psf1, psf2 are float64 numpy arrays. If no PSF is desired, use None.\n\n lines: list with the labels of lines to be used (defined in _initLine).\n\n anomalous: If True, all Zeeman components are calculated for each spectral lines.\n\n dw_lines: spectral window +/- dw from line center to compute the line profile. Outside that window the profile won't be calculated.\n Given in km/s (default 20 km/s)\n\n nthreads: number of threads to be used when synthesizing or inverting. 
Only relevant if there is \n more than 1 pixel.\n \n \"\"\"\n error = False\n \n # check regions\n for ii in range(len(regions)):\n if(len(regions[ii]) != 2):\n print(\"MilneEddington::__init__: ERROR, region {0} has {1} elements, should have 2!\".format(ii, len(regions[ii])))\n error = True\n\n if(error):\n return None\n \n # Init C++ object\n pyLines = self._getLines(lines, anomalous, dw_lines, precision)\n\n if(precision == 'float32'):\n self.Me = pyMilne.pyMilne_float(regions, pyLines, nthreads=nthreads, anomalous=anomalous)\n else:\n self.Me = pyMilne.pyMilne(regions, pyLines, nthreads=nthreads, anomalous=anomalous)\n\n\n # *************************************************************************************************\n\n def synthesize(self, model, mu = 1.0):\n \"\"\"\n synthesize spectra for a given model at a mu angle\n Arguments:\n model: 1D [9] or 3D array [ny,nx,9] with the parameters of the model\n mu: heliocentric angle for the synthesis\n\n The model parameters are: [|B| [G], inc [rad], azi [rad], vlos [km/s], vDop [\\AA], eta_l, damp, S0, S1]\n\n Returns:\n 4D array [ny,nx,4,nwaw] with the emerging intensity\n \"\"\"\n ndim = len(model.shape)\n dtype = self._get_dtype()\n \n if(ndim == 1):\n model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)\n elif(ndim == 3):\n model1 = model\n else:\n print(\"MilneEddington::synthesize: ERROR, the input model must have 1 or 3 dimensions\")\n return None\n\n if(model1.shape[2] != 9):\n print(\"MilneEddington::synthesize: ERROR, input model has npar={0}, should be 9\".format(model1.shape[2]))\n return None\n\n isContiguous = model1.flags['C_CONTIGUOUS']\n if(not isContiguous or model1.dtype != dtype):\n model1 = np.ascontiguousarray(model1, dtype=dtype)\n\n \n \n return self.Me.synthesize(model1, mu=mu)\n\n\n # *************************************************************************************************\n\n\n def get_wavelength_array(self):\n \"\"\"\n get_wavelength_array returns the total wavelength array 1D (regions are concatenated)\n \n \"\"\"\n return self.Me.get_wavelength_array()\n\n\n # *************************************************************************************************\n \n def synthesize_rf(self, model, mu=1.0):\n \"\"\"\n synthesize the spectra and analytical response functions for a given model at a mu angle\n Arguments:\n model: 1D [9] or 3D array [ny,nx,9] with the parameters of the model\n mu: heliocentric angle for the synthesis\n\n The model parameters are: [|B| [G], inc [rad], azi [rad], vlos [km/s], vDop [\\AA], eta_l, damp, S0, S1]\n\n Returns:\n a tuple (spectra, response_function)\n spectra: 4D array [ny,nx,4,nwaw] with the emerging intensity\n response_function: 5D array [ny, ny, 9, 4, nwav]\n \"\"\"\n ndim = len(model.shape)\n dtype = self._get_dtype()\n \n if(ndim == 1):\n model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)\n elif(ndim == 3):\n model1 = model\n else:\n print(\"MilneEddington::synthesize_rf: ERROR, the input model must have 1 or 3 dimensions\")\n return None\n\n if(model1.shape[2] != 9):\n print(\"MilneEddington::synthesize_rf: ERROR, input model has npar={0}, should be 9\".format(model1.shape[2]))\n return None\n\n isContiguous = model1.flags['C_CONTIGUOUS']\n if(not isContiguous or model1.dtype != dtype):\n model1 = np.ascontiguousarray(model1, dtype=dtype)\n\n \n \n return self.Me.synthesize_RF(model1, mu=mu)\n\n # *************************************************************************************************\n \n def 
invert(self, model, obs, sig = 1.e-3, mu = 1.0, nRandom = 3, nIter = 20, chi2_thres = 1.0, verbose = False):\n \"\"\"\n invert observations acquired at a given mu angle\n Arguments:\n model: 1D [9] or 3D array [ny,nx,9] with the parameters of the model\n obs: 2D [4,nwav] or 4D array [ny,nx,4,nwav] with the observed profiles. Should be normalized to the mean continuum.\n sig: scalar or 2D array [4,nwav] with the noise estimate\n\n mu: heliocentric angle for the synthesis\n nRandom: if larger than 1, the input model parameters will be randomized and more inversions will be performed\n to avoid converging to a local minimum. The best fit will be returned\n nIter: maximum number of Levenberg Marquardt iterations per inversion\n chi2_thres: stop inversion if Chi2 <= chi2_thres\n verbose: only used if nthreads=1, prints out info of each LM iteration\n\n The model parameters are: [|B| [G], inc [rad], azi [rad], vlos [km/s], vDop [\\AA], eta_l, damp, S0, S1]\n\n Returns:\n the result of the C++ inversion routine; on invalid input, the tuple (None, None, None)\n \"\"\"\n #\n # Check guessed model properties\n #\n ndim = len(model.shape)\n dtype = self._get_dtype()\n\n \n if(ndim == 1):\n model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)\n elif(ndim == 3):\n model1 = model\n else:\n print(\"MilneEddington::invert: ERROR, the input model must have 1 or 3 dimensions\")\n return None, None, None\n\n if(model1.shape[2] != 9):\n print(\"MilneEddington::invert: ERROR, input model has npar={0}, should be 9\".format(model1.shape[2]))\n return None, None, None\n\n isContiguous = model1.flags['C_CONTIGUOUS']\n if(not isContiguous or model1.dtype != dtype):\n model1 = np.ascontiguousarray(model1, dtype=dtype)\n\n\n \n #\n # Check observations\n #\n ndim = len(obs.shape)\n\n if(ndim == 2):\n obs1 = np.ascontiguousarray(obs.reshape((1,1,obs.shape[0], obs.shape[1])), dtype=dtype)\n elif(ndim == 4):\n obs1 = obs\n else:\n print(\"MilneEddington::invert: ERROR, the input observations must have 2 or 4 dimensions\")\n return None, None, None\n\n \n wav = self.Me.get_wavelength_array()\n nwav = wav.size\n if(obs1.shape[3] != nwav):\n print(\"MilneEddington::invert: ERROR, input observations has nwav={0}, should be nwav={1}\".format(obs1.shape[3], nwav))\n return None, None, None\n\n isContiguous = obs1.flags['C_CONTIGUOUS']\n if(not isContiguous or obs1.dtype != dtype):\n obs1 = np.ascontiguousarray(obs1, dtype=dtype)\n\n \n \n #\n # Check sigma\n #\n if isinstance(sig, np.ndarray):\n if(sig.shape[1] != nwav):\n print(\"MilneEddington::invert: sigma array has nwav={0}, but it should be {1}\".format(sig.shape[1], nwav))\n return None, None, None\n\n sig1 = np.zeros((4,nwav), dtype=dtype, order='c')\n sig1[:] = sig\n\n else:\n sig1 = np.zeros((4,nwav), dtype=dtype, order='c')\n sig1[:] = sig \n \n \n #\n # Call C++ module\n #\n return self.Me.invert(model1, obs1, sig1, mu=mu, nRandom=nRandom, nIter = nIter, chi2_thres = chi2_thres, verbose=verbose)\n \n # *************************************************************************************************\n\n def get_a_guessed_model(self, ny=1, nx=1):\n iPar = np.float64([750, 1.0, 0.39, 0.25, 0.02, 30., 0.1, 0.8, 0.2])\n dtype = self._get_dtype()\n\n res = np.zeros((ny, nx, 9), dtype = dtype, order='c')\n for ii in range(9):\n res[:,:,ii] = iPar[ii]\n return res\n \n # *************************************************************************************************\n\n def 
repeat_model(self, m_in, ny, nx):\n \"\"\"\n This routine repeats a 1D model over an entire FOV with dimensions ny, nx pixels\n m_in must have 9 elements\n \"\"\"\n dtype = self._get_dtype()\n\n res = np.zeros((ny, nx, 9), dtype = dtype, order='c')\n m = m_in.squeeze()\n \n nPar = m.shape[0]\n if(nPar != 9):\n print(\"MilneEddington::repeat_model: Error, input model must have 9 elements!\")\n return None\n\n for ii in range(9):\n res[:,:,ii] = m[ii]\n \n return res\n\n\n # *************************************************************************************************\n \n def estimate_uncertainties(self, model, obs, sig, mu=1.0):\n \"\"\"\n estimates uncertainties based on the quality of the fit\n and the parameters sensitivity.\n\n Model: output model from the inversion [ny, nx, 9]\n Obs : Observed profiles [ny, nx, 4, nwav]\n sig : Noise estimate 1D or 2D [4,nwav]\n\n returns the uncertainty estimate per parameter per pixel [ny, nx, 9]\n\n Reference: del Toro Iniesta (2003), Eq. 11.30\n \"\"\"\n\n \n syn, J = self.synthesize_rf(model, mu=mu)\n\n error = model*0\n ny, nx = error.shape[0:2]\n \n for yy in range(ny):\n for xx in range(nx):\n \n for kk in range(9):\n J[yy,xx,kk] /= sig\n \n\n Hdiag = (J[yy,xx,:]**2).sum(axis=(1,2))\n error[yy,xx,:] = (((obs[yy,xx]-syn[yy,xx]) / sig )**2).sum()\n\n for kk in range(9):\n error[yy,xx,kk] /= Hdiag[kk]\n\n error *= 2.0 / 9.0\n \n return np.sqrt(error)\n \n # *************************************************************************************************\n \n def invert_spatially_regularized(self, model, obs, sig = 1.e-3, mu = 1.0, nIter = 20, chi2_thres = 1.0, alpha=1.0, alphas=np.ones(9,dtype='float32'), method = 0, delay_bracket = 3):\n \"\"\"\n invert_spatially_regularized observations acquired at a given mu angle\n Arguments:\n model: 1D [9] or 3D array [ny,nx,9] with the parameters of the model\n obs: 2D [4,nwav] or 4D array [ny,nx,4,nwav] with the observed profiles. Should be normalized to the mean continuum.\n sig: scalar or 2D array [4,nwav] with the noise estimate\n mu: heliocentric angle for the synthesis\n nIter: maximum number of Levenberg Marquardt iterations per inversion\n chi2_thres: stop inversion if Chi2 <= chi2_thres\n alpha: global regularization weight that multiplies the value of \"alphas\" (default = 1).\n alphas: the relative scaling of regularization weights for each parameter (default = 1).\n method: Numerical method to solve the sparse system: 0) Conjugate Gradient, 1) BiCGStab, 2) SparseLU (default 0)\n delay_bracket: Delay optimal lambda bracketing for this number of iterations. 
Avoids taking too large steps in the initial iterations.\n The model parameters are: [|B| [G], inc [rad], azi [rad], vlos [km/s], vDop [\\AA], eta_l, damp, S0, S1]\n\n Returns:\n the result of the C++ inversion routine; on invalid input, the tuple (None, None, None)\n \"\"\"\n #\n # Check guessed model properties\n #\n ndim = len(model.shape)\n dtype = self._get_dtype()\n\n if(ndim == 1):\n model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)\n elif(ndim == 3):\n model1 = model\n else:\n print(\"MilneEddington::invert_spatially_regularized_float: ERROR, the input model must have 1 or 3 dimensions\")\n return None, None, None\n\n if(model1.shape[2] != 9):\n print(\"MilneEddington::invert_spatially_regularized_float: ERROR, input model has npar={0}, should be 9\".format(model1.shape[2]))\n return None, None, None\n\n isContiguous = model1.flags['C_CONTIGUOUS']\n if(not isContiguous or model1.dtype != dtype):\n model1 = np.ascontiguousarray(model1, dtype=dtype)\n\n\n \n #\n # Check observations\n #\n ndim = len(obs.shape)\n\n if(ndim == 2):\n obs1 = np.ascontiguousarray(obs.reshape((1,1,obs.shape[0], obs.shape[1])), dtype=dtype)\n elif(ndim == 4):\n obs1 = obs\n else:\n print(\"MilneEddington::invert_spatially_regularized_float: ERROR, the input observations must have 2 or 4 dimensions\")\n return None, None, None\n\n \n wav = self.Me.get_wavelength_array()\n nwav = wav.size\n if(obs1.shape[3] != nwav):\n print(\"MilneEddington::invert_spatially_regularized_float: ERROR, input observations has nwav={0}, should be nwav={1}\".format(obs1.shape[3], nwav))\n return None, None, None\n\n isContiguous = obs1.flags['C_CONTIGUOUS']\n if(not isContiguous or obs1.dtype != dtype):\n obs1 = np.ascontiguousarray(obs1, dtype=dtype)\n\n \n \n #\n # Check sigma\n #\n if isinstance(sig, np.ndarray):\n if(sig.shape[1] != nwav):\n print(\"MilneEddington::invert_spatially_regularized_float: sigma array has nwav={0}, but it should be {1}\".format(sig.shape[1], nwav))\n return None, None, None\n\n sig1 = np.zeros((4,nwav), dtype=dtype, order='c')\n sig1[:] = sig\n\n else:\n sig1 = np.zeros((4,nwav), dtype=dtype, order='c')\n sig1[:] = sig \n \n\n\n #\n # make alphas\n #\n alphas_in = np.zeros(9,dtype=dtype)\n for ii in range(9):\n alphas_in[ii] = alpha * alphas[ii]\n \n #\n # Call C++ module\n #\n return self.Me.invert_spatially_regularized(model1, obs1, sig1, alphas_in, mu=mu, nIter = nIter, chi2_thres = chi2_thres, method=method, delay_bracket = delay_bracket)\n \n" ]
[ [ "numpy.sqrt", "numpy.ascontiguousarray", "numpy.ones", "numpy.float64", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IBM/sifar-pytorch
[ "3ac9103245b98a4916dd45bcdf0167d01b5f9b38" ]
[ "utils.py" ]
[ "# Copyright (c) 2015-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the CC-by-NC license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\"\"\"\nMisc functions, including distributed helpers.\n\nMostly copy-paste from torchvision references.\n\"\"\"\nimport io\nimport os\nimport time\nfrom collections import defaultdict, deque\nimport datetime\nimport tempfile\n\nimport torch\nimport torch.distributed as dist\nfrom fvcore.common.checkpoint import Checkpointer\n\n\nclass SmoothedValue(object):\n \"\"\"Track a series of values and provide access to smoothed values over a\n window or the global series average.\n \"\"\"\n\n def __init__(self, window_size=20, fmt=None):\n if fmt is None:\n fmt = \"{median:.4f} ({global_avg:.4f})\"\n self.deque = deque(maxlen=window_size)\n self.total = 0.0\n self.count = 0\n self.fmt = fmt\n\n def update(self, value, n=1):\n self.deque.append(value)\n self.count += n\n self.total += value * n\n\n def synchronize_between_processes(self):\n \"\"\"\n Warning: does not synchronize the deque!\n \"\"\"\n if not is_dist_avail_and_initialized():\n return\n t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n dist.barrier()\n dist.all_reduce(t)\n t = t.tolist()\n self.count = int(t[0])\n self.total = t[1]\n\n @property\n def median(self):\n d = torch.tensor(list(self.deque))\n return d.median().item()\n\n @property\n def avg(self):\n d = torch.tensor(list(self.deque), dtype=torch.float32)\n return d.mean().item()\n\n @property\n def global_avg(self):\n return self.total / self.count\n\n @property\n def max(self):\n return max(self.deque)\n\n @property\n def value(self):\n return self.deque[-1]\n\n def __str__(self):\n return self.fmt.format(\n median=self.median,\n avg=self.avg,\n global_avg=self.global_avg,\n max=self.max,\n value=self.value)\n\n\nclass MetricLogger(object):\n def __init__(self, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n type(self).__name__, attr))\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(\n \"{}: {}\".format(name, str(meter))\n )\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None):\n i = 0\n if not header:\n header = ''\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt='{avg:.4f}')\n data_time = SmoothedValue(fmt='{avg:.4f}')\n space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n log_msg = [\n header,\n '[{0' + space_fmt + '}/{1}]',\n 'eta: {eta}',\n '{meters}',\n 'time: {time}',\n 'data: {data}'\n ]\n if torch.cuda.is_available():\n log_msg.append('max mem: {memory:.0f}')\n log_msg = self.delimiter.join(log_msg)\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0 or i == len(iterable) - 1:\n 
eta_seconds = iter_time.global_avg * (len(iterable) - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n if torch.cuda.is_available():\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB))\n else:\n print(log_msg.format(\n i, len(iterable), eta=eta_string,\n meters=str(self),\n time=str(iter_time), data=str(data_time)))\n i += 1\n end = time.time()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('{} Total time: {} ({:.4f} s / it)'.format(\n header, total_time_str, total_time / len(iterable)))\n\n\ndef _load_checkpoint_for_ema(model_ema, checkpoint):\n \"\"\"\n Workaround for ModelEma._load_checkpoint to accept an already-loaded object\n \"\"\"\n mem_file = io.BytesIO()\n torch.save(checkpoint, mem_file)\n mem_file.seek(0)\n model_ema._load_checkpoint(mem_file)\n\n\ndef load_checkpoint(model, state_dict, mode=None):\n\n # reuse Checkpointer in fvcore to support flexible loading\n ckpt = Checkpointer(model, save_to_disk=False)\n # since Checkpointer requires the weight to be put under `model` field, we need to save it to disk\n tmp_path = tempfile.NamedTemporaryFile('w+b')\n torch.save({'model': state_dict}, tmp_path.name)\n ckpt.load(tmp_path.name)\n\ndef setup_for_distributed(is_master):\n \"\"\"\n This function disables printing when not in master process\n \"\"\"\n import builtins as __builtin__\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n force = kwargs.pop('force', False)\n if is_master or force:\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True\n\n\ndef get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()\n\n\ndef get_rank():\n if not is_dist_avail_and_initialized():\n return 0\n return dist.get_rank()\n\n\ndef is_main_process():\n return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n if is_main_process():\n torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n args.rank = int(os.environ[\"RANK\"])\n args.world_size = int(os.environ['WORLD_SIZE'])\n args.gpu = int(os.environ['LOCAL_RANK'])\n elif 'SLURM_PROCID' in os.environ:\n args.rank = int(os.environ['SLURM_PROCID'])\n args.gpu = args.rank % torch.cuda.device_count()\n else:\n print('Not using distributed mode')\n args.distributed = False\n return\n\n args.distributed = True\n\n torch.cuda.set_device(args.gpu)\n args.dist_backend = 'nccl'\n print('| distributed init (rank {}): {}'.format(\n args.rank, args.dist_url), flush=True)\n torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n world_size=args.world_size, rank=args.rank)\n torch.distributed.barrier()\n setup_for_distributed(args.rank == 0)\n" ]
[ [ "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.cuda.device_count", "torch.distributed.is_initialized", "torch.distributed.barrier", "torch.tensor", "torch.cuda.max_memory_allocated", "torch.distributed.is_available", "torch.cuda.is_available", "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.distributed.all_reduce", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wehs7661/advanced_sampling
[ "92e5a25ec205ff84e55cd01de43c274b63f78a03" ]
[ "advanced_sampling/General_analysis/gmx_plot2d.py" ]
[ "#!/usr/bin/env python\n\"\"\"This is a Python code for the plotting of 2-dimensional data.\n\"\"\"\n\nimport argparse\nimport os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\n\ndef initialize():\n\n parser = argparse.ArgumentParser(\n description='This code saves a contour plot based on 3-dimensional data')\n parser.add_argument('-f',\n '--xvg',\n nargs='+',\n help='Names of the input .xvg files')\n parser.add_argument('-l',\n '--legend',\n default='label',\n nargs='+',\n help='Lengends of the curves')\n parser.add_argument('-x',\n '--xlabel',\n type=str,\n help='The name and units of x-axis')\n parser.add_argument('-y',\n '--ylabel',\n type=str,\n help='The name and units of y-axis')\n parser.add_argument('-t', '--title', type=str, help='Title of the plot')\n parser.add_argument('-n',\n '--pngname',\n type=str,\n help='The filename of the figure')\n\n args_parse = parser.parse_args()\n\n return args_parse\n\n\nif __name__ == '__main__':\n\n args = initialize()\n\n rc('font', **{\n 'family': 'sans-serif',\n 'sans-serif': ['DejaVu Sans'],\n 'size': 10\n })\n # Set the font used for MathJax - more on this later\n rc('mathtext', **{'default': 'regular'})\n plt.rc('font', family='serif')\n\n plt.figure() # ready to plot!\n\n if isinstance(args.xvg, str): # the case of only one input\n args.xvg = list(args.xvg)\n # for the case of only one input, the legend arugment takes the default\n # but will not be shown\n\n for i in range(len(args.xvg)):\n x, y = [], []\n infile = open('%s' % args.xvg[i], 'r')\n lines = infile.readlines()\n infile.close\n # Parse data\n n = 0\n m = 0\n for line in lines:\n if line[0] == '#' or line[0] == '@':\n m += 1 # number of parameter lines\n # read in data starting from (m+1)-th line to the end\n for line in lines[m:]:\n if line[0] != '#' and line[0] != '@':\n tokens = line.split()\n x.append(float(tokens[0]))\n y.append(float(tokens[1]))\n x, y = np.array(x), np.array(y)\n\n if max(abs(x)) >= 10000:\n x = x / 1000\n T = 298.15\n conversion1 = 1.38064852 * 6.02 * T / 1000 # from kcal/mol to kT\n conversion2 = np.pi/180 # from radian to degree\n x = x /conversion2\n y = y / conversion1\n plt.plot(x, y, label='%s' % args.legend[i])\n # plt.hold(True)\n\n plt.title('%s' % args.title)\n plt.xlabel('%s' % args.xlabel)\n #plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n plt.ylabel('%s' % args.ylabel)\n if max(abs(y)) >= 10000:\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))\n plt.grid(True)\n\n if len(args.xvg) > 1:\n plt.legend(ncol=2)\n\n # Save the image, but not overwrite the file with the same file name.\n # The name of the file should be 'pic.png', 'pic_1.png', ...\n n = 0 # number of the figures that have been produce in the same dir.\n if os.path.isfile('%s.png' % args.pngname):\n n += 1\n while os.path.isfile('%s_%s.png' % (args.pngname, n)):\n n += 1\n plt.savefig('%s_%s.png' % (args.pngname, n))\n plt.savefig('%s_0.png' % args.pngname)\n\n plt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.rc", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ticklabel_format", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.rc", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mlhenderson/xarray
[ "07de257c5884df49335496ee6347fb633a7c302c" ]
[ "xarray/core/groupby.py" ]
[ "import datetime\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom . import dtypes, duck_array_ops, nputils, ops\nfrom .arithmetic import DataArrayGroupbyArithmetic, DatasetGroupbyArithmetic\nfrom .concat import concat\nfrom .formatting import format_array_flat\nfrom .indexes import propagate_indexes\nfrom .options import _get_keep_attrs\nfrom .pycompat import integer_types\nfrom .utils import (\n either_dict_or_kwargs,\n hashable,\n is_scalar,\n maybe_wrap_array,\n peek_at,\n safe_cast_to_index,\n)\nfrom .variable import IndexVariable, Variable, as_variable\n\n\ndef check_reduce_dims(reduce_dims, dimensions):\n\n if reduce_dims is not ...:\n if is_scalar(reduce_dims):\n reduce_dims = [reduce_dims]\n if any(dim not in dimensions for dim in reduce_dims):\n raise ValueError(\n f\"cannot reduce over dimensions {reduce_dims!r}. expected either '...' \"\n f\"to reduce over all dimensions or one or more of {dimensions!r}.\"\n )\n\n\ndef unique_value_groups(ar, sort=True):\n \"\"\"Group an array by its unique values.\n\n Parameters\n ----------\n ar : array-like\n Input array. This will be flattened if it is not already 1-D.\n sort : bool, optional\n Whether or not to sort unique values.\n\n Returns\n -------\n values : np.ndarray\n Sorted, unique values as returned by `np.unique`.\n indices : list of lists of int\n Each element provides the integer indices in `ar` with values given by\n the corresponding value in `unique_values`.\n \"\"\"\n inverse, values = pd.factorize(ar, sort=sort)\n groups = [[] for _ in range(len(values))]\n for n, g in enumerate(inverse):\n if g >= 0:\n # pandas uses -1 to mark NaN, but doesn't include them in values\n groups[g].append(n)\n return values, groups\n\n\ndef _dummy_copy(xarray_obj):\n from .dataarray import DataArray\n from .dataset import Dataset\n\n if isinstance(xarray_obj, Dataset):\n res = Dataset(\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.data_vars.items()\n },\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.coords.items()\n if k not in xarray_obj.dims\n },\n xarray_obj.attrs,\n )\n elif isinstance(xarray_obj, DataArray):\n res = DataArray(\n dtypes.get_fill_value(xarray_obj.dtype),\n {\n k: dtypes.get_fill_value(v.dtype)\n for k, v in xarray_obj.coords.items()\n if k not in xarray_obj.dims\n },\n dims=[],\n name=xarray_obj.name,\n attrs=xarray_obj.attrs,\n )\n else: # pragma: no cover\n raise AssertionError\n return res\n\n\ndef _is_one_or_none(obj):\n return obj == 1 or obj is None\n\n\ndef _consolidate_slices(slices):\n \"\"\"Consolidate adjacent slices in a list of slices.\"\"\"\n result = []\n last_slice = slice(None)\n for slice_ in slices:\n if not isinstance(slice_, slice):\n raise ValueError(f\"list element is not a slice: {slice_!r}\")\n if (\n result\n and last_slice.stop == slice_.start\n and _is_one_or_none(last_slice.step)\n and _is_one_or_none(slice_.step)\n ):\n last_slice = slice(last_slice.start, slice_.stop, slice_.step)\n result[-1] = last_slice\n else:\n result.append(slice_)\n last_slice = slice_\n return result\n\n\ndef _inverse_permutation_indices(positions):\n \"\"\"Like inverse_permutation, but also handles slices.\n\n Parameters\n ----------\n positions : list of ndarray or slice\n If slice objects, all are assumed to be slices.\n\n Returns\n -------\n np.ndarray of indices or None, if no permutation is necessary.\n \"\"\"\n if not positions:\n return None\n\n if isinstance(positions[0], slice):\n positions = _consolidate_slices(positions)\n if positions == 
slice(None):\n return None\n positions = [np.arange(sl.start, sl.stop, sl.step) for sl in positions]\n\n return nputils.inverse_permutation(np.concatenate(positions))\n\n\nclass _DummyGroup:\n \"\"\"Class for keeping track of grouped dimensions without coordinates.\n\n Should not be user visible.\n \"\"\"\n\n __slots__ = (\"name\", \"coords\", \"size\")\n\n def __init__(self, obj, name, coords):\n self.name = name\n self.coords = coords\n self.size = obj.sizes[name]\n\n @property\n def dims(self):\n return (self.name,)\n\n @property\n def ndim(self):\n return 1\n\n @property\n def values(self):\n return range(self.size)\n\n @property\n def shape(self):\n return (self.size,)\n\n def __getitem__(self, key):\n if isinstance(key, tuple):\n key = key[0]\n return self.values[key]\n\n\ndef _ensure_1d(group, obj):\n if group.ndim != 1:\n # try to stack the dims of the group into a single dim\n orig_dims = group.dims\n stacked_dim = \"stacked_\" + \"_\".join(orig_dims)\n # these dimensions get created by the stack operation\n inserted_dims = [dim for dim in group.dims if dim not in group.coords]\n # the copy is necessary here, otherwise read only array raises error\n # in pandas: https://github.com/pydata/pandas/issues/12813\n group = group.stack(**{stacked_dim: orig_dims}).copy()\n obj = obj.stack(**{stacked_dim: orig_dims})\n else:\n stacked_dim = None\n inserted_dims = []\n return group, obj, stacked_dim, inserted_dims\n\n\ndef _unique_and_monotonic(group):\n if isinstance(group, _DummyGroup):\n return True\n index = safe_cast_to_index(group)\n return index.is_unique and index.is_monotonic\n\n\ndef _apply_loffset(grouper, result):\n \"\"\"\n (copied from pandas)\n if loffset is set, offset the result index\n\n This is NOT an idempotent routine, it will be applied\n exactly once to the result.\n\n Parameters\n ----------\n result : Series or DataFrame\n the result of resample\n \"\"\"\n\n needs_offset = (\n isinstance(grouper.loffset, (pd.DateOffset, datetime.timedelta))\n and isinstance(result.index, pd.DatetimeIndex)\n and len(result.index) > 0\n )\n\n if needs_offset:\n result.index = result.index + grouper.loffset\n\n grouper.loffset = None\n\n\nclass GroupBy:\n \"\"\"A object that implements the split-apply-combine pattern.\n\n Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over\n (unique_value, grouped_array) pairs, but the main way to interact with a\n groupby object are with the `apply` or `reduce` methods. 
You can also\n directly call numpy methods like `mean` or `std`.\n\n You should create a GroupBy object by using the `DataArray.groupby` or\n `Dataset.groupby` methods.\n\n See Also\n --------\n Dataset.groupby\n DataArray.groupby\n \"\"\"\n\n __slots__ = (\n \"_full_index\",\n \"_inserted_dims\",\n \"_group\",\n \"_group_dim\",\n \"_group_indices\",\n \"_groups\",\n \"_obj\",\n \"_restore_coord_dims\",\n \"_stacked_dim\",\n \"_unique_coord\",\n \"_dims\",\n )\n\n def __init__(\n self,\n obj,\n group,\n squeeze=False,\n grouper=None,\n bins=None,\n restore_coord_dims=True,\n cut_kwargs=None,\n ):\n \"\"\"Create a GroupBy object\n\n Parameters\n ----------\n obj : Dataset or DataArray\n Object to group.\n group : DataArray\n Array with the group values.\n squeeze : bool, optional\n If \"group\" is a coordinate of object, `squeeze` controls whether\n the subarrays have a dimension of length 1 along that coordinate or\n if the dimension is squeezed out.\n grouper : pandas.Grouper, optional\n Used for grouping values along the `group` array.\n bins : array-like, optional\n If `bins` is specified, the groups will be discretized into the\n specified bins by `pandas.cut`.\n restore_coord_dims : bool, default: True\n If True, also restore the dimension order of multi-dimensional\n coordinates.\n cut_kwargs : dict, optional\n Extra keyword arguments to pass to `pandas.cut`\n\n \"\"\"\n if cut_kwargs is None:\n cut_kwargs = {}\n from .dataarray import DataArray\n\n if grouper is not None and bins is not None:\n raise TypeError(\"can't specify both `grouper` and `bins`\")\n\n if not isinstance(group, (DataArray, IndexVariable)):\n if not hashable(group):\n raise TypeError(\n \"`group` must be an xarray.DataArray or the \"\n \"name of an xarray variable or dimension.\"\n f\"Received {group!r} instead.\"\n )\n group = obj[group]\n if len(group) == 0:\n raise ValueError(f\"{group.name} must not be empty\")\n\n if group.name not in obj.coords and group.name in obj.dims:\n # DummyGroups should not appear on groupby results\n group = _DummyGroup(obj, group.name, group.coords)\n\n if getattr(group, \"name\", None) is None:\n group.name = \"group\"\n\n group, obj, stacked_dim, inserted_dims = _ensure_1d(group, obj)\n (group_dim,) = group.dims\n\n expected_size = obj.sizes[group_dim]\n if group.size != expected_size:\n raise ValueError(\n \"the group variable's length does not \"\n \"match the length of this variable along its \"\n \"dimension\"\n )\n\n full_index = None\n\n if bins is not None:\n if duck_array_ops.isnull(bins).all():\n raise ValueError(\"All bin edges are NaN.\")\n binned = pd.cut(group.values, bins, **cut_kwargs)\n new_dim_name = group.name + \"_bins\"\n group = DataArray(binned, group.coords, name=new_dim_name)\n full_index = binned.categories\n\n if grouper is not None:\n index = safe_cast_to_index(group)\n if not index.is_monotonic:\n # TODO: sort instead of raising an error\n raise ValueError(\"index must be monotonic for resampling\")\n full_index, first_items = self._get_index_and_items(index, grouper)\n sbins = first_items.values.astype(np.int64)\n group_indices = [slice(i, j) for i, j in zip(sbins[:-1], sbins[1:])] + [\n slice(sbins[-1], None)\n ]\n unique_coord = IndexVariable(group.name, first_items.index)\n elif group.dims == (group.name,) and _unique_and_monotonic(group):\n # no need to factorize\n group_indices = np.arange(group.size)\n if not squeeze:\n # use slices to do views instead of fancy indexing\n # equivalent to: group_indices = group_indices.reshape(-1, 1)\n 
group_indices = [slice(i, i + 1) for i in group_indices]\n unique_coord = group\n else:\n if group.isnull().any():\n # drop any NaN valued groups.\n # also drop obj values where group was NaN\n # Use where instead of reindex to account for duplicate coordinate labels.\n obj = obj.where(group.notnull(), drop=True)\n group = group.dropna(group_dim)\n\n # look through group to find the unique values\n group_as_index = safe_cast_to_index(group)\n sort = bins is None and (not isinstance(group_as_index, pd.MultiIndex))\n unique_values, group_indices = unique_value_groups(\n group_as_index, sort=sort\n )\n unique_coord = IndexVariable(group.name, unique_values)\n\n if len(group_indices) == 0:\n if bins is not None:\n raise ValueError(\n f\"None of the data falls within bins with edges {bins!r}\"\n )\n else:\n raise ValueError(\n \"Failed to group data. Are you grouping by a variable that is all NaN?\"\n )\n\n # specification for the groupby operation\n self._obj = obj\n self._group = group\n self._group_dim = group_dim\n self._group_indices = group_indices\n self._unique_coord = unique_coord\n self._stacked_dim = stacked_dim\n self._inserted_dims = inserted_dims\n self._full_index = full_index\n self._restore_coord_dims = restore_coord_dims\n\n # cached attributes\n self._groups = None\n self._dims = None\n\n @property\n def dims(self):\n if self._dims is None:\n self._dims = self._obj.isel(\n **{self._group_dim: self._group_indices[0]}\n ).dims\n\n return self._dims\n\n @property\n def groups(self):\n \"\"\"\n Mapping from group labels to indices. The indices can be used to index the underlying object.\n \"\"\"\n # provided to mimic pandas.groupby\n if self._groups is None:\n self._groups = dict(zip(self._unique_coord.values, self._group_indices))\n return self._groups\n\n def __getitem__(self, key):\n \"\"\"\n Get DataArray or Dataset corresponding to a particular group label.\n \"\"\"\n return self._obj.isel({self._group_dim: self.groups[key]})\n\n def __len__(self):\n return self._unique_coord.size\n\n def __iter__(self):\n return zip(self._unique_coord.values, self._iter_grouped())\n\n def __repr__(self):\n return \"{}, grouped over {!r}\\n{!r} groups with labels {}.\".format(\n self.__class__.__name__,\n self._unique_coord.name,\n self._unique_coord.size,\n \", \".join(format_array_flat(self._unique_coord, 30).split()),\n )\n\n def _get_index_and_items(self, index, grouper):\n from .resample_cftime import CFTimeGrouper\n\n s = pd.Series(np.arange(index.size), index)\n if isinstance(grouper, CFTimeGrouper):\n first_items = grouper.first_items(index)\n else:\n first_items = s.groupby(grouper).first()\n _apply_loffset(grouper, first_items)\n full_index = first_items.index\n if first_items.isnull().any():\n first_items = first_items.dropna()\n return full_index, first_items\n\n def _iter_grouped(self):\n \"\"\"Iterate over each element in this group\"\"\"\n for indices in self._group_indices:\n yield self._obj.isel(**{self._group_dim: indices})\n\n def _infer_concat_args(self, applied_example):\n if self._group_dim in applied_example.dims:\n coord = self._group\n positions = self._group_indices\n else:\n coord = self._unique_coord\n positions = None\n (dim,) = coord.dims\n if isinstance(coord, _DummyGroup):\n coord = None\n return coord, dim, positions\n\n def _binary_op(self, other, f, reflexive=False):\n g = f if not reflexive else lambda x, y: f(y, x)\n applied = self._yield_binary_applied(g, other)\n return self._combine(applied)\n\n def _yield_binary_applied(self, func, other):\n dummy = 
None\n\n for group_value, obj in self:\n try:\n other_sel = other.sel(**{self._group.name: group_value})\n except AttributeError:\n raise TypeError(\n \"GroupBy objects only support binary ops \"\n \"when the other argument is a Dataset or \"\n \"DataArray\"\n )\n except (KeyError, ValueError):\n if self._group.name not in other.dims:\n raise ValueError(\n \"incompatible dimensions for a grouped \"\n f\"binary operation: the group variable {self._group.name!r} \"\n \"is not a dimension on the other argument\"\n )\n if dummy is None:\n dummy = _dummy_copy(other)\n other_sel = dummy\n\n result = func(obj, other_sel)\n yield result\n\n def _maybe_restore_empty_groups(self, combined):\n \"\"\"Our index contained empty groups (e.g., from a resampling). If we\n reduced on that dimension, we want to restore the full index.\n \"\"\"\n if self._full_index is not None and self._group.name in combined.dims:\n indexers = {self._group.name: self._full_index}\n combined = combined.reindex(**indexers)\n return combined\n\n def _maybe_unstack(self, obj):\n \"\"\"This gets called if we are applying on an array with a\n multidimensional group.\"\"\"\n if self._stacked_dim is not None and self._stacked_dim in obj.dims:\n obj = obj.unstack(self._stacked_dim)\n for dim in self._inserted_dims:\n if dim in obj.coords:\n del obj.coords[dim]\n obj._indexes = propagate_indexes(obj._indexes, exclude=self._inserted_dims)\n return obj\n\n def fillna(self, value):\n \"\"\"Fill missing values in this object by group.\n\n This operation follows the normal broadcasting and alignment rules that\n xarray uses for binary arithmetic, except the result is aligned to this\n object (``join='left'``) instead of aligned to the intersection of\n index coordinates (``join='inner'``).\n\n Parameters\n ----------\n value\n Used to fill all matching missing values by group. Needs\n to be of a valid type for the wrapped object's fillna\n method.\n\n Returns\n -------\n same type as the grouped object\n\n See Also\n --------\n Dataset.fillna\n DataArray.fillna\n \"\"\"\n return ops.fillna(self, value)\n\n def quantile(\n self, q, dim=None, interpolation=\"linear\", keep_attrs=None, skipna=True\n ):\n \"\"\"Compute the qth quantile over each array in the groups and\n concatenate them together into a new array.\n\n Parameters\n ----------\n q : float or sequence of float\n Quantile to compute, which must be between 0 and 1\n inclusive.\n dim : ..., str or sequence of str, optional\n Dimension(s) over which to apply quantile.\n Defaults to the grouped dimension.\n interpolation : {\"linear\", \"lower\", \"higher\", \"midpoint\", \"nearest\"}, default: \"linear\"\n This optional parameter specifies the interpolation method to\n use when the desired quantile lies between two data points\n ``i < j``:\n\n * linear: ``i + (j - i) * fraction``, where ``fraction`` is\n the fractional part of the index surrounded by ``i`` and\n ``j``.\n * lower: ``i``.\n * higher: ``j``.\n * nearest: ``i`` or ``j``, whichever is nearest.\n * midpoint: ``(i + j) / 2``.\n skipna : bool, optional\n Whether to skip missing values when aggregating.\n\n Returns\n -------\n quantiles : Variable\n If `q` is a single quantile, then the result is a\n scalar. If multiple percentiles are given, first axis of\n the result corresponds to the quantile. In either case a\n quantile dimension is added to the return array. 
The other\n dimensions are the dimensions that remain after the\n reduction of the array.\n\n See Also\n --------\n numpy.nanquantile, numpy.quantile, pandas.Series.quantile, Dataset.quantile\n DataArray.quantile\n\n Examples\n --------\n >>> da = xr.DataArray(\n ... [[1.3, 8.4, 0.7, 6.9], [0.7, 4.2, 9.4, 1.5], [6.5, 7.3, 2.6, 1.9]],\n ... coords={\"x\": [0, 0, 1], \"y\": [1, 1, 2, 2]},\n ... dims=(\"x\", \"y\"),\n ... )\n >>> ds = xr.Dataset({\"a\": da})\n >>> da.groupby(\"x\").quantile(0)\n <xarray.DataArray (x: 2, y: 4)>\n array([[0.7, 4.2, 0.7, 1.5],\n [6.5, 7.3, 2.6, 1.9]])\n Coordinates:\n * y (y) int64 1 1 2 2\n quantile float64 0.0\n * x (x) int64 0 1\n >>> ds.groupby(\"y\").quantile(0, dim=...)\n <xarray.Dataset>\n Dimensions: (y: 2)\n Coordinates:\n quantile float64 0.0\n * y (y) int64 1 2\n Data variables:\n a (y) float64 0.7 0.7\n >>> da.groupby(\"x\").quantile([0, 0.5, 1])\n <xarray.DataArray (x: 2, y: 4, quantile: 3)>\n array([[[0.7 , 1. , 1.3 ],\n [4.2 , 6.3 , 8.4 ],\n [0.7 , 5.05, 9.4 ],\n [1.5 , 4.2 , 6.9 ]],\n <BLANKLINE>\n [[6.5 , 6.5 , 6.5 ],\n [7.3 , 7.3 , 7.3 ],\n [2.6 , 2.6 , 2.6 ],\n [1.9 , 1.9 , 1.9 ]]])\n Coordinates:\n * y (y) int64 1 1 2 2\n * quantile (quantile) float64 0.0 0.5 1.0\n * x (x) int64 0 1\n >>> ds.groupby(\"y\").quantile([0, 0.5, 1], dim=...)\n <xarray.Dataset>\n Dimensions: (y: 2, quantile: 3)\n Coordinates:\n * quantile (quantile) float64 0.0 0.5 1.0\n * y (y) int64 1 2\n Data variables:\n a (y, quantile) float64 0.7 5.35 8.4 0.7 2.25 9.4\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n out = self.map(\n self._obj.__class__.quantile,\n shortcut=False,\n q=q,\n dim=dim,\n interpolation=interpolation,\n keep_attrs=keep_attrs,\n skipna=skipna,\n )\n return out\n\n def where(self, cond, other=dtypes.NA):\n \"\"\"Return elements from `self` or `other` depending on `cond`.\n\n Parameters\n ----------\n cond : DataArray or Dataset\n Locations at which to preserve this objects values. dtypes have to be `bool`\n other : scalar, DataArray or Dataset, optional\n Value to use for locations in this object where ``cond`` is False.\n By default, inserts missing values.\n\n Returns\n -------\n same type as the grouped object\n\n See Also\n --------\n Dataset.where\n \"\"\"\n return ops.where_method(self, cond, other)\n\n def _first_or_last(self, op, skipna, keep_attrs):\n if isinstance(self._group_indices[0], integer_types):\n # NB. 
this is currently only used for reductions along an existing\n # dimension\n return self._obj\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=True)\n return self.reduce(op, self._group_dim, skipna=skipna, keep_attrs=keep_attrs)\n\n def first(self, skipna=None, keep_attrs=None):\n \"\"\"Return the first element of each group along the group dimension\"\"\"\n return self._first_or_last(duck_array_ops.first, skipna, keep_attrs)\n\n def last(self, skipna=None, keep_attrs=None):\n \"\"\"Return the last element of each group along the group dimension\"\"\"\n return self._first_or_last(duck_array_ops.last, skipna, keep_attrs)\n\n def assign_coords(self, coords=None, **coords_kwargs):\n \"\"\"Assign coordinates by group.\n\n See Also\n --------\n Dataset.assign_coords\n Dataset.swap_dims\n \"\"\"\n coords_kwargs = either_dict_or_kwargs(coords, coords_kwargs, \"assign_coords\")\n return self.map(lambda ds: ds.assign_coords(**coords_kwargs))\n\n\ndef _maybe_reorder(xarray_obj, dim, positions):\n order = _inverse_permutation_indices(positions)\n\n if order is None or len(order) != xarray_obj.sizes[dim]:\n return xarray_obj\n else:\n return xarray_obj[{dim: order}]\n\n\nclass DataArrayGroupBy(GroupBy, DataArrayGroupbyArithmetic):\n \"\"\"GroupBy object specialized to grouping DataArray objects\"\"\"\n\n __slots__ = ()\n\n def _iter_grouped_shortcut(self):\n \"\"\"Fast version of `_iter_grouped` that yields Variables without\n metadata\n \"\"\"\n var = self._obj.variable\n for indices in self._group_indices:\n yield var[{self._group_dim: indices}]\n\n def _concat_shortcut(self, applied, dim, positions=None):\n # nb. don't worry too much about maintaining this method -- it does\n # speed things up, but it's not very interpretable and there are much\n # faster alternatives (e.g., doing the grouped aggregation in a\n # compiled language)\n stacked = Variable.concat(applied, dim, shortcut=True)\n reordered = _maybe_reorder(stacked, dim, positions)\n return self._obj._replace_maybe_drop_dims(reordered)\n\n def _restore_dim_order(self, stacked):\n def lookup_order(dimension):\n if dimension == self._group.name:\n (dimension,) = self._group.dims\n if dimension in self._obj.dims:\n axis = self._obj.get_axis_num(dimension)\n else:\n axis = 1e6 # some arbitrarily high value\n return axis\n\n new_order = sorted(stacked.dims, key=lookup_order)\n return stacked.transpose(*new_order, transpose_coords=self._restore_coord_dims)\n\n def map(self, func, shortcut=False, args=(), **kwargs):\n \"\"\"Apply a function to each array in the group and concatenate them\n together into a new array.\n\n `func` is called like `func(ar, *args, **kwargs)` for each array `ar`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the array. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped array after applying `func`, then stack\n over this dimension.\n 2. 
Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : callable\n Callable to apply to each array.\n shortcut : bool, optional\n Whether or not to shortcut evaluation under the assumptions that:\n\n (1) The action of `func` does not depend on any of the array\n metadata (attributes or coordinates) but only on the data and\n dimensions.\n (2) The action of `func` creates arrays with homogeneous metadata,\n that is, with the same dimensions and attributes.\n\n If these conditions are satisfied `shortcut` provides significant\n speedup. This should be the case for many common groupby operations\n (e.g., applying numpy ufuncs).\n *args : tuple, optional\n Positional arguments passed to `func`.\n **kwargs\n Used to call `func(ar, **kwargs)` for each array `ar`.\n\n Returns\n -------\n applied : DataArray or DataArray\n The result of splitting, applying and combining this array.\n \"\"\"\n grouped = self._iter_grouped_shortcut() if shortcut else self._iter_grouped()\n applied = (maybe_wrap_array(arr, func(arr, *args, **kwargs)) for arr in grouped)\n return self._combine(applied, shortcut=shortcut)\n\n def apply(self, func, shortcut=False, args=(), **kwargs):\n \"\"\"\n Backward compatible implementation of ``map``\n\n See Also\n --------\n DataArrayGroupBy.map\n \"\"\"\n warnings.warn(\n \"GroupBy.apply may be deprecated in the future. Using GroupBy.map is encouraged\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n return self.map(func, shortcut=shortcut, args=args, **kwargs)\n\n def _combine(self, applied, shortcut=False):\n \"\"\"Recombine the applied objects like the original.\"\"\"\n applied_example, applied = peek_at(applied)\n coord, dim, positions = self._infer_concat_args(applied_example)\n if shortcut:\n combined = self._concat_shortcut(applied, dim, positions)\n else:\n combined = concat(applied, dim)\n combined = _maybe_reorder(combined, dim, positions)\n\n if isinstance(combined, type(self._obj)):\n # only restore dimension order for arrays\n combined = self._restore_dim_order(combined)\n # assign coord when the applied function does not return that coord\n if coord is not None and dim not in applied_example.dims:\n if shortcut:\n coord_var = as_variable(coord)\n combined._coords[coord.name] = coord_var\n else:\n combined.coords[coord.name] = coord\n combined = self._maybe_restore_empty_groups(combined)\n combined = self._maybe_unstack(combined)\n return combined\n\n def reduce(\n self, func, dim=None, axis=None, keep_attrs=None, shortcut=True, **kwargs\n ):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : ..., str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. 
If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n def reduce_array(ar):\n return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)\n\n check_reduce_dims(dim, self.dims)\n\n return self.map(reduce_array, shortcut=shortcut)\n\n\nclass DatasetGroupBy(GroupBy, DatasetGroupbyArithmetic):\n\n __slots__ = ()\n\n def map(self, func, args=(), shortcut=None, **kwargs):\n \"\"\"Apply a function to each Dataset in the group and concatenate them\n together into a new Dataset.\n\n `func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`\n in this group.\n\n Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how\n to stack together the datasets. The rule is:\n\n 1. If the dimension along which the group coordinate is defined is\n still in the first grouped item after applying `func`, then stack\n over this dimension.\n 2. Otherwise, stack over the new dimension given by name of this\n grouping (the argument to the `groupby` function).\n\n Parameters\n ----------\n func : callable\n Callable to apply to each sub-dataset.\n args : tuple, optional\n Positional arguments to pass to `func`.\n **kwargs\n Used to call `func(ds, **kwargs)` for each sub-dataset `ar`.\n\n Returns\n -------\n applied : Dataset or DataArray\n The result of splitting, applying and combining this dataset.\n \"\"\"\n # ignore shortcut if set (for now)\n applied = (func(ds, *args, **kwargs) for ds in self._iter_grouped())\n return self._combine(applied)\n\n def apply(self, func, args=(), shortcut=None, **kwargs):\n \"\"\"\n Backward compatible implementation of ``map``\n\n See Also\n --------\n DatasetGroupBy.map\n \"\"\"\n\n warnings.warn(\n \"GroupBy.apply may be deprecated in the future. Using GroupBy.map is encouraged\",\n PendingDeprecationWarning,\n stacklevel=2,\n )\n return self.map(func, shortcut=shortcut, args=args, **kwargs)\n\n def _combine(self, applied):\n \"\"\"Recombine the applied objects like the original.\"\"\"\n applied_example, applied = peek_at(applied)\n coord, dim, positions = self._infer_concat_args(applied_example)\n combined = concat(applied, dim)\n combined = _maybe_reorder(combined, dim, positions)\n # assign coord when the applied function does not return that coord\n if coord is not None and dim not in applied_example.dims:\n combined[coord.name] = coord\n combined = self._maybe_restore_empty_groups(combined)\n combined = self._maybe_unstack(combined)\n return combined\n\n def reduce(self, func, dim=None, keep_attrs=None, **kwargs):\n \"\"\"Reduce the items in this group by applying `func` along some\n dimension(s).\n\n Parameters\n ----------\n func : callable\n Function which can be called in the form\n `func(x, axis=axis, **kwargs)` to return the result of collapsing\n an np.ndarray over an integer valued axis.\n dim : ..., str or sequence of str, optional\n Dimension(s) over which to apply `func`.\n axis : int or sequence of int, optional\n Axis(es) over which to apply `func`. Only one of the 'dimension'\n and 'axis' arguments can be supplied. 
If neither are supplied, then\n `func` is calculated over all dimension for each group item.\n keep_attrs : bool, optional\n If True, the datasets's attributes (`attrs`) will be copied from\n the original object to the new one. If False (default), the new\n object will be returned without attributes.\n **kwargs : dict\n Additional keyword arguments passed on to `func`.\n\n Returns\n -------\n reduced : Array\n Array with summarized data and the indicated dimension(s)\n removed.\n \"\"\"\n if dim is None:\n dim = self._group_dim\n\n if keep_attrs is None:\n keep_attrs = _get_keep_attrs(default=False)\n\n def reduce_dataset(ds):\n return ds.reduce(func, dim, keep_attrs, **kwargs)\n\n check_reduce_dims(dim, self.dims)\n\n return self.map(reduce_dataset)\n\n def assign(self, **kwargs):\n \"\"\"Assign data variables by group.\n\n See Also\n --------\n Dataset.assign\n \"\"\"\n return self.map(lambda ds: ds.assign(**kwargs))\n" ]
[ [ "numpy.concatenate", "numpy.arange", "pandas.factorize", "pandas.cut" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
NiklasHoltmeyer/FashionDatasets
[ "a9309f90abd6bff739ecffafd69cf52506f2cb97" ]
[ "fashiondatasets/deepfashion2/helper/pairs/deep_fashion_2_pairs_generator.py" ]
[ "import os\nimport random\n\nfrom fashionscrapper.utils.list import flatten, distinct\n\nfrom fashiondatasets.deepfashion2.helper.pairs._aggregate_collections import load_aggregated_annotations, \\\n    DeepFashion_DF_Helper, load_image_quadruplets_csv_path, splits, load_info_path\nimport pandas as pd\nfrom tqdm.auto import tqdm\nimport tensorflow as tf\n\nfrom fashiondatasets.deepfashion2.helper.pairs.similar_embeddings import find_top_k\nfrom fashiondatasets.own.helper.mappings import preprocess_image\nfrom fashiondatasets.utils.logger.defaultLogger import defaultLogger\n\nlogger = defaultLogger(\"fashion_pair_gen\")\n\nclass DeepFashion2PairsGenerator:\n    def __init__(self, _base_path, embedding, number_possibilites=32, split_suffix=\"\", **kwargs):\n        self.base_path = _base_path\n        self.threads = kwargs.get(\"threads\", None)\n        self.kwargs = kwargs\n\n        self.df_helper = {}\n        self.complementary_cat_ids = {}\n\n        self.embedding = embedding\n        self.split_suffix = split_suffix\n        self.embedding_storage = None\n\n        self.number_possibilites = number_possibilites\n\n    def full_image_path(self, split, x):\n        return os.path.join(self.base_path, split + self.split_suffix, \"images\", str(x).zfill(6) + \".jpg\")\n\n    def _load(self, split):\n        annotations_info = load_aggregated_annotations(self.base_path, _split=split)\n        complementary_cat_ids, images_info = annotations_info[\"complementary_cat_ids\"], annotations_info[\"images_info\"]\n\n        if self.kwargs.get(\"shuffle\", True):\n            images_info = images_info.sample(frac=1)\n\n        df_helper = DeepFashion_DF_Helper(images_info)\n\n        self.df_helper[split] = df_helper\n        self.complementary_cat_ids[split] = complementary_cat_ids\n\n    @staticmethod\n    def walk_anchor_positive_possibilites(df_helper):\n        for a_img_id in df_helper.user.image_ids:\n            anchor = df_helper.user.by_image_id[a_img_id]\n            pair_id = anchor[\"pair_id\"]\n\n            possibles_positives = df_helper.shop.by_pair_id[pair_id]\n            yield anchor, possibles_positives\n\n    @staticmethod\n    def choose_possibility_by_round_robin(possibilities):\n        for i in range(100): # <- 100 retries\n            choice = possibilities.pop(0) # take first Item -> Push it to the End of the List -> Round Robin\n            possibilities.append(choice)\n        return choice\n\n    @staticmethod\n    def choose_possibility_by_distance(pivot, possibilities, most_similar):\n        possibilites_embeddings = [x[\"embedding\"] for x in possibilities]\n        pivot_embedding = [pivot[\"embedding\"]]\n\n        idx = find_top_k(pivot_embedding, possibilites_embeddings, most_similar=most_similar, k=1)[0]\n        return possibilities[idx]\n\n    def choose_possibility(self, pivot, possibilities, most_similar):\n        if not self.embedding or len(possibilities) == 1:\n            return self.choose_possibility_by_round_robin(possibilities)\n        return self.choose_possibility_by_distance(pivot, possibilities, most_similar=most_similar)\n\n    def build_anchor_positives(self, split):\n        self._load(split)\n        df_helper = self.df_helper[split]\n\n        image_paths = []\n        anchor_possible_positives = list(self.walk_anchor_positive_possibilites(df_helper))\n\n        for anchor, possibles_positives in self.walk_anchor_positive_possibilites(df_helper):\n            i_path = self.full_image_path(split, anchor[\"image_id\"])\n            image_paths.append(i_path)\n\n        image_ds = tf.data.Dataset.from_tensor_slices(list(image_paths)) \\\n            .map(preprocess_image((224, 224))) \\\n            .batch(64, drop_remainder=False) \\\n            .prefetch(tf.data.AUTOTUNE)\n\n        embeddings = []\n        for batch in tqdm(image_ds, desc=\"Anchor-Embeddings\"):\n            embeddings.extend(self.embedding(batch))\n\n        assert len(embeddings) == 
len(image_paths)\n\n anchor_possible_positives_embeddings = [(anchor, possibles_positives, embedding) for\n ((anchor, possibles_positives), embedding) in\n zip(anchor_possible_positives, embeddings)]\n\n possible_positive_images = flatten([x[1] for x in anchor_possible_positives_embeddings])\n possible_positive_images_ids = distinct([x[\"image_id\"] for x in possible_positive_images])\n possible_positive_images = [self.full_image_path(split, x) for x in possible_positive_images_ids]\n\n pp_image_ds = tf.data.Dataset.from_tensor_slices(list(possible_positive_images)) \\\n .map(preprocess_image((224, 224))) \\\n .batch(128, drop_remainder=False) \\\n .prefetch(tf.data.AUTOTUNE)\n\n pp_embeddings = []\n for batch in tqdm(pp_image_ds, desc=\"Positives Embeddings\"):\n pp_embeddings.extend(self.embedding(batch))\n\n assert len(pp_embeddings) == len(possible_positive_images)\n\n self.embedding_storage = {id: embedding for id, embedding in zip(possible_positive_images_ids, pp_embeddings)}\n\n anchor_positives = []\n for anchor, possibles_positives, embedding in anchor_possible_positives_embeddings:\n anchor[\"embedding\"] = embedding\n for pp in possibles_positives:\n pp[\"embedding\"] = self.embedding_storage[pp[\"image_id\"]]\n positive = self.choose_possibility(anchor, possibles_positives, most_similar=False)\n anchor_positives.append((anchor, positive))\n\n assert len(anchor_positives) == len(anchor_possible_positives)\n\n return anchor_positives\n\n def build_anchor_positive_negatives(self, split):\n \"\"\"\n Negative from Same Category. 50/50 Chance of the image being from Shop or Consumer\n \"\"\"\n anchor_positives = self.build_anchor_positives(split)\n df_helper = self.df_helper[split]\n anchor_positives_negative_possibilities = []\n for idx, (a, p) in tqdm(enumerate(anchor_positives), desc=\"APN\", total=len(anchor_positives)):\n cat_id = a[\"categories_in_image_idx\"]\n pair_id = a[\"pair_id\"]\n possible_negatives = df_helper.shop.by_cat_id[cat_id]\n possible_negatives = list(filter(lambda d: pair_id != d[\"pair_id\"], possible_negatives))\n possible_negatives = random.sample(possible_negatives,\n min(self.number_possibilites, len(possible_negatives)))\n anchor_positives_negative_possibilities.append((a, p, possible_negatives))\n\n embedding_jobs = []\n for a, p, possible_negatives in anchor_positives_negative_possibilities:\n image_ids = (map(lambda d: d[\"image_id\"], possible_negatives))\n image_ids = list(filter(lambda id: id not in self.embedding_storage.keys(), image_ids))\n embedding_jobs.extend(image_ids)\n\n if len(embedding_jobs) > 0:\n image_paths = list(map(lambda x: self.full_image_path(split, x), embedding_jobs))\n negative_1_ds = tf.data.Dataset.from_tensor_slices(image_paths) \\\n .map(preprocess_image((224, 224))) \\\n .batch(64, drop_remainder=False) \\\n .prefetch(tf.data.AUTOTUNE)\n\n embeddings = []\n for batch in tqdm(negative_1_ds, desc=\"N2-Embeddings\"):\n embeddings.extend(self.embedding(batch))\n\n self.embedding_storage.update({id: embedding for id, embedding in\n zip(embedding_jobs, embeddings)})\n\n # 1312\n raise Exception(\"TODO Embedd new Negatives\")\n # if this Exp. 
occours embedd all Images from withing Embedding_jobs and append them to\n # self::embedding_storage\n ###\n apn = []\n for a, p, possible_negatives in anchor_positives_negative_possibilities:\n for pp in possible_negatives:\n pp[\"embedding\"] = self.embedding_storage[pp[\"image_id\"]]\n if len(possible_negatives) > 1:\n negative = self.choose_possibility(a, possible_negatives, most_similar=True)\n else:\n negative = possible_negatives\n if all([a, p, negative]):\n apn.append((a, p, negative))\n return apn\n\n def build_anchor_positive_negative1_negative2(self, split, validate=False):\n \"\"\"\n Negative1 from Same Category. 50/50 Chance of the image being from Shop or Consumer\n Negative2 from different Category. 50/50 Chance of Image being from Shop or Consumer\n :param split:\n :return:\n \"\"\"\n\n apn = self.build_anchor_positive_negatives(split)\n complementary_cat_ids = self.complementary_cat_ids[split]\n df_helper = self.df_helper[split]\n apn_possiblen2 = []\n\n def _complementary_cat_ids_(cat_id, depth=0):\n if depth > 0:\n _cat_id = \"/\".join(cat_id.split(\"/\")[:-depth])\n if len(_cat_id) < 1:\n return None\n else:\n _cat_id = cat_id\n\n cat_ids = complementary_cat_ids.get(_cat_id, None)\n if cat_ids:\n return cat_ids\n return _complementary_cat_ids_(cat_id, depth + 1)\n\n for idx, (anchor, positive, negative) in enumerate(apn):\n cat_id = anchor[\"categories_in_image\"]\n pair_id = anchor[\"pair_id\"]\n complementary_cat_ids_ = _complementary_cat_ids_(cat_id, 0)\n\n if complementary_cat_ids_ is None:\n continue\n\n if len(complementary_cat_ids_) < 1:\n raise Exception(\"#Todo 8964654\") # <- shouldn't occur\n\n possible_cat = complementary_cat_ids_.pop(0)\n complementary_cat_ids_.append(possible_cat)\n\n # if idx % 2 == 0:\n # possible_negatives2 = df_helper.user.by_items_in_img[possible_cat]\n # else:\n # possible_negatives2 = df_helper.shop.by_items_in_img[possible_cat]\n possible_negatives2 = df_helper.user.by_items_in_img[possible_cat]\n\n if len(possible_negatives2) < 1:\n raise Exception(\"#Todo #213213\")\n\n _a_id = anchor[\"categories_in_image_idx\"]\n\n possible_negatives2 = filter(\n lambda d: d[\"categories_in_image_idx\"] != pair_id and d[\"categories_in_image_idx\"] != _a_id,\n possible_negatives2)\n possible_negatives2 = list(possible_negatives2)\n possible_negatives2 = random.sample(possible_negatives2,\n min(self.number_possibilites, len(possible_negatives2)))\n apn_possiblen2.append((anchor, positive, negative, possible_negatives2))\n\n #\n possible_negatives_2 = distinct(flatten([[y[\"image_id\"] for y in x[3]] for x in apn_possiblen2]))\n image_ids = list(filter(lambda id: id not in self.embedding_storage.keys(), possible_negatives_2))\n image_paths = list(map(lambda x: self.full_image_path(split, x), image_ids))\n\n negative_2_ds = tf.data.Dataset.from_tensor_slices(image_paths) \\\n .map(preprocess_image((224, 224))) \\\n .batch(64, drop_remainder=False) \\\n .prefetch(tf.data.AUTOTUNE)\n\n embeddings = []\n for batch in tqdm(negative_2_ds, desc=\"N2-Embeddings\"):\n embeddings.extend(self.embedding(batch))\n\n self.embedding_storage.update({id: embedding for id, embedding in zip(image_ids, embeddings)})\n\n apnn = []\n for anchor, positive, negative, possible_negatives2 in apn_possiblen2:\n if not (type(negative) == dict) and len(negative) == 1:\n negative = negative[0]\n for pp in possible_negatives2:\n pp[\"embedding\"] = self.embedding_storage[pp[\"image_id\"]]\n\n assert type(possible_negatives2) == list\n\n if len(possible_negatives2) == 1:\n 
negative2 = possible_negatives2[0]\n elif len(possible_negatives2) > 1:\n negative2 = self.choose_possibility(negative, possible_negatives2, most_similar=True)\n else:\n negative2 = None\n if all([anchor, positive, negative, negative2]):\n apnn.append((anchor, positive, negative, negative2))\n\n assert len(apnn) <= len(apn) and (len(apnn) / len(apn)) > 0.9, f\"Couldn't build enough Pairs. \" \\\n f\"{100 * len(apnn) / len(apn):.0f}% Successful \"\n if validate:\n self.validate_apnn(apnn, split)\n return apnn\n\n def validate_apnn(self, apnn, split):\n assert all([all(d) for d in apnn]), \"At least one None in Data\"\n data_sources = {\"a\": {\"user\": 0, \"shop\": 0}, \"p\": {\"user\": 0, \"shop\": 0}, \"n1\": {\"user\": 0, \"shop\": 0},\n \"n2\": {\"user\": 0, \"shop\": 0}}\n for d in apnn:\n # checking cat_id\n a_cid, p_cid, n1_cid, n2_cid = list(map(lambda d: d[\"categories_in_image_idx\"], d))\n\n assert n2_cid not in [a_cid, p_cid, n1_cid], f\"Negative2 in APN! APN: {n2_cid} N2 {[a_cid, p_cid, n1_cid]}\"\n assert a_cid == p_cid == n1_cid, f\"A, P, N1 should have same Category. A: {a_cid} P: {p_cid} N1: {n1_cid}\"\n\n # checking pair_id\n a_pid, p_id, n1_pid, n2_pid = list(map(lambda d: d[\"pair_id\"], d))\n assert a_pid == p_id, f\"A and P must be of same Item! Pair-ID (A): {a_pid} - (P): {p_id}\"\n\n assert n1_pid not in [a_pid, n2_pid], \"A/P and N1 shouldn't be of same Item!\"\n assert n2_pid not in [a_pid], \"A/P and N2 shouldn't be of same Item!\"\n\n a_source, p_source, n1_sourced, n2_source = list(map(lambda d: d[\"source\"], d))\n data_sources[\"a\"][a_source] += 1\n data_sources[\"p\"][p_source] += 1\n data_sources[\"n1\"][n1_sourced] += 1\n data_sources[\"n2\"][n2_source] += 1\n\n logger.debug(f\"Validate APNN ({len(apnn)} Pairs) Consisting:\")\n z_fill_length = len(f\"{len(apnn)}\")\n info_txt = load_info_path(self.base_path, split)\n\n lines = []\n for item, _dict in data_sources.items():\n total = _dict['user'] + _dict['shop']\n user_ratio, shop_ratio = _dict['user'] / total, _dict['shop'] / total\n user_ratio, shop_ratio = 100 * user_ratio, 100 * shop_ratio\n user_ratio, shop_ratio = f\"{user_ratio: .0f}%\", f\"{shop_ratio: .0f}%\"\n user_ratio = (\" \" * (5 - len(user_ratio))) + user_ratio # <- padding\n shop_ratio = (\" \" * (5 - len(shop_ratio))) + shop_ratio\n ratio = f\"{user_ratio} User-Images. 
{shop_ratio} In-Shop Images.\"\n\n line = (item + \" \" f\"\\t{str(_dict['user']).zfill(z_fill_length)} User \" +\n f\"and {str(_dict['shop']).zfill(z_fill_length)} Shop Images.\"\n + \" \" + ratio)\n lines.append(line + \"\\n\")\n logger.debug(f\"Write Infos to: {info_txt}\")\n with open(info_txt, \"w+\") as f:\n f.writelines(lines)\n\n @staticmethod\n def pair_only_keep_image_id(apnn):\n only_keep_img_id = lambda i: str(i[\"image_id\"]).zfill(6)\n only_img_id_over_pairs = lambda p: list(map(only_keep_img_id, p))\n return list(map(only_img_id_over_pairs, apnn))\n\n @staticmethod\n def save_pairs_to_csv(base_path, split, apnn):\n apnn_ids = DeepFashion2PairsGenerator.pair_only_keep_image_id(apnn)\n if len(apnn_ids[0]) == 4:\n header = [\"a\", \"p\", \"n1\", \"n2\"]\n elif len(apnn_ids[0]) < 4:\n header = [\"a\", \"p\", \"n\"][:len(apnn_ids[0])]\n else:\n raise Exception(f\"Pairs consisting of {len(apnn_ids[0])} Items not Supported.\")\n quadruplets_csv_path = load_image_quadruplets_csv_path(base_path, split)\n df = pd.DataFrame(apnn_ids, columns=header)\n df.to_csv(quadruplets_csv_path, index=False)\n\n @staticmethod\n def load_pairs_from_csv(base_path, split, force=False, nrows=None):\n quadruplets_csv_path = load_image_quadruplets_csv_path(base_path, split)\n\n if force and quadruplets_csv_path.exists():\n quadruplets_csv_path.unlink()\n\n if not quadruplets_csv_path.exists():\n apnn = DeepFashion2PairsGenerator(base_path).build_anchor_positive_negative1_negative2(split)\n DeepFashion2PairsGenerator.save_pairs_to_csv(base_path, split, apnn)\n\n return pd.read_csv(quadruplets_csv_path, nrows=nrows)\n\n\nif __name__ == \"__main__\":\n base_path = f\"F:\\workspace\\datasets\\deep_fashion_256\"\n print(splits)\n # for split in splits: #\"train\"\n apnn = DeepFashion2PairsGenerator(base_path,\n embedding=None,\n split_suffix=\"_256\",\n nrows=32).build_anchor_positive_negative1_negative2(\"train\")\n # DeepFashionPairsGenerator.save_pairs_to_csv(base_path, split, apnn)\n # DeepFashionPairsGenerator(base_path).build_anchor_positive_negative1_negative2(splits[1])\n" ]
[ [ "tensorflow.data.Dataset.from_tensor_slices", "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
mathreader/DeepKoopman
[ "26197fb131ac9bed6b5bd6a4d1171fc109b4f170" ]
[ "helperfns.py" ]
[ "import datetime\nimport pickle\nimport time\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n\ndef stack_data(data, num_shifts, len_time):\n \"\"\"Stack data from a 2D array into a 3D array.\n\n Arguments:\n data -- 2D data array to be reshaped\n num_shifts -- number of shifts (time steps) that losses will use (maximum is len_time - 1)\n len_time -- number of time steps in each trajectory in data\n\n Returns:\n data_tensor -- data reshaped into 3D array, shape: num_shifts + 1, num_traj * (len_time - num_shifts), n\n\n Side effects:\n None\n \"\"\"\n nd = data.ndim\n if nd > 1:\n n = data.shape[1]\n else:\n data = (np.asmatrix(data)).getT()\n n = 1\n num_traj = int(data.shape[0] / len_time)\n\n new_len_time = len_time - num_shifts\n\n data_tensor = np.zeros([num_shifts + 1, num_traj * new_len_time, n])\n\n for j in np.arange(num_shifts + 1):\n for count in np.arange(num_traj):\n data_tensor_range = np.arange(count * new_len_time, new_len_time + count * new_len_time)\n data_tensor[j, data_tensor_range, :] = data[count * len_time + j: count * len_time + j + new_len_time, :]\n\n return data_tensor\n\n\ndef choose_optimizer(params, regularized_loss, trainable_var):\n \"\"\"Choose which optimizer to use for the network training.\n\n Arguments:\n params -- dictionary of parameters for experiment\n regularized_loss -- loss, including regularization\n trainable_var -- list of trainable TensorFlow variables\n\n Returns:\n optimizer -- optimizer from TensorFlow Class optimizer\n\n Side effects:\n None\n\n Raises ValueError if params['opt_alg'] is not 'adam', 'adadelta', 'adagrad', 'adagradDA', 'ftrl', 'proximalGD',\n 'proximalAdagrad', or 'RMS'\n \"\"\"\n if params['opt_alg'] == 'adam':\n optimizer = tf.train.AdamOptimizer(params['learning_rate']).minimize(regularized_loss, var_list=trainable_var)\n elif params['opt_alg'] == 'adadelta':\n if params['decay_rate'] > 0:\n optimizer = tf.train.AdadeltaOptimizer(params['learning_rate'], params['decay_rate']).minimize(\n regularized_loss,\n var_list=trainable_var)\n else:\n # defaults 0.001, 0.95\n optimizer = tf.train.AdadeltaOptimizer(params['learning_rate']).minimize(regularized_loss,\n var_list=trainable_var)\n elif params['opt_alg'] == 'adagrad':\n # also has initial_accumulator_value parameter\n optimizer = tf.train.AdagradOptimizer(params['learning_rate']).minimize(regularized_loss,\n var_list=trainable_var)\n elif params['opt_alg'] == 'adagradDA':\n # Be careful when using AdagradDA for deep networks as it will require careful initialization of the gradient\n # accumulators for it to train.\n optimizer = tf.train.AdagradDAOptimizer(params['learning_rate'], tf.get_global_step()).minimize(\n regularized_loss,\n var_list=trainable_var)\n elif params['opt_alg'] == 'ftrl':\n # lots of hyperparameters: learning_rate_power, initial_accumulator_value,\n # l1_regularization_strength, l2_regularization_strength\n optimizer = tf.train.FtrlOptimizer(params['learning_rate']).minimize(regularized_loss, var_list=trainable_var)\n elif params['opt_alg'] == 'proximalGD':\n # can have built-in reg.\n optimizer = tf.train.ProximalGradientDescentOptimizer(params['learning_rate']).minimize(regularized_loss,\n var_list=trainable_var)\n elif params['opt_alg'] == 'proximalAdagrad':\n # initial_accumulator_value, reg.\n optimizer = tf.train.ProximalAdagradOptimizer(params['learning_rate']).minimize(regularized_loss,\n var_list=trainable_var)\n elif params['opt_alg'] == 'RMS':\n # momentum, epsilon, centered (False/True)\n if 
params['decay_rate'] > 0:\n optimizer = tf.train.RMSPropOptimizer(params['learning_rate'], params['decay_rate']).minimize(\n regularized_loss,\n var_list=trainable_var)\n else:\n # default decay_rate 0.9\n optimizer = tf.train.RMSPropOptimizer(params['learning_rate']).minimize(regularized_loss,\n var_list=trainable_var)\n else:\n raise ValueError(\"chose invalid opt_alg %s in params dict\" % params['opt_alg'])\n return optimizer\n\n\ndef check_progress(start, best_error, params):\n \"\"\"Check on the progress of the network training and decide if it's time to stop.\n\n Arguments:\n start -- time that experiment started\n best_error -- best error so far in training\n params -- dictionary of parameters for experiment\n\n Returns:\n finished -- 0 if should continue training, 1 if should stop training\n save_now -- 0 if don't need to save results, 1 if should save results\n\n Side effects:\n May update params dict: stop_condition, been5min, been20min, been40min, been1hr, been2hr, been3hr, been4hr,\n beenHalf\n \"\"\"\n finished = 0\n save_now = 0\n\n current_time = time.time()\n\n if not params['been5min']:\n # only check 5 min progress once\n if current_time - start > 5 * 60:\n if best_error > params['min_5min']:\n print(\"too slowly improving in first five minutes: err %.15f\" % best_error)\n params['stop_condition'] = 'too slowly improving in first 5 min'\n finished = 1\n return finished, save_now\n else:\n print(\"been 5 minutes, err = %.15f < %.15f\" % (best_error, params['min_5min']))\n params['been5min'] = best_error\n if not params['been20min']:\n # only check 20 min progress once\n if current_time - start > 20 * 60:\n if best_error > params['min_20min']:\n print(\"too slowly improving in first 20 minutes: err %.15f\" % best_error)\n params['stop_condition'] = 'too slowly improving in first 20 min'\n finished = 1\n return finished, save_now\n else:\n print(\"been 20 minutes, err = %.15f < %.15f\" % (best_error, params['min_20min']))\n params['been20min'] = best_error\n if not params['been40min']:\n # only check 40 min progress once\n if current_time - start > 40 * 60:\n if best_error > params['min_40min']:\n print(\"too slowly improving in first 40 minutes: err %.15f\" % best_error)\n params['stop_condition'] = 'too slowly improving in first 40 min'\n finished = 1\n return finished, save_now\n else:\n print(\"been 40 minutes, err = %.15f < %.15f\" % (best_error, params['min_40min']))\n params['been40min'] = best_error\n if not params['been1hr']:\n # only check 1 hr progress once\n if current_time - start > 60 * 60:\n if best_error > params['min_1hr']:\n print(\"too slowly improving in first hour: err %.15f\" % best_error)\n params['stop_condition'] = 'too slowly improving in first hour'\n finished = 1\n return finished, save_now\n else:\n print(\"been 1 hour, err = %.15f < %.15f\" % (best_error, params['min_1hr']))\n save_now = 1\n params['been1hr'] = best_error\n if not params['been2hr']:\n # only check 2 hr progress once\n if current_time - start > 2 * 60 * 60:\n if best_error > params['min_2hr']:\n print(\"too slowly improving in first two hours: err %.15f\" % best_error)\n params['stop_condition'] = 'too slowly improving in first two hours'\n finished = 1\n return finished, save_now\n else:\n print(\"been 2 hours, err = %.15f < %.15f\" % (best_error, params['min_2hr']))\n save_now = 1\n params['been2hr'] = best_error\n if not params['been3hr']:\n # only check 3 hr progress once\n if current_time - start > 3 * 60 * 60:\n if best_error > params['min_3hr']:\n print(\"too slowly 
improving in first three hours: err %.15f\" % best_error)\n params['stop_condition'] = 'too slowly improving in first three hours'\n finished = 1\n return finished, save_now\n else:\n print(\"been 3 hours, err = %.15f < %.15f\" % (best_error, params['min_3hr']))\n save_now = 1\n params['been3hr'] = best_error\n if not params['been4hr']:\n # only check 4 hr progress once\n if current_time - start > 4 * 60 * 60:\n if best_error > params['min_4hr']:\n print(\"too slowly improving in first four hours: err %.15f\" % best_error)\n params['stop_condition'] = 'too slowly improving in first four hours'\n finished = 1\n return finished, save_now\n else:\n print(\"been 4 hours, err = %.15f < %.15f\" % (best_error, params['min_4hr']))\n save_now = 1\n params['been4hr'] = best_error\n\n if not params['beenHalf']:\n # only check halfway progress once\n if current_time - start > params['max_time'] / 2:\n if best_error > params['min_halfway']:\n print(\"too slowly improving 1/2 of way in: val err %.15f\" % best_error)\n params['stop_condition'] = 'too slowly improving halfway in'\n finished = 1\n return finished, save_now\n else:\n print(\"Halfway through time, err = %.15f < %.15f\" % (best_error, params['min_halfway']))\n params['beenHalf'] = best_error\n\n if current_time - start > params['max_time']:\n params['stop_condition'] = 'past max time'\n finished = 1\n return finished, save_now\n\n return finished, save_now\n\n\ndef save_files(sess, csv_path, train_val_error, params, weights, biases):\n \"\"\"Save error files, weights, biases, and parameters.\n\n Arguments:\n sess -- TensorFlow session\n csv_path -- string for path to save error file as csv\n train_val_error -- table of training and validation errors\n params -- dictionary of parameters for experiment\n weights -- dictionary of weights for all networks\n biases -- dictionary of biases for all networks\n\n Returns:\n None (but side effect of saving files and updating params dict.)\n\n Side effects:\n Save train_val_error, each weight W, each bias b, and params dict to file.\n Update params dict: minTrain, minTest, minRegTrain, minRegTest\n \"\"\"\n np.savetxt(csv_path, train_val_error, delimiter=',')\n\n for key, value in weights.items():\n np.savetxt(csv_path.replace('error', key), np.asarray(sess.run(value)), delimiter=',')\n for key, value in biases.items():\n np.savetxt(csv_path.replace('error', key), np.asarray(sess.run(value)), delimiter=',')\n params['minTrain'] = np.min(train_val_error[:, 0])\n params['minTest'] = np.min(train_val_error[:, 1])\n params['minRegTrain'] = np.min(train_val_error[:, 2])\n params['minRegTest'] = np.min(train_val_error[:, 3])\n print(\"min train: %.12f, min val: %.12f, min reg. train: %.12f, min reg. 
val: %.12f\" % (\n params['minTrain'], params['minTest'], params['minRegTrain'], params['minRegTest']))\n save_params(params)\n\n\ndef save_params(params):\n \"\"\"Save parameter dictionary to file.\n\n Arguments:\n params -- dictionary of parameters for experiment\n\n Returns:\n None\n\n Side effects:\n Saves params dict to pkl file\n \"\"\"\n with open(params['model_path'].replace('ckpt', 'pkl'), 'wb') as f:\n pickle.dump(params, f, pickle.HIGHEST_PROTOCOL)\n\n\ndef set_defaults(params):\n \"\"\"Set defaults and make some checks in parameters dictionary.\n\n Arguments:\n params -- dictionary of parameters for experiment\n\n Returns:\n None (but side effect of updating params dict)\n\n Side effects:\n May update params dict\n\n Raises KeyError if params is missing data_name, len_time, data_train_len, delta_t, widths, hidden_widths_omega,\n num_evals, num_real, or num_complex_pairs\n Raises ValueError if num_evals != 2 * num_complex_pairs + num_real\n \"\"\"\n # defaults related to dataset\n if 'data_name' not in params:\n raise KeyError(\"Error: must give data_name as input to main\")\n if 'len_time' not in params:\n raise KeyError(\"Error, must give len_time as input to main\")\n if 'data_train_len' not in params:\n raise KeyError(\"Error, must give data_train_len as input to main\")\n if 'delta_t' not in params:\n raise KeyError(\"Error, must give delta_t as input to main\")\n\n # defaults related to saving results\n if 'folder_name' not in params:\n print(\"setting default: using folder named 'results'\")\n params['folder_name'] = 'results'\n if 'exp_suffix' not in params:\n print(\"setting default name of experiment\")\n params['exp_suffix'] = '_' + datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S_%f\")\n if 'model_path' not in params:\n print(\"setting default path for model\")\n exp_name = params['data_name'] + params['exp_suffix']\n params['model_path'] = \"./%s/%s_model.ckpt\" % (params['folder_name'], exp_name)\n\n # defaults related to network architecture\n if 'widths' not in params:\n raise KeyError(\"Error, must give widths as input to main\")\n print(params['widths'])\n if 'hidden_widths_omega' not in params:\n raise KeyError(\"Error, must give hidden_widths for omega net\")\n params['widths_omega_complex'] = [1, ] + params['hidden_widths_omega'] + [2, ]\n params['widths_omega_real'] = [1, ] + params['hidden_widths_omega'] + [1, ]\n print(params['widths_omega_complex'])\n print(params['widths_omega_real'])\n\n if 'act_type' not in params:\n print(\"setting default: activation function is ReLU\")\n params['act_type'] = 'relu'\n\n if 'num_evals' not in params:\n raise KeyError(\"Error, must give number of evals: num_evals\")\n if 'num_real' not in params:\n raise KeyError(\"Error, must give number of real eigenvalues: num_real\")\n if 'num_complex_pairs' not in params:\n raise KeyError(\"Error, must give number of pairs of complex eigenvalues: num_complex_pairs\")\n if params['num_evals'] != (2 * params['num_complex_pairs'] + params['num_real']):\n raise ValueError(\"Error, num_evals must equal 2*num_compex_pairs + num_real\")\n\n params['d'] = len(params['widths']) # d must be calculated like this\n\n # defaults related to initialization of parameters\n if 'seed' not in params:\n random_seed = 1; #np.random.randint(2 ** 30)\n print(\"setting default: choosing random seed of %d and saving to params\" % random_seed)\n params['seed'] = random_seed\n if 'dist_weights' not in params:\n print(\"setting default: distribution for weights on main net is tn (truncated normal)\")\n 
params['dist_weights'] = 'tn'\n if 'dist_weights_omega' not in params:\n print(\"setting default: distribution for weights on auxiliary net is tn (truncated normal)\")\n params['dist_weights_omega'] = 'tn'\n if 'dist_biases' not in params:\n print(\"setting default: biases in main net will be init. to default number\")\n params['dist_biases'] = 0\n if 'dist_biases_omega' not in params:\n print(\"setting default: biases in auxiliary net will be init. to default number\")\n params['dist_biases_omega'] = 0\n\n if 'scale' not in params:\n print(\"setting default: scale for weights in main net is 0.1 (applies to tn distribution)\")\n params['scale'] = 0.1\n if 'scale_omega' not in params:\n print(\"setting default: scale for weights in omega net is 0.1 (applies to tn distribution)\")\n params['scale_omega'] = 0.1\n\n if isinstance(params['dist_weights'], str):\n params['dist_weights'] = [params['dist_weights']] * (len(params['widths']) - 1)\n if isinstance(params['dist_biases'], int):\n params['dist_biases'] = [params['dist_biases']] * (len(params['widths']) - 1)\n if isinstance(params['dist_weights_omega'], str):\n params['dist_weights_omega'] = [params['dist_weights_omega']] * (len(params['widths_omega_real']) - 1)\n if isinstance(params['dist_biases_omega'], int):\n params['dist_biases_omega'] = [params['dist_biases_omega']] * (len(params['widths_omega_real']) - 1)\n\n # defaults related to loss function\n if 'auto_first' not in params:\n params['auto_first'] = 0\n if 'relative_loss' not in params:\n print(\"setting default: loss is not relative\")\n params['relative_loss'] = 0\n\n if 'shifts' not in params:\n print(\"setting default: penalty on all shifts from 1 to num_shifts\")\n params['shifts'] = np.arange(params['num_shifts']) + 1\n if 'shifts_middle' not in params:\n print(\"setting default: penalty on all middle shifts from 1 to num_shifts_middle\")\n params['shifts_middle'] = np.arange(params['num_shifts_middle']) + 1\n params['num_shifts'] = len(params['shifts']) # must be calculated like this\n params['num_shifts_middle'] = len(params['shifts_middle']) # must be calculated like this\n\n if 'recon_lam' not in params:\n print(\"setting default: weight on reconstruction is 1.0\")\n params['recon_lam'] = 1.0\n if 'mid_shift_lam' not in params:\n print(\"setting default: weight on loss3 is 1.0\")\n params['mid_shift_lam'] = 1.0\n if 'L1_lam' not in params:\n print(\"setting default: L1_lam is .00001\")\n params['L1_lam'] = .00001\n if 'L2_lam' not in params:\n print(\"setting default: no L2 regularization\")\n params['L2_lam'] = 0.0\n if 'Linf_lam' not in params:\n print(\"setting default: no L_inf penalty\")\n params['Linf_lam'] = 0.0\n\n # defaults related to training\n if 'num_passes_per_file' not in params:\n print(\"setting default: 1000 passes per training file\")\n params['num_passes_per_file'] = 1000\n if 'num_steps_per_batch' not in params:\n print(\"setting default: 1 step per batch before moving to next training file\")\n params['num_steps_per_batch'] = 1\n if 'num_steps_per_file_pass' not in params:\n print(\"setting default: up to 1000000 steps per training file before moving to next one\")\n params['num_steps_per_file_pass'] = 1000000\n if 'learning_rate' not in params:\n print(\"setting default learning rate\")\n params['learning_rate'] = .003\n if 'opt_alg' not in params:\n print(\"setting default: use Adam optimizer\")\n params['opt_alg'] = 'adam'\n if 'decay_rate' not in params:\n print(\"setting default: decay_rate is 0 (applies to some optimizer algorithms)\")\n 
params['decay_rate'] = 0\n if 'batch_size' not in params:\n print(\"setting default: no batches (use whole training file at once)\")\n params['batch_size'] = 0\n\n # setting defaults related to keeping track of training time and progress\n if 'max_time' not in params:\n print(\"setting default: run up to 6 hours\")\n params['max_time'] = 6 * 60 * 60 # 6 hours\n if 'min_5min' not in params:\n params['min_5min'] = 10 ** (-2)\n print(\"setting default: must reach %f in 5 minutes\" % params['min_5min'])\n if 'min_20min' not in params:\n params['min_20min'] = 10 ** (-3)\n print(\"setting default: must reach %f in 20 minutes\" % params['min_20min'])\n if 'min_40min' not in params:\n params['min_40min'] = 10 ** (-4)\n print(\"setting default: must reach %f in 40 minutes\" % params['min_40min'])\n if 'min_1hr' not in params:\n params['min_1hr'] = 10 ** (-5)\n print(\"setting default: must reach %f in 1 hour\" % params['min_1hr'])\n if 'min_2hr' not in params:\n params['min_2hr'] = 10 ** (-5.25)\n print(\"setting default: must reach %f in 2 hours\" % params['min_2hr'])\n if 'min_3hr' not in params:\n params['min_3hr'] = 10 ** (-5.5)\n print(\"setting default: must reach %f in 3 hours\" % params['min_3hr'])\n if 'min_4hr' not in params:\n params['min_4hr'] = 10 ** (-5.75)\n print(\"setting default: must reach %f in 4 hours\" % params['min_4hr'])\n if 'min_halfway' not in params:\n params['min_halfway'] = 10 ** (-4)\n print(\"setting default: must reach %f in first half of time allotted\" % params['min_halfway'])\n\n # initializing trackers for how long the training has run\n params['been5min'] = 0\n params['been20min'] = 0\n params['been40min'] = 0\n params['been1hr'] = 0\n params['been2hr'] = 0\n params['been3hr'] = 0\n params['been4hr'] = 0\n params['beenHalf'] = 0\n\n\ndef num_shifts_in_stack(params):\n \"\"\"Calculate how many time points (shifts) will be used in loss functions.\n\n Arguments:\n params -- dictionary of parameters for experiment\n\n Returns:\n max_shifts_to_stack -- max number of shifts to use in loss functions\n\n Side effects:\n None\n \"\"\"\n max_shifts_to_stack = 1\n if params['num_shifts']:\n max_shifts_to_stack = max(max_shifts_to_stack, max(params['shifts']))\n if params['num_shifts_middle']:\n max_shifts_to_stack = max(max_shifts_to_stack, max(params['shifts_middle']))\n\n return max_shifts_to_stack\n" ]
[ [ "tensorflow.compat.v1.train.AdagradOptimizer", "tensorflow.compat.v1.train.RMSPropOptimizer", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.compat.v1.train.ProximalGradientDescentOptimizer", "numpy.min", "tensorflow.compat.v1.disable_v2_behavior", "numpy.arange", "tensorflow.compat.v1.train.ProximalAdagradOptimizer", "tensorflow.compat.v1.train.FtrlOptimizer", "tensorflow.compat.v1.get_global_step", "numpy.asmatrix", "numpy.savetxt", "tensorflow.compat.v1.train.AdadeltaOptimizer", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cpp-pm/boost
[ "38c6c8c07f2fcc42d573b10807fef27ec14930f8" ]
[ "libs/numeric/ublas/benchmarks/plot.py" ]
[ "#!/usr/bin/env python\n#\n# Copyright (c) 2018 Stefan Seefeld\n# All rights reserved.\n#\n# This file is part of Boost.uBLAS. It is made available under the\n# Boost Software License, Version 1.0.\n# (Consult LICENSE or http://www.boost.org/LICENSE_1_0.txt)\n\nimport argparse\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass plot(object):\n\n def __init__(self, label, data):\n self.label = label\n self.data = data\n\n\ndef load_file(filename):\n\n lines = open(filename, 'r').readlines()\n label = lines[0][1:-1].strip()\n lines = [l.strip() for l in lines]\n lines = [l.split('#', 1)[0] for l in lines]\n lines = [l for l in lines if l]\n data = [l.split() for l in lines]\n return plot(label, list(zip(*data)))\n\n\ndef main(argv):\n\n parser = argparse.ArgumentParser(prog=argv[0], description='benchmark plotter')\n parser.add_argument('data', nargs='+', help='benchmark data to plot')\n parser.add_argument('--log', choices=['no', 'all', 'x', 'y'], help='use a logarithmic scale')\n args = parser.parse_args(argv[1:])\n runs = [load_file(d) for d in args.data]\n plt.title('Benchmark plot')\n plt.xlabel('size')\n plt.ylabel('time (s)')\n if args.log == 'all':\n plot = plt.loglog\n elif args.log == 'x':\n plot = plt.semilogx\n elif args.log == 'y':\n plot = plt.semilogy\n else:\n plot = plt.plot\n plots = [plot(r.data[0], r.data[1], label=r.label) for r in runs]\n plt.legend()\n plt.show()\n return True\n\n \nif __name__ == '__main__':\n\n import sys\n sys.exit(0 if main(sys.argv) else 1)\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cmcuervol/HydroBalbo
[ "0c70536305d12f6fb9fb8fe7ce7cdb08d88472af" ]
[ "04_QuantUncertain.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 11 18:00:30 2020\n\n@author: Andres\n\"\"\"\nimport os\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\nimport scipy.stats as st\nimport pylab as plt\n\nfrom Modules import Read\nfrom Modules.Utils import Listador, FindOutlier\nfrom Modules.FitStats import QuantilBestFit\nfrom Modules.ENSO import ONIdata, OuliersENSOjust\n\nfrom tqdm import tqdm\n\nONI = ONIdata()\nONI = ONI['Anomalie'].astype(float)\nENSO = ONI[np.where((ONI.values<=-0.5)|(ONI.values>=0.5))[0]]\n\n################################ INPUT #####################################\n\n# Est_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'CleanData/PPT'))\nEst_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'CleanData/QDL'))\nPath_out = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Ajustes'))\n\n\nEstaciones = Listador(Est_path,final='.csv')\n\nif Est_path.endswith('CleanSedimentos'):\n Path_out = os.path.abspath(os.path.join(os.path.dirname(__file__), 'Sedimentos/Ajustes/'))\n Estaciones = Listador(Est_path, inicio='Trans',final='.csv')\n\n\nn_boots = int(1E2)\nsplit_fact = 0.8\nTr = np.array([2.33, 5, 10, 25, 50, 100, 200, 500, 1000])\nQuant = pd.DataFrame([], columns=Tr)\nUncer = pd.DataFrame([], columns=Tr)\n\nQ_LM = np.empty((len(Estaciones), len(Tr)), dtype=float)\nQ_MEL= np.empty((len(Estaciones), len(Tr)), dtype=float)\nu_LM = np.empty((len(Estaciones), len(Tr)), dtype=float)\nu_MEL= np.empty((len(Estaciones), len(Tr)), dtype=float)\ndist = np.empty(len(Estaciones), dtype='<U15')\n\npbar = tqdm(total=len(Estaciones), desc='Fittin station: ')\n\nfor i in range(len(Estaciones)):\n if Est_path.endswith('CleanSedimentos') == False:\n Meta = pd.read_csv(os.path.join(Est_path, Estaciones[i].split('.')[0]+'.meta'),index_col=0)\n Name = Meta.iloc[0].values[0]\n\n if Est_path.endswith('CleanNiveles'):\n Est = Name + 'NR'\n else:\n Est = Name+'Caudal' if Meta.iloc[-4].values[0]=='CAUDAL' else Name+'Nivel'\n\n serie = Read.EstacionCSV_pd(Estaciones[i], Est, path=Est_path)\n\n else:\n Est = Estaciones[i].split('_')[1].split('.csv')[0]\n serie = pd.read_csv(os.path.join(Est_path, Estaciones[i]), index_col=0)\n serie.index = pd.DatetimeIndex(serie.index)\n\n\n serie = OuliersENSOjust(serie, ENSO, lim_inf=0)\n\n serie = serie.groupby(lambda y : y.year).max()\n serie = serie[~np.isnan(serie.values)].values.ravel()\n\n if len(serie) == 0:\n continue\n try:\n Q_LM[i,:], Q_MEL[i,:], dist[i] = QuantilBestFit(serie, Tr)\n except:\n Q_LM [i,:] *= np.nan\n Q_MEL[i,:] *= np.nan\n dist[i] = 'fit_failure'\n\n unc_LM = np.empty((n_boots,len(Tr)),dtype=float)\n unc_MEL = np.empty((n_boots,len(Tr)),dtype=float)\n\n barra = tqdm(total=n_boots, desc=f'bootstraping {i}/{len(Estaciones)}: ')\n for b in range(n_boots):\n if split_fact*len(serie)<4:\n size = 4\n if len(serie)>10:\n size = int(0.5*len(serie))\n else:\n size = int(split_fact*len(serie))\n\n sample = np.random.choice(serie, size=size)\n try:\n unc_LM[b,:], unc_MEL[b,:], _ = QuantilBestFit(sample, Tr)\n except:\n unc_LM [b,:] *= np.nan\n unc_MEL[b,:] *= np.nan\n barra.update(1)\n barra.close()\n\n u_LM [i,:] = np.nanstd(unc_LM, ddof=1, axis=0)\n u_MEL[i,:] = np.nanstd(unc_MEL, ddof=1, axis=0)\n\n quant = pd.Series(Q_LM[i],name=Est+'_LM', index=Tr)\n Quant = Quant.append(quant)\n quant = pd.Series(Q_MEL[i],name=Est+'_MEL', index=Tr)\n Quant = Quant.append(quant)\n\n uncer = pd.Series(u_LM[i],name=Est+'_LM', index=Tr)\n Uncer = Uncer.append(uncer)\n uncer = 
pd.Series(u_MEL[i],name=Est+'_MEL', index=Tr)\n    Uncer = Uncer.append(uncer)\n\n    pbar.update(1)\n\npbar.close()\n\n\n\nif Est_path.endswith('PPT'):\n    suffix = 'PPT'\nelif Est_path.endswith('QDL'):\n    suffix = 'QDL'\nelse:\n    suffix = ''\n\n\nQuant.to_csv(os.path.join(Path_out,f'CuantilesIncertidumbre_{suffix}.csv'))\nUncer.to_csv(os.path.join(Path_out,f'Incertidumbre_{suffix}.csv'))\n" ]
[ [ "pandas.Series", "numpy.random.choice", "numpy.isnan", "pandas.DatetimeIndex", "pandas.DataFrame", "numpy.nanstd", "numpy.array", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
infant-cognition-tampere/trial-srt-variability-analysis
[ "15cfe50f419df3fbac958a4d81da057154aaed40" ]
[ "tasks/lib/utils.py" ]
[ "\nimport gazelib\nimport numpy as np\nimport scipy as sp\n\ndef iter_sequence_files(input_files):\n for source_path in input_files:\n yield gazelib.containers.CommonV1(source_path)\n\ndef iter_target_periods(c):\n '''Parameters: c: CommonV1 object'''\n try:\n targets = c.iter_slices_by_tag('icl/experiment/reaction/period/target')\n except gazelib.containers.CommonV1.MissingTagException:\n # Skip empty and return empty generator\n # See http://stackoverflow.com/a/13243870/638546\n return\n\n for index, tgt in enumerate(targets):\n print('Target period #' + str(index).zfill(2))\n yield tgt.slice_first_microseconds(1000000)\n\ndef is_valid_saccade(srt, duration, mse):\n '''True if saccade is valid'''\n dur = duration\n return (100000 < srt and srt < 800000 and\n 10000 < dur and dur < 80000 and\n mse < 0.05)\n\ndef to_milliseconds(srt):\n return round(srt / 1000)\n\ndef get_srt_statistics(srts):\n # 1 / SRT is normally distributed\n n = len(srts)\n\n if n <= 0:\n return {\n 'n': n,\n 'mean': None,\n 'inv_mean': None,\n 'inv_std': None,\n 'inv_mean_ci95': [None, None],\n 'inv_mean_ci68': [None, None],\n 'mean_ci95': [None, None],\n 'mean_ci68': [None, None]\n }\n\n inv_srts = [1 / t for t in srts]\n inv_mean = np.mean(inv_srts)\n mean = 1 / inv_mean\n\n if n <= 1:\n return {\n 'n': n,\n 'mean': mean,\n 'inv_mean': inv_mean,\n 'inv_std': None,\n 'inv_mean_ci95': [None, None],\n 'inv_mean_ci68': [None, None],\n 'mean_ci95': [None, None],\n 'mean_ci68': [None, None]\n }\n\n inv_sigma = np.std(inv_srts)\n inv_interval_scale = inv_sigma / np.sqrt(n)\n inv_interval = sp.stats.norm.interval(0.95, loc=inv_mean,\n scale=inv_interval_scale)\n inv_interv68 = sp.stats.norm.interval(0.68, loc=inv_mean,\n scale=inv_interval_scale)\n inv_interval = list(inv_interval)\n inv_interv68 = list(inv_interv68)\n interval = list(reversed([1 / t for t in inv_interval]))\n interv68 = list(reversed([1 / t for t in inv_interv68]))\n\n return {\n 'n': len(srts),\n 'mean': mean,\n 'inv_mean': inv_mean,\n 'inv_std': inv_sigma,\n 'inv_mean_ci95': inv_interval,\n 'inv_mean_ci68': inv_interv68,\n 'mean_ci95': interval,\n 'mean_ci68': interv68\n }\n\ndef get_participant_id(sequences):\n '''\n Get first participant ID from a list of sequences\n '''\n first_seq = sequences[0]\n first_trial = first_seq[0]\n return first_trial['head_id']\n\ndef get_sequences_per_participant(sequences):\n '''\n Return list (A) of list (B), where list (B) contains all trial sequences\n of single participant. This helps us to split the data for participant-wise\n analysis.\n '''\n\n def get_participant_id(sequence):\n first_trial = sequence[0]\n return first_trial['head_id']\n\n parts = {}\n for sequence in sequences:\n # Skip empty sequences. There has been at least one.\n if len(sequence) == 0:\n continue\n\n i = get_participant_id(sequence)\n\n if i in parts:\n parts[i].append(sequence)\n else:\n # Add first sequence\n parts[i] = [sequence]\n\n # Convert dict to list\n return list(parts.values())\n" ]
[ [ "numpy.std", "scipy.stats.norm.interval", "numpy.mean", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aleaf/MFsetup
[ "85cc44eb767c595b19006e43587699be4771f818" ]
[ "mfsetup/tests/plot.py" ]
[ "import glob\nfrom pathlib import Path\n\nimport numpy as np\nimport rasterio\nfrom matplotlib import pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nfrom mfsetup import load_modelgrid\n\n\ndef make_lake_xsections(model, i_range, j_range,\n bathymetry_raster, datum, outpdf):\n\n grid = model.modelgrid\n top = model.dis.top.array\n botm = model.dis.botm.array\n idomain = model.dis.idomain.array\n\n i0, i1 = i_range\n j0, j1 = j_range\n\n with PdfPages(outpdf) as pdf:\n for i in range(i0, i1, 10):\n j_values = slice(j0, j1)\n j_edges = slice(j0, j1 + 1)\n x = grid.xcellcenters[i, j_edges]\n y = grid.ycellcenters[i, j_edges]\n\n with rasterio.open(bathymetry_raster) as src:\n bathy = np.squeeze(list(src.sample(zip(x, y))))\n bathy[(bathy == src.nodata) | (bathy == 0)] = np.nan\n bathy = datum - bathy\n\n x_edges = grid.xcellcenters[i, j_edges]\n z_edges = np.vstack([top[i, j_edges]] + [b for b in botm[:, i, j_edges]])\n\n plt.figure()\n plt.pcolormesh(x_edges, z_edges, idomain[:, i, j_values], cmap='Blues_r', shading='flat', edgecolors='k',\n lw=0.1)\n plt.plot(x_edges, bathy, color='r', label=bathymetry_raster)\n plt.title(f'Row {i}')\n plt.legend()\n pdf.savefig()\n plt.close()\n\n for j in range(j0, j1, 10):\n i_values = slice(i0, i1)\n i_edges = slice(i0, i1 + 1)\n x = grid.xcellcenters[i_edges, j]\n y = grid.ycellcenters[i_edges, j]\n\n with rasterio.open(bathymetry_raster) as src:\n bathy = np.squeeze(list(src.sample(zip(x, y))))\n bathy[(bathy == src.nodata) | (bathy == 0)] = np.nan\n bathy = datum - bathy\n\n x_edges = grid.ycellcenters[i_edges, j]\n z_edges = np.vstack([top[i_edges, j]] + [b for b in botm[:, i_edges, j]])\n\n plt.figure()\n plt.pcolormesh(x_edges, z_edges, idomain[:, i_values, j], cmap='Blues_r', shading='auto', edgecolors='k',\n lw=0.1)\n plt.plot(x_edges, bathy, color='r', label=bathymetry_raster)\n plt.title(f'Column {j}')\n plt.legend()\n pdf.savefig()\n plt.close()\n" ]
[ [ "matplotlib.backends.backend_pdf.PdfPages", "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "matplotlib.pyplot.pcolormesh", "numpy.vstack", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
davidtvs/kaggle-hpaic
[ "546a98eb2b1591955ada731ab170084bad0e65bf" ]
[ "threshold_finder.py" ]
[ "import os\nfrom functools import partial\nimport torch\nimport torchvision.transforms as tf\nfrom argparse import ArgumentParser\nfrom core import evaluate\nimport data\nimport utils\n\n\ndef arguments():\n parser = ArgumentParser(\n description=(\n \"Human Protein Atlas Image Classification decision threshold search script\"\n )\n )\n parser.add_argument(\n \"--config\",\n \"-c\",\n type=str,\n default=\"config/example_kfold.json\",\n help=\"Path to the JSON configuration file. Default: config/example_kfold.json\",\n )\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n # Get script arguments and JSON configuration\n args = arguments()\n config = utils.load_json(args.config)\n\n # Configs that are used multiple times\n device = torch.device(config[\"device\"])\n print(\"Device:\", device)\n\n # Data transformations for validation\n image_size = (config[\"img_h\"], config[\"img_w\"])\n tf_val = tf.Compose([tf.Resize(image_size), tf.ToTensor()])\n print(\"Image size:\", image_size)\n print(\"Validation data transformation:\", tf_val)\n\n # Initialize the dataset; no need to set the transformation because it'll be\n # overwritten when creating the dataloaders\n print(\"Dataset configuration:\\n\", config[\"dataset\"])\n dataset = data.HPADatasetHDF5(**config[\"dataset\"])\n num_classes = len(dataset.label_to_name)\n print(\"No. classes:\", num_classes)\n print(\"Dataset size:\", len(dataset))\n\n # Initialize dataloaders\n dl_cfg = config[\"dataloader\"]\n print(\"Dataloader config:\\n\", dl_cfg)\n if dl_cfg[\"n_splits\"] > 1:\n # Split dataset into k-sets and get one dataloader for each set. Only the\n # validation sets are needed\n _, val_loaders = data.utils.kfold_loaders(\n dataset,\n dl_cfg[\"n_splits\"],\n dl_cfg[\"batch_size\"],\n tf_val=tf_val,\n num_workers=dl_cfg[\"workers\"],\n random_state=dl_cfg[\"random_state\"],\n )\n else:\n # Single dataset split into training and validation. Only the validation sets\n # are needed\n _, val_loader = data.utils.train_val_loaders(\n dataset,\n dl_cfg[\"val_size\"],\n dl_cfg[\"batch_size\"],\n tf_val=tf_val,\n num_workers=dl_cfg[\"workers\"],\n random_state=dl_cfg[\"random_state\"],\n )\n val_loaders = [val_loader]\n\n print(\"Validation dataloader(s):\", val_loaders)\n print(\"Validation dataloader(s) size:\", len(val_loaders[0].dataset))\n\n # Initialize the model\n net_cfg = config[\"model\"]\n print(\"Model config:\\n\", net_cfg)\n net = utils.get_model(net_cfg[\"name\"], num_classes, dropout_p=net_cfg[\"dropout_p\"])\n print(net)\n\n # Get list of metrics\n metrics = utils.get_metric_list(dataset)\n\n # Load the models from the specified checkpoint location\n checkpoint_dir = os.path.join(config[\"checkpoint_dir\"], config[\"name\"])\n print(\"Checkpoint directory:\", checkpoint_dir)\n knets = utils.load_kfold_models(net, checkpoint_dir)\n print(\"No. 
of models loaded from checkpoint:\", len(knets))\n\n # Search for the best thresholds for each fold\n print()\n print(\"-\" * 80)\n print(\"Searching for the best decision thresholds\")\n print(\"-\" * 80)\n results = {}\n th_search = utils.multi_find_threshold(\n knets, val_loaders, metrics[0], device=device\n )\n for idx, (single_th, class_th) in enumerate(th_search):\n print()\n print(\"-\" * 80)\n print(\"Fold {}/{}\".format(idx + 1, len(knets)))\n print(\"-\" * 80)\n print()\n\n # Create a new dictionary entry for each fold where the results will be stored\n # in a nested dictionary\n key = \"fold_\" + str(idx + 1)\n results[key] = {\"default\": {}, \"lb\": {}, \"single_best\": {}, \"class_best\": {}}\n\n # Score the model using the standard decision threshold (0.5) used during\n # training\n print(\"Evaluating using a threshold of 0.5 for reference\")\n metrics = evaluate(\n knets[idx], val_loaders[idx], metrics, output_fn=utils.sigmoid_threshold\n )\n results[key][\"default\"][\"threshold\"] = 0.5\n results[key][\"default\"][\"metrics\"] = str(metrics)\n print(metrics)\n print()\n\n # Score the model using a decision threshold of 0.3 (for some reason this does\n # really well on LB)\n print(\"Evaluating using a threshold of 0.3\")\n output_fn = partial(utils.sigmoid_threshold, threshold=0.3)\n metrics = evaluate(knets[idx], val_loaders[idx], metrics, output_fn=output_fn)\n results[key][\"lb\"][\"threshold\"] = 0.3\n results[key][\"lb\"][\"metrics\"] = str(metrics)\n print(metrics)\n print()\n\n # Display the best overall decision threshold and evaluate the model again. This\n # will show the improvement over the default threshold\n print(\"Best overall threshold:\\n\", single_th)\n output_fn = partial(utils.sigmoid_threshold, threshold=single_th)\n metrics = evaluate(knets[idx], val_loaders[idx], metrics, output_fn=output_fn)\n results[key][\"single_best\"][\"threshold\"] = single_th\n results[key][\"single_best\"][\"metrics\"] = str(metrics)\n print(metrics)\n print()\n\n # Same as above but now with per-class thresholds\n print(\"Best thresholds per class:\\n\", class_th)\n output_fn = partial(utils.sigmoid_threshold, threshold=class_th)\n metrics = evaluate(knets[idx], val_loaders[idx], metrics, output_fn=output_fn)\n results[key][\"class_best\"][\"threshold\"] = class_th\n results[key][\"class_best\"][\"metrics\"] = str(metrics)\n print(metrics)\n print()\n\n # Write the results dictionary to a json file inside checkpoint_dir\n json_path = os.path.join(checkpoint_dir, \"threshold.json\")\n utils.save_json(results, json_path)\n" ]
[ [ "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dstndstn/desimeter
[ "704ab999e901db0f4db569b8446576b98dc656a0" ]
[ "py/desimeter/match.py" ]
[ "\"\"\"\nUtility functions to match two catalogs of 2D coordinates\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.spatial import cKDTree as KDTree\nfrom desimeter.log import get_logger\n\n\ndef compute_triangles_with_arbitrary_orientation(x,y) :\n \"\"\"\n triangle vertices are ordered using the side length\n so it is independent of a possible rotation\n \"\"\"\n \n tk=[] # indices \n tr=[] # max side length ratio\n tc=[] # cosine of first vertex (after sorting according to side length)\n ts=[] # orientation\n \n nn=len(x)\n for i in range(nn) :\n for j in range(i+1,nn) :\n for k in range(j+1,nn) :\n # x y of vertices\n ijk=np.array([i,j,k])\n tx=x[ijk]\n ty=y[ijk]\n\n # sorting according to length (square)\n # is not working well for our case because\n # a lot of triangles are isosceles\n tl2=np.array([(tx[1]-tx[0])**2+(ty[1]-ty[0])**2,(tx[2]-tx[1])**2+(ty[2]-ty[1])**2,(tx[0]-tx[2])**2+(ty[0]-ty[2])**2])\n pairs=np.array([[0,1],[1,2],[0,2]])\n ii=np.argsort(tl2)\n ordering = np.zeros(3).astype(int)\n ordering[0] = np.intersect1d(pairs[ii[0]],pairs[ii[2]]) # vertex connected to shortest and longest side \n ordering[1] = np.intersect1d(pairs[ii[0]],pairs[ii[1]]) # vertex connected to shortest and intermediate side \n ordering[2] = np.intersect1d(pairs[ii[1]],pairs[ii[2]]) # vertex connected to intermediate and longest side\n \n ijk=ijk[ordering]\n tx=tx[ordering]\n ty=ty[ordering]\n \n length2=np.array([(tx[1]-tx[0])**2+(ty[1]-ty[0])**2,(tx[2]-tx[1])**2+(ty[2]-ty[1])**2,(tx[0]-tx[2])**2+(ty[0]-ty[2])**2])\n r=np.sqrt(np.max(length2)/np.min(length2)) # ratio of longest to shortest side\n \n c=((tx[1]-tx[0])*(tx[2]-tx[0])+(ty[1]-ty[0])*(ty[2]-ty[0]))/np.sqrt( ((tx[1]-tx[0])**2+(ty[1]-ty[0])**2)*((tx[2]-tx[0])**2+(ty[2]-ty[0])**2)) # cos of angle of first vertex\n\n s=((tx[1]-tx[0])*(ty[2]-ty[0])-(tx[2]-tx[0])*(ty[1]-ty[0]))/np.sqrt( ((tx[1]-tx[0])**2+(ty[1]-ty[0])**2)*((tx[2]-tx[0])**2+(ty[2]-ty[0])**2)) # orientation whether vertices are traversed in a clockwise or counterclock-wise sense\n\n \n tk.append(ijk)\n tr.append(r)\n tc.append(c)\n ts.append(s)\n return np.array(tk),np.array(tr),np.array(tc),np.array(ts)\n\n\n\ndef compute_triangles_with_fixed_orientation(x,y) :\n \"\"\"\n Triangle vertices are ordered using one of the coordinates.\n This makes this algorithm sensitive to the orientation of the coordinate frame,\n but more robust if we do not expect large rotations.\n It would however fail for situations where we don't know the rotation\n \"\"\"\n \n nn=len(x)\n ntri=(nn*(nn-1)*(nn-2))//6\n tk=np.zeros((ntri,3),dtype=int) # indices \n txyz=np.zeros((ntri,3),dtype=float) # properties of the shape and orientation of triangles\n \n # I can do this ordering once, outside of the loop on triangles, to got faster\n ordering = np.argsort(x) \n x=x[ordering]\n y=y[ordering]\n tx=np.zeros(3)\n ty=np.zeros(3)\n\n triangle_index=0\n for i in range(nn) :\n tx[0]=x[i]\n ty[0]=y[i]\n for j in range(i+1,nn) :\n tx[1]=x[j]\n ty[1]=y[j]\n for k in range(j+1,nn) :\n # x y of vertices\n tx[2]=x[k]\n ty[2]=y[k]\n length2=np.array([(tx[1]-tx[0])**2+(ty[1]-ty[0])**2,(tx[2]-tx[1])**2+(ty[2]-ty[1])**2,(tx[0]-tx[2])**2+(ty[0]-ty[2])**2])\n r=np.sqrt(np.max(length2)/np.min(length2)) # ratio of longest to shortest side\n tk[triangle_index]=ordering[[i,j,k]]\n txyz[triangle_index,0]=np.sqrt(np.max(length2)/np.min(length2)) # ratio of longest to shortest side\n txyz[triangle_index,1]=((tx[1]-tx[0])*(tx[2]-tx[0])+(ty[1]-ty[0])*(ty[2]-ty[0]))/np.sqrt( 
((tx[1]-tx[0])**2+(ty[1]-ty[0])**2)*((tx[2]-tx[0])**2+(ty[2]-ty[0])**2)) # cos of angle of first vertex\n                txyz[triangle_index,2]=(tx[1]-tx[0])/np.sqrt((tx[1]-tx[0])**2+(ty[1]-ty[0])**2) # cos of angle of first side\n                triangle_index += 1\n    return tk,txyz\n\ndef match_same_system(x1,y1,x2,y2,remove_duplicates=True) :\n    \"\"\"\n    match two catalogs, assuming the coordinates are in the same coordinate system (no transformation)\n    Args:\n        x1 : float numpy array of coordinates along first axis of cartesian coordinate system\n        y1 : float numpy array of coordinates along second axis in same system\n        x2 : float numpy array of coordinates along first axis in same system\n        y2 : float numpy array of coordinates along second axis in same system\n    \n    returns:\n        indices_2 : integer numpy array. if ii is an index array for entries in the first catalog, \n            indices_2[ii] is the index array of best matching entries in the second catalog.\n            (one should compare x1[ii] with x2[indices_2[ii]])\n            negative indices_2 indicate unmatched entries\n        distances : distances between pairs. They can be used to discard bad matches. \n\n    \"\"\"\n    xy1=np.array([x1,y1]).T\n    xy2=np.array([x2,y2]).T\n    tree2 = KDTree(xy2)\n    distances,indices_2 = tree2.query(xy1,k=1)\n\n    if remove_duplicates :\n        unique_indices_2 = np.unique(indices_2)\n        n_duplicates = np.sum(indices_2>=0)-np.sum(unique_indices_2>=0)\n        if n_duplicates > 0 :\n            for i2 in unique_indices_2 :\n                jj=np.where(indices_2==i2)[0]\n                if jj.size>1 :\n                    kk=np.argsort(distances[jj])\n                    indices_2[jj[kk[1:]]] = -1\n\n    return indices_2,distances\n\ndef match_arbitrary_translation_dilatation(x1,y1,x2,y2) :\n    \"\"\"\n    Match two catalogs in different coordinate systems, 1 and 2, related by a translation, a dilatation, and possibly a \"small\" rotation\n    The orientation of triangles is used for the match so the rotation has to be small.\n    Inspired by http://articles.adsabs.harvard.edu/pdf/1986AJ.....91.1244G\n    \n    Args:\n        x1 : float numpy array of coordinates along first axis of cartesian coordinate system 1\n        y1 : float numpy array of coordinates along second axis of cartesian coordinate system 1\n        x2 : float numpy array of coordinates along first axis of cartesian coordinate system 2\n        y2 : float numpy array of coordinates along second axis of cartesian coordinate system 2\n    \n    returns:\n        indices_2 : integer numpy array. if ii is an index array for entries in the first catalog, \n            indices_2[ii] is the index array of best matching entries in the second catalog.\n            (one should compare x1[ii] with x2[indices_2[ii]])\n            negative values for unmatched entries.\n        distance : distance between pairs of triangles. It can be used to discard bad matches. 
\n\n \"\"\"\n\n log = get_logger()\n \n # compute all possible triangles in both data sets\n # txyz are properties of the shape and orientation of the triangles\n log.debug(\"compute triangles\")\n tk1,txyz1 = compute_triangles_with_fixed_orientation(x1,y1)\n tk2,txyz2 = compute_triangles_with_fixed_orientation(x2,y2)\n \n log.debug(\"match triangles\")\n # match with kdtree triangles with same shape and orientation\n tree2=KDTree(txyz2)\n triangle_distances,triangle_indices_2 = tree2.query(txyz1,k=1)\n \n # now that we have match of triangles , need to match back catalog entries\n ranked_pairs = np.argsort(triangle_distances)\n \n indices_2 = -1*np.ones(x1.size,dtype=int)\n distances = np.zeros(x1.size)\n \n all_matched = False\n log.debug(\"match catalogs using pairs of triangles\")\n for p in ranked_pairs :\n\n k1=tk1[p] # incides (in x1,y1) of vertices of this triangle (size=3)\n k2=tk2[triangle_indices_2[p]] # incides (in x2,y2) of vertices of other triangle\n \n # check unmatched or equal\n if np.any((indices_2[k1]>=0)&(indices_2[k1]!=k2)) :\n log.warning(\"skip {} <=> {}\".format(k1,k2))\n continue\n indices_2[k1]=k2\n distances[k1]=triangle_distances[p]\n all_matched = (np.sum(indices_2>=0)==x1.size)\n if all_matched :\n log.debug(\"all matched\")\n break\n\n # check duplicates\n for i2 in np.unique(indices_2[indices_2>=0]) :\n ii=(indices_2==i2)\n if np.sum(ii) > 1 :\n log.warning(\"{} duplicates for i2={}\".format(np.sum(ii),i2))\n indices_2[ii]=-1\n \n return indices_2 , distances\n" ]
[ [ "numpy.sum", "numpy.sqrt", "numpy.unique", "numpy.min", "numpy.ones", "numpy.intersect1d", "numpy.max", "numpy.where", "numpy.any", "numpy.argsort", "numpy.array", "numpy.zeros", "scipy.spatial.cKDTree" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SaimaS786/ga-learner-dsmp-repo
[ "905dbe0ed110016a951656c5dad0884519185950" ]
[ "Amazon-Alexa-Reviews/code.py" ]
[ "# --------------\n# import packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Load the dataset\ndf = pd.read_csv(path, sep = \"\\t\")\n#print(dtype(date))\n#datetime.date()\ndf['date'] = pd.to_datetime(df['date'])\n\ndf['length'] = df['verified_reviews'].apply(len)\n\ndf.head()\n\n# Converting date attribute from string to datetime.date datatype \n\n\n# calculate the total length of word\n\n\n\n\n# --------------\n## Rating vs feedback\n\n# set figure size\nplt.figure(figsize=(15,7))\n\n# generate countplot\nsns.countplot(x=\"rating\", hue=\"feedback\", data=df)\n\n# display plot\nplt.show()\n\n## Product rating vs feedback\n\n\n# set figure size\nplt.figure(figsize=(15,7))\n\n# generate barplot\nsns.barplot(x=\"rating\", y=\"variation\", hue=\"feedback\", data=df, ci = None)\n\n# display plot\nplt.show()\n\n\n# --------------\n# import packages\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n\n# declare empty list 'corpus'\ncorpus=[]\n\n# for loop to fill in corpus\nfor i in range(0,3150):\n # retain alphabets\n review = re.sub('[^a-zA-Z]', ' ', df['verified_reviews'][i] )\n # convert to lower case\n review=review.lower()\n # tokenize\n review=review.split()\n # initialize stemmer object\n ps=PorterStemmer()\n # perform stemming\n review=[ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n # join elements of list\n review=' '.join(review)\n # add to 'corpus'\n corpus.append(review)\nprint(corpus)\n\n\n\n# --------------\n# import libraries\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\n\n# Instantiate count vectorizer\ncv = CountVectorizer(max_features = 1500)\n\n# Independent variable\nX = cv.fit_transform(corpus)\n\n# dependent variable\ny = df['feedback']\n\n# Counts\ncount = y.value_counts()\nprint(count)\n\n# Split the dataset\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2, random_state = 0)\n\n\n\n\n# --------------\n# import packages\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score\n\n\n# Instantiate calssifier\nrf = RandomForestClassifier(random_state=2)\n\n# fit model on training data\nrf.fit(X_train,y_train)\n\n# predict on test data\ny_pred = rf.predict(X_test)\n\n# calculate the accuracy score\nscore = accuracy_score(y_test,y_pred)\n\n# calculate the precision\nprecision = precision_score(y_test,y_pred)\n\n# display 'score' and 'precision'\nprint('Accuracy:',score)\nprint('Precision:',precision)\n\n\n# --------------\n# --------------\n# import packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# Load the dataset\ndf = pd.read_csv(path,sep =\"\\t\")\nprint(df['date'].dtype)\n\ndf['date']= pd.to_datetime(df['date']) \nprint(df['date'].dtype)\n\ndf['lenght'] = df['verified_reviews'].apply(lambda x: len(x))\n\n\n\n\n\n# --------------\n## Rating vs feedback\n\nsns.countplot(x = 'rating', hue = 'feedback' , data = df)\nplt.show()\nsns.barplot(x = 'rating',y = 'variation' ,hue = 'feedback' , data = df)\nplt.show()\n\n\n\n# --------------\n# import packages\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\n\n# declare 
empty list 'corpus'\ncorpus=[]\n\n# for loop to fill in corpus\nfor i in range(0,3150):\n    review = re.sub('[^a-zA-Z]', ' ', df['verified_reviews'][i] )\n    review=review.lower()\n    review=review.split()\n    ps=PorterStemmer()\n    review=[ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n    review=' '.join(review)\n    corpus.append(review)\n\n    \n    \n\n\n\n\n# --------------\n# import libraries\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\n\ncv = CountVectorizer(max_features=1500)\n\nX = cv.fit_transform(corpus).toarray()\n\ny = df['feedback']\n\ncount = y.value_counts()\n\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2,random_state = 0)\n\n\n# --------------\n# import packages\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score\n\nrf = RandomForestClassifier(random_state=2)\n\nrf.fit(X_train,y_train)\n\ny_pred = rf.predict(X_test)\n\nscore = accuracy_score(y_test,y_pred)\n\nprecision = precision_score(y_test,y_pred)\n\nprint(score)\nprint(precision)\n\n\n# --------------\n# import packages\nfrom imblearn.over_sampling import SMOTE\n\n# Instantiate smote\nsmote = SMOTE(random_state=9)\n\n# fit_sample on training data\nX_train, y_train = smote.fit_sample(X_train, y_train)\n\n# fit model on training data\nrf.fit(X_train, y_train)\n\n# predict on test data\ny_pred = rf.predict(X_test)\n\n# calculate the accuracy score\nscore = accuracy_score(y_test, y_pred)\n\n# calculate the precision\nprecision = precision_score(y_test, y_pred)\n\n# display precision and score\nprint(score, precision)\n\n\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "sklearn.ensemble.RandomForestClassifier", "sklearn.metrics.accuracy_score", "sklearn.metrics.precision_score", "sklearn.model_selection.train_test_split", "sklearn.feature_extraction.text.CountVectorizer", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
andraspatka/dsp-labs
[ "5b8842968aec2539a5cc83b7952cb91f93550ff4" ]
[ "fir/fir_example.py" ]
[ "from numpy import cos, sin, pi, absolute, arange\nfrom scipy.signal import kaiserord, lfilter, firwin, freqz\nfrom pylab import figure, clf, plot, xlabel, ylabel, xlim, ylim, title, grid, axes, show\n\n\n#------------------------------------------------\n# Create a signal for demonstration.\n#------------------------------------------------\n\nsample_rate = 100.0\nnsamples = 400\nt = arange(nsamples) / sample_rate\nx = cos(2*pi*0.5*t) + 0.2*sin(2*pi*2.5*t+0.1) + \\\n 0.2*sin(2*pi*15.3*t) + 0.1*sin(2*pi*16.7*t + 0.1) + \\\n 0.1*sin(2*pi*23.45*t+.8)\n\n\n#------------------------------------------------\n# Create a FIR filter and apply it to x.\n#------------------------------------------------\n\n# The Nyquist rate of the signal.\nnyq_rate = sample_rate / 2.0\n\n# The desired width of the transition from pass to stop,\n# relative to the Nyquist rate. We'll design the filter\n# with a 5 Hz transition width.\nwidth = 5.0/nyq_rate\n\n# The desired attenuation in the stop band, in dB.\nripple_db = 60.0\n\n# Compute the order and Kaiser parameter for the FIR filter.\nN, beta = kaiserord(ripple_db, width)\n\n# The cutoff frequency of the filter.\ncutoff_hz = 10.0\n\n# Use firwin with a Kaiser window to create a lowpass FIR filter.\ntaps = firwin(N, cutoff_hz/nyq_rate, window=('kaiser', beta))\n\n# Use lfilter to filter x with the FIR filter.\nfiltered_x = lfilter(taps, 1.0, x)\n\n#------------------------------------------------\n# Plot the FIR filter coefficients.\n#------------------------------------------------\n\nfigure(1)\nplot(taps, 'bo-', linewidth=2)\ntitle('Filter Coefficients (%d taps)' % N)\ngrid(True)\n\n#------------------------------------------------\n# Plot the magnitude response of the filter.\n#------------------------------------------------\n\nfigure(2)\nclf()\nw, h = freqz(taps, worN=8000)\nplot((w/pi)*nyq_rate, absolute(h), linewidth=2)\nxlabel('Frequency (Hz)')\nylabel('Gain')\ntitle('Frequency Response')\nylim(-0.05, 1.05)\ngrid(True)\n\n# Upper inset plot.\nax1 = axes([0.42, 0.6, .45, .25])\nplot((w/pi)*nyq_rate, absolute(h), linewidth=2)\nxlim(0,8.0)\nylim(0.9985, 1.001)\ngrid(True)\n\n# Lower inset plot\nax2 = axes([0.42, 0.25, .45, .25])\nplot((w/pi)*nyq_rate, absolute(h), linewidth=2)\nxlim(12.0, 20.0)\nylim(0.0, 0.0025)\ngrid(True)\n\n#------------------------------------------------\n# Plot the original and filtered signals.\n#------------------------------------------------\n\n# The phase delay of the filtered signal.\ndelay = 0.5 * (N-1) / sample_rate\n\nfigure(3)\n# Plot the original signal.\nplot(t, x)\n# Plot the filtered signal, shifted to compensate for the phase delay.\nplot(t-delay, filtered_x, 'r-')\n# Plot just the \"good\" part of the filtered signal. The first N-1\n# samples are \"corrupted\" by the initial conditions.\nplot(t[N-1:]-delay, filtered_x[N-1:], 'g', linewidth=4)\n\nxlabel('t')\ngrid(True)\n\nshow()" ]
[ [ "numpy.absolute", "scipy.signal.freqz", "numpy.arange", "numpy.cos", "numpy.sin", "scipy.signal.kaiserord", "scipy.signal.lfilter", "scipy.signal.firwin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
Tiffany-HONG/easytext
[ "9c717d11240d96fab98b0532084ebb5c093d55bd" ]
[ "easytext/data/pretrained_vocabulary.py" ]
[ "#!/usr/bin/env python 3\n# -*- coding: utf-8 -*-\n\n#\n# Copyright (c) 2020 PanXu, Inc. All Rights Reserved\n#\n\"\"\"\n预训练好的词汇表,带有预训练好的词向量\n\nAuthors: panxu([email protected])\nDate: 2020/06/23 11:49:00\n\"\"\"\nimport os\nfrom typing import Iterable, List, Dict, Union\nimport torch\nfrom .vocabulary import IVocabulary, Vocabulary\n\nfrom easytext.data.pretrained_word_embedding_loader import PretrainedWordEmbeddingLoader\n\n\nclass PretrainedVocabulary(IVocabulary):\n \"\"\"\n 带有预训练词向量的词汇表。\n \"\"\"\n\n EMBEDDING_MATRIX_FILE_NAME = \"embedding_matrix.pt\"\n\n def __init__(self,\n vocabulary: Vocabulary,\n pretrained_word_embedding_loader: PretrainedWordEmbeddingLoader,\n ):\n \"\"\"\n 初始化\n :param vocabulary: 词汇表\n :param pretrained_word_embedding_loader: word embedding 载入器\n \"\"\"\n\n self._vocabulary = vocabulary\n\n if pretrained_word_embedding_loader is not None:\n\n if pretrained_word_embedding_loader.embedding_dict is None:\n pretrained_word_embedding_loader.load()\n\n embedding_dict = pretrained_word_embedding_loader.embedding_dict\n\n embeddings = list()\n\n for index in range(self._vocabulary.size):\n token = self._vocabulary.token(index)\n\n if token in embedding_dict:\n embeddings.append(embedding_dict[token])\n else:\n empty_vec = [0.] * pretrained_word_embedding_loader.embedding_dim\n embeddings.append(empty_vec)\n\n self._embedding_matrix = torch.tensor(embeddings, dtype=torch.float)\n else:\n self._embedding_matrix = None\n\n @property\n def embedding_matrix(self) -> torch.Tensor:\n \"\"\"\n 词向量 matrix\n \"\"\"\n return self._embedding_matrix\n\n def save_to_file(self, directory: str) -> \"PretrainedVocabulary\":\n self._vocabulary.save_to_file(directory)\n\n # 将 embedding matrix 存起来\n embedding_matrix_file_path = os.path.join(directory, PretrainedVocabulary.EMBEDDING_MATRIX_FILE_NAME)\n torch.save(self._embedding_matrix, embedding_matrix_file_path)\n\n return self\n\n @classmethod\n def from_file(cls, directory: str) -> \"PretrainedVocabulary\":\n vocabulary = Vocabulary.from_file(directory)\n\n pretrianed_vocabulary = cls(pretrained_word_embedding_loader=None,\n vocabulary=vocabulary)\n embedding_matrix_file_path = os.path.join(directory, PretrainedVocabulary.EMBEDDING_MATRIX_FILE_NAME)\n pretrianed_vocabulary._embedding_matrix = torch.load(embedding_matrix_file_path)\n\n return pretrianed_vocabulary\n\n def __len__(self):\n return len(self._vocabulary)\n\n @property\n def unk(self):\n return self._vocabulary.unk\n\n @property\n def padding(self):\n return self._vocabulary.padding\n\n @property\n def other_special_tokens(self):\n return self._vocabulary.other_special_tokens\n\n @property\n def padding_index(self) -> Union[None, int]:\n \"\"\"\n :return: 获取 padding 的 index, 如果 padding 没有设置,那么返回 None; 否则,返回实际的index.\n \"\"\"\n return self._vocabulary.padding_index\n\n def index(self, token: str) -> int:\n \"\"\"\n 获取token的index\n :param token: 输入的token\n :return: token 的 index\n \"\"\"\n\n return self._vocabulary.index(token)\n\n def token(self, index: int) -> str:\n \"\"\"\n 获取 index 的 token\n :param index: 指定 index\n :return: 当前 index 的 token\n \"\"\"\n return self._vocabulary.token(index)\n\n @property\n def size(self):\n return self._vocabulary.size\n\n" ]
[ [ "torch.tensor", "torch.load", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
KIT-MBS/pyREX
[ "cf3036400850f8b155399cd9444225352a34db08" ]
[ "pyrexMD/tests/test_misc.py" ]
[ "# @Author: Arthur Voronin <arthur>\n# @Date: 10.05.2021\n# @Filename: test_misc.py\n# @Last modified by: arthur\n# @Last modified time: 28.07.2021\n\n\nimport pyrexMD.misc as misc\nimport MDAnalysis as mda\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy.testing import assert_allclose\nfrom unittest.mock import patch\nimport pathlib\nimport pytest\nimport os\n\n\n# find main directory of pyrexMD\nposixpath = pathlib.Path(\".\").rglob(\"*core.py\") # generator for matching paths\npathname = posixpath.send(None).as_posix() # get first path name\nmain_dir = os.path.relpath(os.path.realpath(pathname).rstrip(\"core.py\")) # main directory of pyrexMD\n\n# set up test paths\ncwd = os.getcwd()\nprint(f\"cwd: {cwd}\")\npre = f\"{main_dir}/tests\"\npre2 = f\"{main_dir}/tests/files/figs/\"\npre3 = f\"{main_dir}/examples/files/protein/\"\npre4 = f\"{main_dir}/examples/files/pickle/\"\n\n################################################################################\n################################################################################\n### important functions\n\n\ndef test_round_object():\n A = [0.1234567890, 1.9876543210]\n val = misc.round_object(A, prec=3)\n expected = [0.123, 1.988]\n assert assert_allclose(val, expected) == None\n\n B = np.array([0.1234567890, 1.9876543210])\n val = misc.round_object(B, prec=3)\n expected = np.array([0.123, 1.988])\n assert assert_allclose(val, expected) == None\n return\n\n\ndef test_get_substrings():\n val = misc.get_substrings(\"split/this_string.into:parts\")\n expected = ['split', 'this', 'string', 'into', 'parts']\n assert val == expected\n\n val = misc.get_substrings(\"split/this_string.into:parts\", reverse=True)\n expected = ['parts', 'into', 'string', 'this', 'split']\n assert val == expected\n return\n\n\ndef test_split_lists():\n A = list(range(10))\n val = misc.split_lists(A, 2)\n expected = ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 2, 4, 6, 8], [1, 3, 5, 7, 9])\n assert val == expected\n\n A = list(range(10))\n val = misc.split_lists(A, 2, remap=True)\n expected = ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4])\n assert val == expected\n\n A = list(range(10))\n val = misc.split_lists(A, 4)\n expected = ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 4, 5, 6, 8, 9], [3, 7])\n assert val == expected\n\n A = list(range(10))\n val = misc.split_lists(A, 4, remap=True)\n expected = ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7], [0, 1])\n assert val == expected\n\n # coverage\n misc.split_lists(10, 2)\n misc.split_lists(range(10), 2)\n with pytest.raises(TypeError):\n misc.split_lists(\"wrong_dtype\", 2)\n return\n\n\ndef test_get_precision():\n assert misc.get_precision(5.00000) == 1\n assert misc.get_precision(\"5.12300\") == 3\n return\n\n\ndef test_get_base():\n assert misc.get_base(\"random_plot.png\") == \"random_plot\"\n assert misc.get_base(\"/bla/blubb/random_plot.png\") == \"random_plot\"\n return\n\n\ndef test_get_extension():\n assert misc.get_extension(\"random_plot.png\") == \".png\"\n assert misc.get_extension(\"/bla/blubb/random_plot.png\") == \".png\"\n return\n\n\ndef test_insert_str():\n str1 = \"str1 is this\"\n str2 = \"str2 bla\"\n val = misc.insert_str(str1, str2, sep=\" \", loc=\"after\")\n expected = \"str1 str2 blais this\"\n assert val == expected\n\n val = misc.insert_str(str1, str2, sep=\" \", loc=\"before\")\n expected = \"str1str2 bla is this\"\n assert val == expected\n\n # coverage\n with pytest.raises(misc.Error):\n misc.insert_str(str1, str2, sep=\" \", loc=\"\")\n 
return\n\n\ndef test_read_file():\n    val = misc.read_file(f\"{pre}/files/header_file1.txt\")\n    expected = [np.array([1., 2., 3., 4., 5.]), np.array([1., 2., 3., 4., 5.])]\n    assert assert_allclose(val, expected) == None\n\n    val = misc.read_file(f\"{pre}/files/header_file2.txt\")\n    expected = [np.array([1., 2., 3., 4., 5.]), np.array([10., 20., 30., 40., 50.])]\n    assert assert_allclose(val, expected) == None\n\n    # coverage\n    misc.read_file(f\"{pre}/files/header_file1.txt\", skiprows=None, dtype=list)\n    misc.read_file(f\"{pre}/files/2hba/2hba.score\", usecols=(0, 1), dtype=[int, float])\n    return\n\n\ndef test_read_DCA_file():\n    val = misc.read_DCA_file(f\"{pre}/files/2hba/2hba.score\", n_DCA=50)\n    expected = np.load(f\"{pre}/files/READ_DCA_FILE.npy\", allow_pickle=True)\n    assert assert_allclose(val[0], expected[0]) == None\n    assert assert_allclose(val[1], expected[1]) == None\n    return\n\n\ndef test_get_PDBid():\n    ref = mda.Universe(f\"{pre}/files/1l2y/1l2y_ref.pdb\")\n    val = misc.get_PDBid(ref)\n    assert val == \"1l2y\"\n\n    # coverage\n    misc.get_PDBid(\"wrong_dtype\")\n    misc.get_PDBid(\"no_pdbid_in_string\")\n    return\n\n\ndef test_get_slice_indices():\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert misc.get_slice_indices(A, [1, 2]) == (1, 3)\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert misc.get_slice_indices(A, [3, 9, 8]) == (3, 6)\n    A = [0, 1, 2, 3, 9, 8, 7]\n    with pytest.raises(misc.Error):\n        assert misc.get_slice_indices(A, [3, 8, 8])  # no slice possible\n\n    # coverage\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert misc.get_slice_indices([1, 2], A) == (1, 3)\n    return\n\n\ndef test_get_cutoff_array():\n    array = list(range(10, 20))\n    cut_min, cut_max = 12, 16\n    cutoff_array, cutoff_array_ndx = misc.get_cutoff_array(array, cut_min=cut_min, cut_max=cut_max)\n    assert (cutoff_array == [12, 13, 14, 15, 16])\n    assert (cutoff_array_ndx == [2, 3, 4, 5, 6])\n\n    # coverage\n    cutoff_array, cutoff_array_ndx = misc.get_cutoff_array(array, cut_max=cut_max)\n    assert (cutoff_array == [10, 11, 12, 13, 14, 15, 16])\n    assert (cutoff_array_ndx == [0, 1, 2, 3, 4, 5, 6])\n    return\n\n\ndef test_get_subarray_start_ndx():\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert misc.get_subarray_start_ndx(A, [1, 2]) == 1\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert misc.get_subarray_start_ndx(A, [3, 9, 8]) == 3\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert misc.get_subarray_start_ndx(A, [999]) == None\n\n    with pytest.raises(ValueError):\n        A = [0, 1, 2, 3, 9, 8, 7]\n        misc.get_subarray_start_ndx([1, 2], A)\n    return\n\n\ndef test_get_subarray():\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert (misc.get_subarray(A, [0, 1, 2]) == [0, 1, 2]).all()\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert (misc.get_subarray(A, [5, 6]) == [8, 7]).all()\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert (misc.get_subarray(A, [0, 2, 4]) == [0, 2, 9]).all()\n    A = [0, 1, 2, 3, 9, 8, 7]\n    assert (misc.get_subarray(A, [3, 2, 1]) == [3, 2, 1]).all()\n    return\n\n\ndef test_get_sorted_array():\n    A = [1, 3, 2, 4]   # coverage: dtype\n    val = misc.get_sorted_array(A)\n    assert np.all(val[0] == [1, 2, 3, 4])\n    assert np.all(val[1] == [0, 2, 1, 3])\n    return\n\n\ndef test_get_ranked_array():\n    A = np.array([1, 3, 2, 4])\n    val = misc.get_ranked_array(A, reverse=False, verbose=False)\n    assert (val[0] == [4, 3, 2, 1]).all()\n    assert (val[1] == [3, 1, 2, 0]).all()\n\n    A = [1, 3, 2, 4]  # coverage: dtype\n    val = misc.get_ranked_array(A, reverse=True, verbose=True)\n    assert np.all(val[0] == [1, 2, 3, 4])\n    assert np.all(val[1] == [0, 2, 1, 3])\n    return\n\n\ndef test_get_percentile():\n    data = list(range(10))\n    assert misc.get_percentile(data, p=50) == 4.5\n    
assert misc.get_percentile(data, p=80) == 7.2\n    return\n\n\ndef test_get_quantile():\n    data = list(range(10))\n    assert misc.get_quantile(data, p=0.5) == 4.5\n    assert misc.get_quantile(data, p=0.8) == 7.2\n    return\n\n\ndef test_autodetect_header():\n    assert misc.autodetect_header(f\"{pre3}/1l2y.pdb\") == 145\n    assert misc.autodetect_header(f\"{pre}/files/header_file1.txt\") == 8\n    assert misc.autodetect_header(f\"{pre}/files/header_file2.txt\") == 10\n    return\n\n\ndef test_CONFIG():\n    default = {\"start\": None,\n               \"stop\": 100,\n               \"step\": 1,\n               \"color\": \"red\"}\n    change = {\"color\": \"blue\",\n              \"marker\": \".\"}\n    target = {\"start\": None,\n              \"stop\": 100,\n              \"step\": 1,\n              \"color\": \"blue\",\n              \"marker\": \".\"}\n    cfg1 = misc.CONFIG(default, **change)\n    cfg2 = misc.CONFIG(target)\n    assert cfg1.items() == cfg2.items()\n\n    # coverage\n    cfg2()\n    temp = cfg2.deepcopy()\n    temp = cfg2.values()\n    temp = cfg2.update_config({})\n\n    default = {\"colors\": [\"g\", \"r\"]}\n    alias_dict = {\"color_positive\": \"teal\",\n                  \"color_negative\": \"orange\"}\n    cfg3 = misc.CONFIG(default, **alias_dict)\n    cfg3.update_by_alias(alias=\"color_positive\", key=\"colors\", key_ndx=0, **alias_dict)\n    cfg3.update_by_alias(alias=\"color_negative\", key=\"colors\", key_ndx=1, **alias_dict)\n    assert cfg3.colors[0] == \"teal\"\n    assert cfg3.colors[1] == \"orange\"\n    return\n\n\n################################################################################\n################################################################################\n### coverage of less important functions\n\n\ndef test_HiddenPrints_ALL():\n    with misc.HiddenPrints_ALL():\n        print(\"will be hidden\")\n    return\n\n\ndef test_timeit():\n    t = misc.timeit()\n    t = misc.timeit(t)\n    return\n\n\ndef test_joinpath():\n    misc.joinpath(\".\", \".\", realpath=False, create_dir=False)\n    misc.joinpath(\".\", \"./\", realpath=False, create_dir=False)\n    return\n\n\ndef test_rm():\n    misc.rm(\"./temp.txt\", pattern=\"./temp.txt\")\n    return\n\n\ndef test_cprint():\n    misc.cprint(\"text message\", cprint_color=\"red\")\n    with pytest.raises(TypeError):\n        misc.cprint(0)\n    return\n\n\ndef test_percent():\n    assert misc.percent(5, 0) == 0.0\n    return\n\n\ndef test_round_to_base():\n    assert misc.round_to_base(2, base=5) == 0\n    assert misc.round_to_base(3, base=5) == 5\n    return\n\n\ndef test_norm_array():\n    misc.norm_array([0, 1, 2])\n    misc.norm_array(np.array([0, 1, 2]))\n    return\n\n\ndef test_print_table():\n    # coverage\n    misc.print_table([[0, 1], [0, 1]], dtype=float)\n    for i in range(1, 12):\n        DATA = [[[0, 1] for _ in range(50)] for _ in range(i)]\n        if i <= 10:\n            misc.print_table(DATA)\n        else:\n            with pytest.raises(misc.Error):\n                misc.print_table(DATA)\n    return\n\n\ndef test_save_table():\n    xdata = [0, 1, 2, 3]\n    with pytest.raises(TypeError):\n        misc.save_table(data=[xdata, xdata], filename=\"\")\n    with pytest.raises(IndexError):\n        misc.save_table(data=[xdata, xdata[:2]], save_as=\"./temp\")  # unequal length\n\n    misc.save_table(data=[xdata, xdata], filename=\"./temp\", header=\"#header line\")  # append .log\n    misc.rm(\"./temp.log\")\n    return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_set_pad(mock_show):\n    fig, ax = misc.figure()\n    misc.set_pad(fig)\n    misc.set_pad([ax, ax])\n    plt.close(\"all\")\n    return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_legend(mock_show):\n    fig, ax = misc.figure()\n    plt.plot([0, 1], [0, 1], color=\"red\")\n    plt.plot([2, 3], [2, 3], color=\"blue\")\n    misc.legend()\n    misc.legend(labels=[\"0\", \"1\"], handlecolors=[\"red\", 
\"blue\"])\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_savefig(mock_show):\n fig, ax = misc.figure()\n misc.savefig(filename=None)\n return\n\n\ndef test_autoapply_limits():\n fig, ax = misc.figure()\n misc.autoapply_limits(fig)\n plt.plot([0, 1, 2], [0, 1, 2])\n misc.autoapply_limits(ax)\n\n obj = misc.pickle_load(f\"{pre4}/RMSD_PLOT.pickle\")\n misc.autoapply_limits(obj)\n plt.close(\"all\")\n return\n\n\ndef test_hide_plot():\n fig, ax = misc.figure()\n misc.hide_plot(fig)\n fig, ax = misc.figure()\n misc.hide_plot(num=fig.number)\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_pickle_load(mock_show):\n obj = misc.pickle_load(f\"{pre4}/RMSD_HIST.pickle\")\n obj = misc.pickle_load(f\"{pre4}/RMSD_PLOT.pickle\")\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_pickle_dump(mock_show):\n fig, ax = misc.figure()\n misc.pickle_dump(fig, save_as=\"./temp\") # appends .pickle\n misc.pickle_dump(fig, save_as=\"./temp\") # overwrite\n misc.pickle_dump([0, 1, 2], save_as=\"./temp\") # 'pickle.dumped object' instead of 'pickle.dumped figure'\n misc.rm(\"./temp.pickle\")\n plt.close(\"all\")\n\n # coverage\n with pytest.raises(TypeError):\n misc.pickle_dump(fig)\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_pickle_plot(mock_show):\n fig, ax = misc.pickle_plot([f\"{pre4}/RMSD_PLOT.pickle\", f\"{pre4}/RMSD_HIST.pickle\"], import_settings=False, xscale=\"linear\", yscale=\"linear\") # coverage\n fig, ax = misc.pickle_plot([f\"{pre4}/RMSD_PLOT.pickle\", f\"{pre4}/RMSD_HIST.pickle\"], save_as=\"./temp.png\")\n\n # coverage\n with pytest.raises(TypeError):\n fig, ax = misc.pickle_plot()\n\n misc.rm(\"./temp.png\")\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_align_limits(mock_show):\n fig, ax = misc.pickle_plot([f\"{pre4}/RMSD_PLOT.pickle\", f\"{pre4}/RMSD_HIST.pickle\"])\n misc.align_limits(ax[0], ax[1], apply_on=\"xy\", new_lim=[])\n misc.align_limits(ax[0], ax[1], apply_on=\"xy\", new_lim=[0, 1])\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_align_ticks(mock_show):\n fig, ax = misc.pickle_plot([f\"{pre4}/RMSD_PLOT.pickle\", f\"{pre4}/RMSD_HIST.pickle\"])\n misc.align_ticks(ax[0], ax[1], apply_on=\"xy\", new_ticks=[])\n misc.align_ticks(ax[0], ax[1], apply_on=\"xy\", new_ticks=[0, 1])\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_align_ticklabels(mock_show):\n fig, ax = misc.pickle_plot([f\"{pre4}/RMSD_PLOT.pickle\", f\"{pre4}/RMSD_HIST.pickle\"])\n misc.align_ticklabels(ax[0], ax[1], apply_on=\"xy\", new_ticklabels=[])\n misc.align_ticklabels(ax[0], ax[1], apply_on=\"xy\", new_ticklabels=[0, 1])\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_apply_shared_axes(mock_show):\n fig, ax = misc.pickle_plot([f\"{pre4}/RMSD_PLOT.pickle\", f\"{pre4}/RMSD_HIST.pickle\"])\n # coverage\n misc.apply_shared_axes(ax, grid=[2, 1])\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_convert_ticklabels(mock_show):\n fig, ax = misc.figure(grid=[2, 1])\n plt.sca(ax[0])\n plt.plot(list(range(100)), list(range(100)))\n plt.sca(ax[1])\n plt.plot(list(range(100)), list(range(100)))\n misc.convert_ticklabels(ax, multiplier=10, apply_on=\"xy\", prec=1)\n misc.convert_ticklabels(ax, multiplier=10, apply_on=\"xy\", prec=0)\n plt.close(\"all\")\n return\n\n\ndef test_number_base_factorization():\n x = misc.number_base_factorization(123)\n x()\n x = 
misc.number_base_factorization(0.123)\n x()\n return\n\n\ndef test_setup_ticks():\n majorticks, minorticks = misc.setup_ticks(vmin=0, vmax=10, major_base=5, minor_base=None)\n majorticks, minorticks = misc.setup_ticks(vmin=0, vmax=10, major_base=5, minor_base=1)\n return\n\n\ndef test_setup_logscale_ticks():\n majorticks, minorticks = misc.setup_logscale_ticks(vmax=100)\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_set_logscale_ticks(mock_show):\n fig, ax = misc.figure()\n plt.plot(list(range(100)), list(range(100)))\n misc.set_logscale_ticks(ax, apply_on=\"xy\", vmax=None, minorticks=True)\n misc.set_logscale_ticks(ax, apply_on=\"xy\", vmax=20, minorticks=False)\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_create_cmap(mock_show):\n seq = [\"lightblue\", 2/6, \"lightgreen\", 3/6, \"yellow\", 4/6, \"orange\", 5/6, \"red\"]\n cmap = misc.create_cmap(seq, vmin=0, vmax=12)\n\n # coverage\n fig, ax = misc.figure()\n cmap = misc.create_cmap(seq, ax=ax)\n plt.close(\"all\")\n return\n\n\n@patch(\"matplotlib.pyplot.show\")\ndef test_add_cbar_ax(mock_show):\n fig, ax = misc.figure()\n # coverage\n cbar_ax = misc.add_cbar_ax(ax, bounds=[0, 0, 1, 0.05])\n cbar_ax = misc.add_cbar_ax(ax, location=\"right\", orientation=\"horizontal\")\n cbar_ax = misc.add_cbar_ax(ax, location=\"left\", orientation=\"horizontal\")\n cbar_ax = misc.add_cbar_ax(ax, location=\"top\", orientation=\"vertical\")\n cbar_ax = misc.add_cbar_ax(ax, location=\"bottom\", orientation=\"vertical\")\n plt.close(\"all\")\n return\n" ]
[ [ "matplotlib.pyplot.sca", "numpy.all", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "numpy.testing.assert_allclose", "numpy.load", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mylestunglee/aes
[ "7aa110c54e631fb8eeb94eedfcb2e63280eddcc7" ]
[ "scripts/plot_clear_profiles.py" ]
[ "import numpy as np\nimport csv\nimport matplotlib.pyplot as plt\n\nX = np.arange(1001)\n\ndef empty_lookup():\n\treturn [[] for _ in X]\n\ncpu = {\n\t'full': empty_lookup(),\n\t'partial': empty_lookup(),\n\t'naive': empty_lookup(),\n}\n\nmemory = {\n\t'full': empty_lookup(),\n\t'partial': empty_lookup(),\n\t'naive': empty_lookup(),\n}\n\nwith open('log.csv', 'r') as csvfile:\n\tspamreader = csv.reader(csvfile, delimiter=' ')\n\tfor row in spamreader:\n\t\ttype = row[0]\n\n\t\tif type in cpu:\n\t\t\tn = int(row[1])\n\t\t\ttime = float(row[2])\n\t\t\tmem = int(row[3])\n\n\t\t\tcpu[type][n].append(time)\n\t\t\tmemory[type][n].append(mem)\n\nY = np.arange(801)\n\ndef aggregate(data):\n\treturn {\n\t\tkey: [np.mean(values) for n, values in enumerate(data[key]) if n <= 800]\n\t\tfor key in data\n\t}\n\ndef quad_fit(data):\n\treturn {\n\t\tkey: np.poly1d(np.polyfit(Y,\n\t\t\t[np.mean(values) for n, values in enumerate(data[key]) if n <= 800],\n\t\t\t\t3))\n\t\tfor key in data\n\t}\n\n\"\"\"\nfit_cpu = quad_fit(memory)\n#agg_mem = aggregate(memory)\n\nplt.rcParams.update({'font.size': 16})\npattern = {\n\t'full': '-',\n\t'partial': '-',\n\t'naive': ':'\n}\n\nfor key in fit_cpu:\n\tplt.plot(Y, fit_cpu[key](Y) / 1024, label=key, linewidth=3, ls=pattern[key])\n\nplt.legend()\nplt.ylabel('maximum memory allocated (MiB)')\nplt.xlabel('$n$')\nplt.show()\n\"\"\"\n" ]
[ [ "numpy.arange", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
harriliu/marketing_mix_modeling
[ "2a80d229d9a8652111664dc2c010720d87d07d6b" ]
[ "stepwise_mkt_param/model_inference.py" ]
[ "from datetime import datetime, timedelta\nimport matplotlib.pyplot as plt \nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport math\n\nclass driver_analysis:\n \n def __init__(self, beta):\n self.beta = beta*(10**10)\n\n def get_sat_lvl(self, data, ds, alpha, media_var):\n '''\n Returns a indexed response curve with different saturation level:\n 1. Breakthrough\n 2. Optimal\n 3. Saturation Begin\n 4. Full Saturation\n\n Saturation level is calculated by taking 1st, 2nd, 3rd integral of the curve function\n Note saturation level is default at weekly level\n '''\n data[ds] = pd.to_datetime(data[ds])\n data['week'] = data[ds].map(lambda x:x - timedelta(days=x.isoweekday() % 7))\n data = data[['week', media_var]].groupby(\"week\").sum().reset_index()\n\n df_curve= pd.DataFrame()\n\n index=((np.mean(data[media_var])/10)*100)/np.max(data[media_var])\n df_curve['Index']=np.arange(0,300,index)\n df_curve['var_volume']=df_curve['Index']*np.max(data[media_var])/100\n\n def s_curve_chart (data, column_name, alpha, beta):\n media_input_index = data['Index']\n beta1 = np.float(beta*(10**-10))\n column_name1 = str(column_name)+'_alpha_'+str(alpha).replace('.','')\n data[column_name1] = round(beta1**(alpha**media_input_index),8)\n return column_name1\n\n df_curve['var_curve'] = s_curve_chart(df_curve,'var_volume',alpha, self.beta)\n df_curve['max_var'] = np.max(data[media_var])\n df_curve['mean_var'] = np.mean(data[media_var])\n df_curve.drop('var_curve',axis = 1,inplace = True)\n df_curve.sort_values(by = 'var_volume',inplace = True)\n\n\n ########################################################################\n ##########Calculate optimal point 1st derivative of the curve###########\n ########################################################################\n\n def deri_1st(data,var_column,index_column):\n data['deri_1st']=alpha**(data[index_column])*data[var_column]*np.log(alpha)*np.log(np.float(self.beta*(10**-10)))\n deri_1st(df_curve,'var_volume_alpha_'+str(alpha).replace('.',''),'Index')\n self.opt_x=df_curve[df_curve['deri_1st']==df_curve['deri_1st'].max()]['var_volume']\n self.opt_y=df_curve[df_curve['deri_1st']==df_curve['deri_1st'].max()]['var_volume_alpha_'+str(alpha).replace('.','')]\n df_curve['opt_x'] = self.opt_x\n df_curve['opt_y'] = self.opt_y\n\n ############################################################\n #######Calculate breakthrough point 2nd derivative #########\n ############################################################\n def deri_2nd(data,var_column,index_column,frist_column):\n data['deri_2nd']=data[frist_column]*np.log(alpha)+\\\n alpha**(2*data[index_column])*data[var_column]*\\\n np.log(alpha)*np.log(alpha)*np.log(np.float(self.beta*(10**-10)))*np.log(np.float(self.beta*(10**-10))) \n\n deri_2nd(df_curve,'var_volume_alpha_'+str(alpha).replace('.',''),'Index','deri_1st')\n self.bt_x=df_curve[df_curve['deri_2nd']==df_curve['deri_2nd'].max()]['var_volume']\n self.bt_y=df_curve[df_curve['deri_2nd']==df_curve['deri_2nd'].max()]['var_volume_alpha_'+str(alpha).replace('.','')]\n df_curve['bt_x']=self.bt_x\n df_curve['bt_y']=self.bt_y\n\n ##################################################################\n #########Calculate saturation begins point 3rd derivative#########\n ##################################################################\n def deri_3rd(data,var_column,index_column,frist_column):\n data['deri_3rd']=data[frist_column]*(alpha**(2*data[index_column])*np.log(np.float(self.beta*(10**-10))**2)+\\\n 
3*alpha**data[index_column]*np.log(np.float(self.beta*(10**-10)))+1) \n\n deri_3rd(df_curve,'var_volume_alpha_'+str(alpha).replace('.',''),'Index','deri_1st') \n self.sb_x=df_curve[df_curve['deri_3rd']==df_curve['deri_3rd'].max()]['var_volume']\n self.sb_y=df_curve[df_curve['deri_3rd']==df_curve['deri_3rd'].max()]['var_volume_alpha_'+str(alpha).replace('.','')]\n df_curve['sb_x']=self.sb_x\n df_curve['sb_y']=self.sb_y\n\n #################################################\n #########Calculate full saturation point#########\n #################################################\n\n self.fs_x=df_curve[df_curve['var_volume_alpha_'+str(alpha).replace('.','')]>=0.992]['var_volume'][0:1]\n self.fs_y=df_curve[df_curve['var_volume_alpha_'+str(alpha).replace('.','')]>=0.992]['var_volume_alpha_'+str(alpha).replace('.','')][0:1]\n df_curve['fs_x']=self.fs_x\n df_curve['fs_y']=self.fs_y\n\n return df_curve\n \n \n def readable_number(self, n):\n \n mill_lst = ['',' Thousand',' Million',' Billion',' Trillion']\n\n n = float(n)\n millidx = max(0,min(len(mill_lst)-1, int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))\n\n return '{:.1f}{}'.format(n / 10**(3 * millidx), mill_lst[millidx])\n\n def plot_sat_lvl(self, df_curve, model_df, ds, var):\n\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(30, 10))\n plt.style.use('ggplot')\n\n #plot curve line\n lm = sns.lineplot(x='var_volume', y = [col for col in df_curve.columns if \"alpha\" in col][0], \n data = df_curve, color = '#37536d', ax = ax1)\n\n # formatting number into readable format\n y_ticks = lm.get_yticks()\n x_ticks = lm.get_xticks()\n lm.set_yticklabels(['{:,.0%}'.format(i) for i in y_ticks])\n lm.set_xticklabels([self.readable_number(i) for i in x_ticks])\n\n # plot saturation levels\n ax1.plot(df_curve['bt_x'], df_curve['bt_y'],'ro',label=\"Break Through\",marker='o', markersize=10,color='m')\n ax1.plot(df_curve['opt_x'], df_curve['opt_y'], 'ro',label=\"Optimal\",marker='o', markersize=10,color='g')\n ax1.plot(df_curve['sb_x'], df_curve['sb_y'], 'ro',label=\"Satuation Begins\",marker='o', markersize=10,color='r')\n ax1.plot(df_curve['fs_x'], df_curve['fs_y'], 'ro',label=\"Full Satuation\",marker='o', markersize=10,color='c')\n # # Set plot options and show plot\n ax1.set_xlabel('Variable Volumes',fontsize=20)\n ax1.set_ylabel('Response Index',fontsize=20)\n ax1.set_title(var +' Response Curve',fontsize=20)\n ax1.legend(loc='center right', fancybox=False, framealpha=0)\n\n # creating dataframe for plotting volume against saturation level plot\n df_volume = pd.DataFrame()\n df_volume['period'] = pd.to_datetime(pd.to_datetime(model_df[ds]).map(lambda x:x.strftime(\"%Y-%m-%d\")))\n df_volume['week'] = df_volume['period'].map(lambda x:x - timedelta(days=x.isoweekday() % 7))\n df_volume['week'] = pd.to_datetime(df_volume['week']).map(lambda x:x.strftime(\"%Y-%m-%d\"))\n df_volume['var_volume'] = model_df[var]\n df_volume = df_volume[['week', 'var_volume']].groupby(\"week\").sum().reset_index()\n max_x=df_volume['var_volume'].max()\n\n\n df_volume['Optimal']=int(df_curve['opt_x'].unique()[1])\n df_volume['Break Through']=int(df_curve['bt_x'].unique()[1])\n df_volume['Satuation Begins']=int(df_curve['sb_x'].unique()[1])\n\n try:\n df_volume['Full Satuation']=int(df_curve['fs_x'].unique()[1])\n except:\n print('out of range')\n fs_x=0\n pass\n\n df_volume['Max'] = max_x\n df_volume['var_name'] = var\n\n # plot volume against saturation level\n textstr = '\\n'.join((\n r'Breakthrough: ${}'.format(self.readable_number(int(df_volume['Break 
Through'].unique()[0])), ),\n r'Optimal: ${}'.format(self.readable_number(int(df_volume['Optimal'].unique()[0])), ),\n r'Saturation Begins: ${}'.format(self.readable_number(int(df_volume['Satuation Begins'].unique()[0])),),\n r'Full Saturation: ${}'.format(self.readable_number(int(df_volume['Full Satuation'].unique()[0])),),\n\n ))\n\n ax2 = sns.barplot(x=df_volume['week'], y = df_volume['var_volume'], color = '#37536d', ax = ax2)\n y_ticks2 = ax2.get_yticks()\n ax2.set_yticklabels([self.readable_number(i) for i in y_ticks2])\n\n ax2.plot('week','Break Through',data=df_volume, color='m', linewidth=5,linestyle='dashed')\n ax2.plot('week','Optimal', data=df_volume, color='g', linewidth=5,linestyle='dashed')\n ax2.plot('week','Satuation Begins', data=df_volume, color='r', linewidth=5,linestyle='dashed')\n ax2.plot('week','Full Satuation', data=df_volume, color='c', linewidth=5,linestyle='dashed')\n ax2.set_title(var +' Volume Against Weekly Saturation Levels',fontsize=20)\n ax2.set_xlabel(\"Week\",fontsize=20)\n ax2.set_xticks(df_volume['week'])\n ax2.set_xticklabels(df_volume['week'], rotation=40, ha='right')\n ax2.set_ylabel(\"Volume\",fontsize=20)\n ax2.set_yticks(y_ticks2)\n\n props = dict(boxstyle='round', alpha=0.5)\n ax2.text(0.6, 0.95, textstr, transform=ax2.transAxes, fontsize=14,\n verticalalignment='top', bbox=props)\n ax2.legend(loc='upper right', fancybox=True, framealpha=5, bbox_to_anchor=(1, 0.95))\n\n plt.tight_layout(pad=5)\n plt.show()\n" ]
[ [ "numpy.log", "pandas.to_datetime", "matplotlib.pyplot.tight_layout", "numpy.arange", "matplotlib.pyplot.subplots", "pandas.DataFrame", "numpy.max", "numpy.mean", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use", "numpy.float" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
fmahdisoltani/multimodal_madam
[ "ef3545d05b29adc61958caa832a17aa0dff1f2fe" ]
[ "examples/classification/models/mlp.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n__all__ = ['mlp']\n\n\nclass MLP(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n n_hid = 1000\n n_out = 10\n self.l1 = nn.Linear(28*28, n_hid)\n self.l2 = nn.Linear(n_hid, n_hid)\n self.l3 = nn.Linear(n_hid, n_out)\n\n def forward(self, x: torch.Tensor):\n x = x.view([-1, 28*28])\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return x\n\n\ndef mlp(**kwargs):\n model = MLP(**kwargs)\n return model\n\n" ]
[ [ "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joinquanter/jqfactor_analyzer
[ "090900a1f6ececeaa270fbed505d8a52a06680d7" ]
[ "jqfactor_analyzer/when.py" ]
[ "# -*- coding: utf-8 -*-\n\n\nimport datetime\n\nimport pandas as pd\n\n\nDateTime = datetime.datetime\nDate = datetime.date\nTime = datetime.time\nTimeDelta = datetime.timedelta\n\ntoday = datetime.date.today\nnow = datetime.datetime.now\n\n\ndef date2str(date, format='%Y-%m-%d'):\n return pd.to_datetime(date).strftime(format)\n" ]
[ [ "pandas.to_datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Julienbeaulieu/kaggle-computer-vision-competition
[ "7bc6bcb8b85d81ff1544040c403e356c0a3c8060" ]
[ "src/modeling/solver/evaluation.py" ]
[ "import torch\nimport pickle\nimport numpy as np\nfrom torch import nn\nfrom collections import Counter\nfrom yacs.config import CfgNode\nfrom typing import List, Dict, Union\nfrom sklearn.metrics import classification_report\n#from .loss import WeightedFocalLoss, SoftMaxCE\nfrom .loss import build_loss\n\nclass MultiHeadsEvaluation(nn.Module):\n def __init__(self, solver_cfg: CfgNode):\n super(MultiHeadsEvaluation, self).__init__()\n loss_cfg = solver_cfg.LOSS\n weights_path = solver_cfg.LABELS_WEIGHTS_PATH\n do_mixup = solver_cfg.MIXUP_AUGMENT\n if weights_path != '':\n weights_data = pickle.load(open(weights_path, 'rb'))\n grapheme_weights = weights_data['grapheme']\n vowel_weights = weights_data['vowel']\n consonant_weights = weights_data['consonant']\n else:\n grapheme_weights = None\n vowel_weights = None\n consonant_weights = None\n\n self.grapheme_loss_fn = build_loss(loss_cfg, do_mixup=do_mixup, weights=grapheme_weights, eps=loss_cfg.EPS, reduction=loss_cfg.REDUCTION, num_classes=168)\n self.vowel_loss_fn = build_loss(loss_cfg, do_mixup=do_mixup, weights=vowel_weights, eps=loss_cfg.EPS, reduction=loss_cfg.REDUCTION, num_classes=11)\n self.consonant_loss_fn = build_loss(loss_cfg, do_mixup=do_mixup, weights=consonant_weights, eps=loss_cfg.EPS, reduction=loss_cfg.REDUCTION, num_classes=7)\n self.do_mixup = do_mixup\n\n self.grapheme_logits_cache = []\n self.vowel_logits_cache = []\n self.consonant_logits_cache = []\n self.labels_cache = []\n self.acc_cache = []\n self.loss_cache = []\n\n def forward(self, grapheme_logits: torch.Tensor, vowel_logits: torch.Tensor, consonant_logits: torch.Tensor,\n labels: Union[tuple, torch.Tensor], js_divergence: bool = False) -> Dict:\n\n if self.do_mixup:\n labels, grapheme_labels, vowel_labels, consonant_labels = mixup_labels_helper(labels)\n else:\n grapheme_labels, vowel_labels, consonant_labels = labels[:, 0], labels[:, 1], labels[:, 2]\n\n grapheme_loss, grapheme_acc = self.grapheme_loss_fn(grapheme_logits, grapheme_labels, js_divergence)\n vowel_loss, vowel_acc = self.vowel_loss_fn(vowel_logits, vowel_labels, js_divergence)\n consonant_loss, consonant_acc = self.consonant_loss_fn(consonant_logits, consonant_labels, js_divergence)\n\n loss = grapheme_loss + vowel_loss + consonant_loss\n acc = (grapheme_acc + vowel_acc + consonant_acc) / 3\n\n eval_result = {\n 'grapheme_loss': grapheme_loss,\n 'grapheme_acc': grapheme_acc,\n 'vowel_loss': vowel_loss,\n 'vowel_acc': vowel_acc,\n 'consonant_loss': consonant_loss,\n 'consonant_acc': consonant_acc,\n 'loss': loss,\n 'acc': acc\n }\n # dump data in cache\n self.grapheme_logits_cache.append(grapheme_logits.detach().cpu().numpy())\n self.vowel_logits_cache.append(vowel_logits.detach().cpu().numpy())\n self.consonant_logits_cache.append(consonant_logits.detach().cpu().numpy())\n self.labels_cache.append(labels.detach().cpu().numpy())\n self.loss_cache.append(loss.detach().item())\n self.acc_cache.append(acc.detach().item())\n return eval_result\n\n def clear_cache(self):\n self.grapheme_logits_cache = []\n self.vowel_logits_cache = []\n self.consonant_logits_cache = []\n self.labels_cache = []\n self.loss_cache = []\n self.acc_cache = []\n\n def evalulate_on_cache(self):\n grapheme_logits_all = np.vstack(self.grapheme_logits_cache)\n vowel_logits_all = np.vstack(self.vowel_logits_cache)\n consonant_logits_all = np.vstack(self.consonant_logits_cache)\n labels_all = np.vstack(self.labels_cache)\n\n grapheme_preds = np.argmax(grapheme_logits_all, axis=1)\n vowels_preds = np.argmax(vowel_logits_all, 
axis=1)\n        consonant_preds = np.argmax(consonant_logits_all, axis=1)\n\n        grapheme_clf_result = classification_report(labels_all[:, 0], grapheme_preds, output_dict=True)\n        vowels_clf_result = classification_report(labels_all[:, 1], vowels_preds, output_dict=True)\n        consonant_clf_result = classification_report(labels_all[:, 2], consonant_preds, output_dict=True)\n        kaggle_score = (grapheme_clf_result['macro avg']['recall'] * 2 + vowels_clf_result['macro avg']['recall'] +\n                        consonant_clf_result['macro avg']['recall']) / 4\n\n        preds_labels = []\n        for idx, grapheme_pred in enumerate(grapheme_preds):\n            vowel_pred = vowels_preds[idx]\n            consonant_pred = consonant_preds[idx]\n            labels = labels_all[idx]\n            entry = {\n                'grapheme_pred': grapheme_pred,\n                'vowel_pred': vowel_pred,\n                'consonant_pred': consonant_pred,\n                'grapheme_label': labels[0],\n                'vowel_label': labels[1],\n                'consonant_label': labels[2]\n            }\n            preds_labels.append(entry)\n\n        grapheme_clf_result = clf_result_helper(grapheme_clf_result, preds_labels, 'grapheme_pred', 'grapheme_label')\n        vowels_clf_result = clf_result_helper(vowels_clf_result, preds_labels, 'vowel_pred', 'vowel_label')\n        consonant_clf_result = clf_result_helper(consonant_clf_result, preds_labels, 'consonant_pred',\n                                                 'consonant_label')\n\n        acc = np.mean(self.acc_cache)\n        loss = np.mean(self.loss_cache)\n        result = {\n            'grapheme_clf_result': grapheme_clf_result,\n            'vowel_clf_result': vowels_clf_result,\n            'consonant_clf_result': consonant_clf_result,\n            'kaggle_score': kaggle_score,\n            'preds_labels': preds_labels,\n            'acc': acc,\n            'loss': loss\n        }\n        return result\n\n\ndef mixup_labels_helper(labels: tuple):\n    grapheme_labels, shuffled_grapheme_labels, vowel_labels, shuffled_vowel_labels, \\\n    consonant_labels, shuffled_consonant_labels, lam = labels\n    labels = torch.stack([grapheme_labels, vowel_labels, consonant_labels]).transpose(0, 1)\n\n    grapheme_labels = (grapheme_labels, shuffled_grapheme_labels, lam)\n    vowel_labels = (vowel_labels, shuffled_vowel_labels, lam)\n    consonant_labels = (consonant_labels, shuffled_consonant_labels, lam)\n    return labels, grapheme_labels, vowel_labels, consonant_labels\n\n\ndef clf_result_helper(clf_result: Dict, preds_labels: List, pred_key: str, label_key: str):\n    \"\"\"\n    A helper function to get per-class results, the highest-error class, and how often that error occurs\n    :param clf_result: classifier result dict from classification_report\n    :param preds_labels: list of preds and labels\n    :param pred_key: one of [grapheme_pred, vowel_pred, consonant_pred]\n    :param label_key: one of [grapheme_label, vowel_label, consonant_label]\n    :return: list view of clf result with some added info\n    \"\"\"\n    for k in clf_result.keys():\n        if k not in ['accuracy', 'macro avg', 'weighted avg']:\n            cls = int(k)\n            preds_counts = Counter([x[pred_key] for x in preds_labels if x[label_key] == cls])\n            preds_counts = [[k, preds_counts[k]] for k in preds_counts]\n            incorrect_preds_counts = [x for x in preds_counts if x[0] != cls]\n            if len(incorrect_preds_counts) > 0:\n                highest_error_cls, highest_error_cls_num = sorted(incorrect_preds_counts, key=lambda x: x[1])[-1]\n            else:\n                highest_error_cls, highest_error_cls_num = -1, 0\n\n            clf_result[k]['class'] = cls\n            clf_result[k]['error_cls'] = highest_error_cls\n            if clf_result[k]['support'] > 0:\n                clf_result[k]['error_cls_rate'] = highest_error_cls_num / clf_result[k]['support']\n            else:\n                clf_result[k]['error_cls_rate'] = 0\n    clf_result = [clf_result[k] for k in clf_result if k not in ['accuracy', 'macro avg', 'weighted avg']]\n    return 
clf_result\n\n\ndef build_evaluator(solver_cfg: CfgNode):\n if solver_cfg.MIXUP_AUGMENT:\n nomixup_cfg = solver_cfg.clone()\n nomixup_cfg.MIXUP_AUGMENT = False\n return MultiHeadsEvaluation(nomixup_cfg), MultiHeadsEvaluation(solver_cfg)\n else:\n return MultiHeadsEvaluation(solver_cfg), None\n" ]
[ [ "numpy.argmax", "numpy.mean", "torch.stack", "sklearn.metrics.classification_report", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EFerriss/FTIR
[ "c355a56f4c1c831208b603ecd3d9dd8cbbb99299" ]
[ "pynams/styles.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 29 10:43:49 2015\n\n@author: Ferriss\n\nContains my most commonly used plotting style dictionaries (e.g., blue dots)\nand some frequently used plotting setups, e.g., 3 subplots\n\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1.parasite_axes import SubplotHost\nimport numpy as np\n\nstyle_points = {'color' : 'b', 'marker' : 'o', 'markersize' : 6,\n 'fillstyle' : 'full', 'linestyle' : 'none',}\nstyle_lightgreen = {'color' : 'lightgreen', 'linewidth' : 4}\nstyle_blue = {'color' : 'blue', 'linestyle' : '-', 'marker': None}\nstyle_points0 = {'color' : 'black', 'marker' : 'D', 'markersize' : 6,\n 'fillstyle' : 'full', 'linestyle' : 'none', \n 'linewidth' : 1, 'alpha' : 1, 'label' : 'initial',\n 'markerfacecolor' : 'k'}\nstyle_points1 = {'color' : 'red', 'marker' : '^', 'markersize' : 6,\n 'fillstyle' : 'full', 'linestyle' : 'none', \n 'linewidth' : 1, 'alpha' : 1, 'label' : 'initial',\n 'markerfacecolor' : 'red'}\nstyle_points2 = {'color' : 'indigo', 'marker' : 'o', 'markersize' : 6,\n 'fillstyle' : 'none', 'linestyle' : 'none', \n 'linewidth' : 1, 'alpha' : 1, 'label' : 'initial',\n 'markerfacecolor' : 'w'}\nstyle_points3 = {'color' : 'blue', 'marker' : 's', 'markersize' : 4,\n 'fillstyle' : 'none', 'linestyle' : 'none', \n 'linewidth' : 1, 'alpha' : 1, 'label' : 'initial'}\nstyle_points4 = {'color' : 'green', 'marker' : 'd', 'markersize' : 5,\n 'fillstyle' : 'none', 'linestyle' : 'none', \n 'linewidth' : 1, 'alpha' : 1, 'label' : 'initial'}\nstyle_points5 = {'color' : 'darkgoldenrod', 'marker' : 'D', 'markersize' : 4,\n 'fillstyle' : 'full', 'linestyle' : 'none', \n 'linewidth' : 1, 'alpha' : 1, 'label' : 'initial',\n 'markerfacecolor' : 'y'}\nstyle_points6 = {'color' : 'orangered', 'marker' : 'o', 'markersize' : 5,\n 'fillstyle' : 'none', 'linestyle' : 'none', \n 'linewidth' : 1, 'alpha' : 1, 'label' : 'initial'}\nstyle_points7 = {'color' : 'violet', 'marker' : 'v', 'markersize' : 7,\n 'fillstyle' : 'full', 'linestyle' : 'none', \n 'linewidth' : 1, 'alpha' : 1, 'label' : 'initial'}\nstyle_baseline = {'color' : 'k', 'linewidth' : 1, 'linestyle' :'-'}\nstyle_spectrum = {'color' : 'b', 'linewidth' : 3}\nstyle_spectrum_red = {'color' : 'r'}\nstyle_fitpeak = {'color' : 'g', 'linewidth' : 1}\nstyle_summed = {'color' : 'orangered', 'linewidth' : 2, 'linestyle' : '--'}\nstyle_profile = {'markeredgecolor' : 'black', 'linestyle' : '', 'marker' : 'o', \n 'markersize' : 10, 'markerfacecolor' : 'grey', 'alpha' : 0.5}\nstyle_initial = {'color' : 'blue', 'label' : 'initial', 'linestyle' : '--'} \nstyle_1a = {'linestyle' : '--', 'color' : 'k', 'marker' : None, 'linewidth' : 1}\nstyle_1 = {'linestyle' : '-', 'color' : 'k', 'marker' : None, 'linewidth' : 1}\nstyle_2 = {'color':'red', 'linewidth': 2.5, 'linestyle' : '-.'}\nstyle_2a = {'color':'green', 'linewidth': 1.5, 'linestyle' : '--'}\nstyle_3 = {'color':'orange', 'linewidth': 2., 'linestyle' : '--'}\nstyle_3a = {'color':'blue', 'linewidth': 1., 'linestyle' : '--'}\nstyle_4 = {'color':'yellow', 'linewidth':2.5, 'linestyle' : '-'}\nstyle_4a = {'color':'brown', 'linewidth':1.5, 'linestyle' : '-.'}\nstyle_5 = {'color':'green', 'linewidth':2.5, 'linestyle' : '-.'}\nstyle_6 = {'color':'cyan', 'linewidth':2., 'linestyle' : '--'}\nstyle_7 = {'color':'steelblue', 'linewidth':3., 'linestyle' : '-.'}\nstyle_8 = {'color':'violet', 'linewidth':2., 'linestyle' : '-'}\nstyle_grey = {'color' : 'grey', 'linewidth':4, 'linestyle' : 
'-'}\n\n## different profile directions\nstyle_Dx = {'fillstyle' : 'left', 'color' : 'red', 'markerfacecolor' : 'red'}\nstyle_Dy = {'fillstyle' : 'bottom', 'color' : 'green', \n 'markerfacecolor' : 'green' }\nstyle_Dz = {'fillstyle' : 'right', 'color' : 'blue', \n 'markerfacecolor' : 'blue'}\nstyle_Du = {'fillstyle' : 'none', 'color' : 'k', \n 'markerfacecolor' : 'white'}\nstyle_Dx_line = {'linestyle' : '--', 'color' : 'red'}\nstyle_Dy_line = {'linestyle' : '-.', 'color' : 'green'}\nstyle_Dz_line = {'linestyle' : ':', 'color' : 'blue'}\nstyle_Du_line = {'linestyle' : '-', 'color' : 'black'}\n\n\ndef get_iorient(orient):\n \"\"\"Converts x, y, z, u and a, b, c, u to 0, 1, 2, 3. \n This is a helper function for bound methods in class diffusivitiy \n and for determining thickness from raypath for wholeblocks\"\"\"\n if orient == 'x' or orient == 'a':\n iorient = 0\n elif orient == 'y' or orient == 'b':\n iorient = 1\n elif orient == 'z' or orient == 'c':\n iorient = 2\n elif orient == 'u' or orient == None:\n iorient = 3\n else:\n iorient = orient\n return iorient\n\n \ndef ylim_picker(spectrum, wn_xlim_left=4000, wn_xlim_right=3000, pad_top=0.1, \n pad_bot=0., raw_data=False):\n \"\"\"\n Takes a Spectrum object and returns reasonable min and max values for \n y-axis of plots based on the absorbance values for the specified wavenumber\n range and padded top and bottom with pad variable\n \"\"\"\n try:\n if spectrum.thickness_microns is None:\n absorbance = spectrum.abs_raw\n else:\n spectrum.start_at_zero(wn_xlim_left=wn_xlim_left,\n wn_xlim_right=wn_xlim_right)\n absorbance = spectrum.abs_full_cm\n except AttributeError:\n absorbance = spectrum.abs_raw\n \n idx_lo = (np.abs(spectrum.wn_full-wn_xlim_right)).argmin()\n idx_hi = (np.abs(spectrum.wn_full-wn_xlim_left)).argmin()\n \n y = absorbance[idx_lo:idx_hi]\n\n bottom = min(y) \n top = max(y)\n ylow = bottom - pad_bot\n yhigh = top + pad_top\n\n return ylow, yhigh\n \n\ndef make_line_style(direction, style_marker):\n \"\"\"Take direction and marker style and return line style dictionary\n that reflects the direction (x, y, z, or u for unoriented) with the \n color of the base style\"\"\"\n if direction == 'x':\n d = style_Dx_line\n if direction == 'y':\n d = style_Dy_line\n if direction == 'z':\n d = style_Dz_line\n if direction == 'u':\n d = style_Du_line\n d.update({'linewidth' : 2})\n return d\n\n\ndef plot_spectrum_outline(size_inches=(6, 6), shrinker=0.15,\n figaxis=None, wn_xlim_left=4000., \n wn_xlim_right=3000., pad_top=0.1, \n pad_bot=0., raw_data=False):\n \"\"\"\n Makes a standard figure outline for plotting FTIR spectra.\n Returns the figure and axis handles.\n \"\"\"\n if figaxis is None:\n f, ax = plt.subplots(figsize=size_inches)\n else:\n ax = figaxis\n ax.set_xlabel('Wavenumber (cm$^{-1})$')\n ax.set_ylabel('Absorbance (cm$^{-1})$')\n ax.set_xlim(wn_xlim_left, wn_xlim_right)\n ax.grid() \n box = ax.get_position()\n ax.set_position([box.x0 + box.width*shrinker, \n box.y0 + box.height*shrinker, \n box.width*(1.0-shrinker), \n box.height*(1.0-shrinker)])\n\n plt.setp(ax.get_xticklabels(), rotation=45)\n \n if figaxis is None:\n return f, ax\n \n\ndef plot_area_profile_outline(centered=True, peakwn=None,\n set_size=(6.5, 4), ytop=1.2, \n wholeblock=False, heights_instead=False,\n show_water_ppm=True):\n \"\"\"\n Set up area profile outline and style defaults. 
\n Default is for 0 to be the middle of the profile (centered=True).\n \"\"\"\n fig = plt.figure(figsize=set_size)\n ax = SubplotHost(fig, 1,1,1)\n fig.add_subplot(ax)\n\n ax_ppm = ax.twinx()\n ax_ppm.axis[\"top\"].major_ticklabels.set_visible(False)\n \n if show_water_ppm is True:\n pass\n else:\n ax_ppm.axis[\"right\"].major_ticklabels.set_visible(False) \n \n ax.set_xlabel('Position ($\\mu$m)')\n \n # Set y-label\n if wholeblock is True:\n if heights_instead is False:\n ax.set_ylabel('Area/Area$_0$')\n else:\n ax.set_ylabel('Height/Height$_0$') \n else:\n if heights_instead is False:\n ax.set_ylabel('Area (cm$^{-2}$)')\n else:\n ax.set_ylabel('Height (cm$^{-1}$)')\n\n ax.set_ylim(0, ytop)\n\n ax.grid()\n return fig, ax, ax_ppm\n\n\ndef plot_3panels_outline(style=None, ytop=1.2, figsize=(6.5, 2.5),\n shrinker=0.1, heights_instead=False,\n wholeblock=True, unit='microns'):\n \"\"\"Outline setup for 3 subplots for 3D profiles\"\"\"\n if style is None:\n style = style_lightgreen\n\n fig, axis3 = plt.subplots(nrows=1, ncols=3)\n fig.set_size_inches(figsize)\n\n for k in range(3):\n axis3[k].set_ylim(0, ytop)\n box = axis3[k].get_position()\n plt.setp(axis3[k].xaxis.get_majorticklabels(), rotation=45)\n axis3[k].set_position([box.x0 + box.width*shrinker, \n box.y0 + box.height*shrinker, \n box.width*(1.0-shrinker), \n box.height*(1.0-shrinker)])\n \n if wholeblock is True:\n if heights_instead is False:\n axis3[0].set_ylabel('Area/Area$_0$')\n else:\n axis3[0].set_ylabel('Height/Height$_0$')\n \n else:\n if heights_instead is False:\n axis3[0].set_ylabel('Area (cm$^{-2}$)')\n else:\n axis3[0].set_ylabel('Height (cm$^{-1}$)')\n \n axis3[0].set_xlabel('|| x')\n if unit == 'microns':\n axis3[1].set_xlabel('position ($\\mu$m) || y')\n elif unit == 'mm':\n axis3[1].set_xlabel('position (mm) || y')\n else:\n print('unit must = microns or mm')\n axis3[2].set_xlabel('|| z')\n plt.setp(axis3[1].get_yticklabels(), visible=False)\n plt.setp(axis3[2].get_yticklabels(), visible=False)\n return fig, axis3\n \n\ndef plot_3panels(positions_microns, area_profiles, lengths=None,\n styles3=[None, None, None], ytop=1.2, figaxis3=None, \n show_line_at_1=True, init=1., \n centered=True, unit='microns',\n percent_error=3., xerror=50., yerror=None,\n heights_instead=False, wholeblock=True,\n use_errorbar=False, scale=1.):\n \"\"\"\n Make 3 subplots for 3D and 3DWB profiles. 
The position and area profiles\n are passed in lists of three lists for a, b, and c.\n \"\"\"\n if figaxis3 is None:\n fig, axis3 = plot_3panels_outline(ytop=ytop, wholeblock=wholeblock,\n heights_instead=heights_instead,\n unit=unit)\n else:\n axis3 = figaxis3\n\n if lengths is None:\n lengths = np.ones(3)\n for k in range(3):\n lengths[k] = max(positions_microns[k] - min(positions_microns[k]))\n\n for k in range(3): \n x = positions_microns[k]\n if unit == 'mm':\n x = np.array(x) / 1000.\n y = np.array(area_profiles[k])\n\n if len(x) != len(y):\n print('Problem in plot_3panels')\n print('len(x):', len(x))\n print('len(y):', len(y))\n\n a = lengths[k] / 2.\n pos = x \n \n current_length = axis3[k].get_xlim()[1]\n if centered is True:\n if current_length < a:\n axis3[k].set_xlim(-a, a)\n else:\n if current_length < lengths[k]:\n axis3[k].set_xlim(0., lengths[k]) \n\n if show_line_at_1 is True:\n axis3[k].plot([-a, lengths[k]], [init, init], '--k')\n \n if styles3[k] is None:\n styles3[k] = style_lightgreen\n \n if np.isnan(y).any():\n axis3[k].text(0, axis3[k].get_ylim()[1]/2., \n 'nan values!\\n\\nProbably the\\ninitial area was 0',\n horizontalalignment='center', backgroundcolor='w',\n verticalalignment='center')\n \n elif np.isinf(y).any():\n infstring = ''.join(('inf values!\\n\\nProbably the',\n '\\ninitial area was 0\\nand a peak grew'))\n axis3[k].text(0, axis3[k].get_ylim()[1]/2., infstring,\n horizontalalignment='center', backgroundcolor='w',\n verticalalignment='center')\n\n else:\n if use_errorbar is True:\n if yerror is None:\n yerrorplot = np.array(y*scale) * percent_error/100.\n else:\n yerrorplot = np.ones_like(pos) * yerror\n axis3[k].errorbar(pos, y*scale, \n xerr=xerror, yerr=yerrorplot, **styles3[k])\n else:\n axis3[k].plot(pos, y*scale, **styles3[k])\n\n if figaxis3 is None:\n return fig, axis3 \n\n " ]
[ [ "numpy.ones_like", "numpy.abs", "numpy.isnan", "matplotlib.pyplot.subplots", "numpy.ones", "numpy.array", "numpy.isinf", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HTaeha/Deep-Learning-based-Coding-Guide-onAspect
[ "adaada4de8448fdfc2e134bc7a75abdfe639111a" ]
[ "Logging/Analysis/make_vocabulary.py" ]
[ "# Required dependencies\n# 1. NLTK\n# 2. Gensim for word2vec\n# 3. Keras with tensorflow/theano backend\nimport random\nimport sys\nimport json\nimport codecs\nimport warnings\nwarnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport keras\nimport numpy as np\nnp.random.seed(1337)\nimport json, re, nltk, string\nimport sklearn.metrics as metrics\nfrom nltk.corpus import wordnet\nfrom gensim.models import Word2Vec\nfrom keras.preprocessing import sequence\nfrom keras.models import Model, model_from_json\nfrom keras.layers import Dense, Dropout, Embedding, LSTM, Input, merge\nfrom keras.optimizers import RMSprop\nfrom keras.utils import np_utils\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import svm\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics import classification_report, confusion_matrix\nfrom sklearn.metrics import roc_curve, auc\n\n#========================================================================================\n# Initializing Hyper parameter\n#========================================================================================\n# Word2vec parameters\nmin_word_frequency_word2vec = int(sys.argv[3])\n\ninput_file = sys.argv[1]\nfiletype = sys.argv[2]\nfilename = input_file + '-' + filetype\nmodel_filename = filename \n\nmodel_count = 1\n\n# Load the word2vec model and vocabulary\nwordvec_path = \"Wordvec_Model/end2/\" + model_filename + \"_\" + str(model_count) + \".model\"\nwordvec_model = Word2Vec.load(wordvec_path)\nvocabulary = wordvec_model.wv.vocab\n\nw2c = dict()\nfor item in vocabulary:\n w2c[item] = vocabulary[item].count\nw2cSorted = sorted(w2c.items(), key=lambda x:x[1], reverse=True)\n\nfor i in range(len(w2cSorted)):\n print(w2cSorted[i])\nsys.exit(1)\n\ndef word_split(curr_data):\n result = []\n for word in curr_data:\n w_len = len(word)\n idx = 0\n while idx < w_len:\n if ord(word[idx]) - ord('0') <= 9 and 0 <= ord(word[idx]) - ord('0'):\n word = word[:idx] + ' ' + word[idx] + ' ' + word[idx+1:]\n idx += 2\n w_len += 2\n continue\n if word[idx] == '_':\n word = word[:idx] + ' ' + word[idx+1:]\n continue\n idx += 1\n w_len = len(word)\n idx = 0\n while idx < w_len:\n if word[idx].isupper():\n if idx == 0:\n pass\n elif idx == w_len -1:\n pass\n elif word[idx-1].islower():\n word = word[:idx] + ' ' + word[idx:]\n idx += 1\n w_len += 1\n elif word[idx+1].isupper() or word[idx+1] == ' ':\n pass\n else:\n word = word[:idx] + ' ' + word[idx:]\n idx += 1\n w_len += 1\n idx += 1\n word_split = word.split()\n for data in word_split:\n result.append(data.lower())\n return result\n" ]
[ [ "matplotlib.use", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jamster112233/ICS_IDS
[ "dac6abc3c8d6e840a21adedcb9e8dcfaa304b499" ]
[ "MLModules/ABD/B_PCAQDA.py" ]
[ "import numpy as np\nfrom keras.utils import np_utils\nimport pandas as pd\nimport sys\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA\nfrom sklearn.decomposition import PCA\nimport os\nfrom sklearn.externals import joblib\nfrom sklearn.metrics import f1_score\n\ntrainName = sys.argv[1]\ntestName = sys.argv[2]\n\n# Create an object called iris with the iris Data\ndftrain = pd.read_csv(filepath_or_buffer=trainName, header=None, sep=',')\ndftest = pd.read_csv(filepath_or_buffer=testName, header=None, sep=',')\n\ncols = ['Proto']\nfor i in range(1,dftrain.shape[1]):\n cols.append('Byte' + str(i))\n\ndftrain.columns=cols\ndftrain.dropna(how=\"all\", inplace=True)\ndftrain.tail()\n\ndftest.columns=cols\ndftest.dropna(how=\"all\", inplace=True)\ndftest.tail()\n\nXtrain = dftrain.ix[:,1:dftrain.shape[1]].values\nYtrain = dftrain.ix[:,0].values\nXtest = dftest.ix[:,1:dftrain.shape[1]].values\nYtest = dftest.ix[:,0].values\n\nencoder = LabelEncoder()\nencoder.fit(Ytrain)\nencYtrain = encoder.transform(Ytrain)\n\nencoder = LabelEncoder()\nencoder.fit(Ytest)\nencYtest = encoder.transform(Ytest)\n\ndirectory = \"models/ABD/QDA/\"\nif not os.path.exists(directory):\n os.makedirs(directory)\n\nlogfile = directory + \"log-0.csv\"\nwith open(logfile, \"w\") as file:\n file.write(\"PCAlevel,acc,val_acc,f1\\n\")\n\nfscores = []\naccs = []\nfor q in xrange(1,151):\n pca = PCA(n_components=q)\n Xtrain_pca = pca.fit_transform(Xtrain)\n Xtest_pca = pca.transform(Xtest)\n\n clf = QDA(priors=None, reg_param=0.0)\n clf.fit(Xtrain_pca, encYtrain)\n\n trainPred = clf.predict(Xtrain_pca)\n testPred = clf.predict(Xtest_pca)\n\n score = 0.0\n for i in xrange(0, len(trainPred)):\n if trainPred[i] == encYtrain[i]:\n score += 1\n trainAcc = float(score) / len(trainPred)\n\n score = 0.0\n for i in xrange(0, len(testPred)):\n if testPred[i] == encYtest[i]:\n score += 1\n testAcc = float(score) / len(testPred)\n\n f1 = f1_score(encYtest, testPred)\n accs.append(testAcc)\n fscores.append(f1)\n\n print(\"Train \" + str(trainAcc))\n print(\"Test \" + str(testAcc))\n print(\"F1 \" + str(f1))\n\n with open(logfile, \"a\") as file:\n file.write(str(q) + \",\" + str(trainAcc) + \",\" + str(testAcc) + \",\" + str(f1) + \"\\n\")\n\n if q == 2:\n joblib.dump(clf, 'QDA2.pkl')\n\nprint(\"Val Acc max\" + str(max(accs)))\nprint(\"FMAX \" + str(max(fscores)))\n\n# print(str(q) + \":\" + str((float(score)/len(classesPred)*100)) + \"%\")\n#\n# preds = classesPred\n# if(len(preds) > 0):\n# \tpreds = np.array(list(encoder.inverse_transform(preds)))\n#\n# df = pd.crosstab(dftest['Proto'], preds, rownames=['Actual Protocol'], colnames=['Predicted Protocol'])\n# df.to_csv('ConfusionMatrixLDA.csv')\n\n" ]
[ [ "sklearn.externals.joblib.dump", "pandas.read_csv", "sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis", "sklearn.metrics.f1_score", "sklearn.preprocessing.LabelEncoder", "sklearn.decomposition.PCA" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Mind-the-Pineapple/tpot-age
[ "2969bfa6dc5c652d5b4f00f59e9b0b23869f6bef" ]
[ "BayOptPy/benchmark/plot_tpot_boosting.py" ]
[ "import os\nimport pickle\n\nimport argparse\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use('Agg')\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom scipy.stats import ttest_ind, friedmanchisquare\n\n\nfrom BayOptPy.helperfunctions import (set_publication_style,\n plot_confusion_matrix_boosting,\n ttest_ind_corrected)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-model',\n dest='model',\n help='Define if a classification or regression problem',\n choices=['regression', 'classification', 'classification2']\n )\nparser.add_argument('-generations',\n dest='generations',\n help='Specify number of generations to use',\n type=int,\n required=True\n )\nparser.add_argument('-analysis',\n dest='analysis',\n help='Specify which type of analysis to use',\n choices=['vanilla_combi',\n 'uniform_dist',\n 'preprocessing',\n 'population',\n 'mutation'],\n required=True\n )\nargs = parser.parse_args()\n\ndef barplot_annotate_brackets(num1, num2, data, center, height, yerr=None, dh=.05, barh=.05, fs=None, maxasterix=None):\n \"\"\"\n Annotate barplot with p-values.\n\n :param num1: number of left bar to put bracket over\n :param num2: number of right bar to put bracket over\n :param data: string to write or number for generating asterixes\n :param center: centers of all bars (like plt.bar() input)\n :param height: heights of all bars (like plt.bar() input)\n :param yerr: yerrs of all bars (like plt.bar() input)\n :param dh: height offset over bar / bar + yerr in axes coordinates (0 to 1)\n :param barh: bar height in axes coordinates (0 to 1)\n :param fs: font size\n :param maxasterix: maximum number of asterixes to write (for very small p-values)\n \"\"\"\n\n if type(data) is str:\n text = data\n else:\n # * is p < 0.05\n # ** is p < 0.005\n # *** is p < 0.0005\n # etc.\n text = ''\n p = .05\n\n while data < p:\n text += '*'\n p /= 10.\n\n if maxasterix and len(text) == maxasterix:\n break\n\n if len(text) == 0:\n text = 'n. 
s.'\n\n lx, ly = center[num1], height[num1]\n rx, ry = center[num2], height[num2]\n\n if yerr:\n ly += yerr[num1]\n ry += yerr[num2]\n\n ax_y0, ax_y1 = plt.gca().get_ylim()\n dh *= (ax_y1 - ax_y0)\n barh *= (ax_y1 - ax_y0)\n\n y = max(ly, ry) + dh\n\n barx = [lx, lx, rx, rx]\n bary = [y, y+barh, y+barh, y]\n mid = ((lx+rx)/2, y+barh)\n\n plt.plot(barx, bary, c='black')\n\n kwargs = dict(ha='center', va='bottom')\n if fs is not None:\n kwargs['fontsize'] = fs\n\n plt.text(*mid, text, **kwargs)\n\n\n# Settings\n#----------------------------------------------------------------------------\nset_publication_style()\nclasses = np.array(['young', 'old', 'adult'], dtype='U10')\n\nif (args.model == 'regression'):\n if args.analysis == 'preprocessing':\n print('Pre-processing analysis')\n preprocessing_types = [\n 'vanilla',\n 'feat_selec',\n 'feat_combi',\n 'vanilla_combi']\n ind = np.arange(0, len(preprocessing_types))\n df = pd.DataFrame(columns=['mae_test', 'r_test', 'preprocessing',\n ])\n for preprocessing in preprocessing_types:\n save_path = '/code/BayOptPy/tpot_%s/Output/%s/age/%03d_generations' \\\n %(args.model, preprocessing, args.generations)\n with open(os.path.join(save_path, 'tpot_all_seeds.pckl'), 'rb') as handle:\n tpot_results = pickle.load(handle)\n tpot_results['preprocessing'] = preprocessing\n tpot_results['mean_flatten'] = np.ndarray.flatten(tpot_results['mae_test'][:10, :])\n tpot_results['mae_test'] = tpot_results['mae_test'][:10, :]\n # save information to dataframe\n df = df.append(tpot_results, ignore_index=True)\n\n # Calculate mean for every\n # Plot MAE\n plt.figure(figsize=(10,15))\n plt.bar(ind,\n [np.mean(df['mean_flatten'][0]),\n np.mean(df['mean_flatten'][1]),\n np.mean(df['mean_flatten'][2]),\n np.mean(df['mean_flatten'][3])\n ],\n yerr=[np.std(df['mean_flatten'][0]),\n np.std(df['mean_flatten'][1]),\n np.std(df['mean_flatten'][2]),\n np.mean(df['mean_flatten'][3])\n ],\n color=['b', 'r', 'g', 'orange']\n )\n plt.xticks(ind, (preprocessing_types))\n plt.ylim([4, 5])\n plt.yticks(np.arange(4, 5, .2))\n plt.ylabel('MAE')\n plt.savefig(os.path.join(save_path, 'MAE_preprocessinge.eps'))\n\n data = [df['mean_flatten'][0], df['mean_flatten'][1],\n df['mean_flatten'][2], df['mean_flatten'][3]]\n\n plt.figure()\n sns.swarmplot(data=data)\n plt.ylabel('MAE')\n plt.xticks(ind, (preprocessing_types))\n plt.savefig(os.path.join(save_path, 'MAE_preprocessing_box.eps'))\n\n # Print statistics\n f, p = friedmanchisquare(df['mean_flatten'][0], df['mean_flatten'][1],\n df['mean_flatten'][2])\n print('Statisitcs')\n print('F-value %.3f' %f)\n print('p-value: %.3f' %p)\n\n print('Try Bengio Test')\n t, p_t = ttest_ind_corrected(df['mae_test'][1], df['mae_test'][2], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n t, p_t = ttest_ind_corrected(df['mae_test'][0], df['mae_test'][2], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n t, p_t = ttest_ind_corrected(df['mae_test'][0], df['mae_test'][1], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n\n elif args.analysis == 'population':\n print('Population analysis')\n preprocessing_types = [\n '00010',\n '00100',\n '01000']\n ind = np.arange(0, len(preprocessing_types))\n df = pd.DataFrame(columns=['mae_test', 'r_test', 'preprocessing',\n ])\n for preprocessing in preprocessing_types:\n save_path = '/code/BayOptPy/tpot_%s/Output/%s/age/%s_population_size/%03d_generations' \\\n %(args.model, args.analysis, preprocessing, args.generations)\n with open(os.path.join(save_path, 'tpot_all_seeds.pckl'), 'rb') as handle:\n 
tpot_results = pickle.load(handle)\n tpot_results['preprocessing'] = preprocessing\n tpot_results['mean_flatten'] = np.ndarray.flatten(tpot_results['mae_test'])\n # save information to dataframe\n df = df.append(tpot_results, ignore_index=True)\n\n # Calculate mean for every\n # Plot MAE\n plt.figure(figsize=(10,15))\n plt.bar(ind,\n [np.mean(df['mean_flatten'][0]),\n np.mean(df['mean_flatten'][1]),\n np.mean(df['mean_flatten'][2])],\n yerr=[np.std(df['mean_flatten'][0]),\n np.std(df['mean_flatten'][1]),\n np.std(df['mean_flatten'][2]),\n ],\n color=['b', 'r', 'g']\n )\n plt.xticks(ind, (preprocessing_types))\n plt.ylim([4, 5])\n plt.yticks(np.arange(4, 5, .2))\n plt.ylabel('MAE')\n plt.savefig(os.path.join(save_path, 'MAE_preprocessinge.eps'))\n\n data = [df['mean_flatten'][0], df['mean_flatten'][1],\n df['mean_flatten'][2]]\n\n plt.figure()\n sns.swarmplot(data=data)\n plt.ylabel('MAE')\n plt.yticks(np.arange(4.3, 4.9, .1))\n plt.xticks(ind, (preprocessing_types))\n plt.savefig(os.path.join(save_path, 'MAE_preprocessing_box.eps'))\n\n # Print statistics\n f, p = friedmanchisquare(df['mean_flatten'][0], df['mean_flatten'][1],\n df['mean_flatten'][2])\n print('Statisitcs')\n print('F-value %.3f' %f)\n print('p-value: %.3f' %p)\n\n print('Try Bengio Test')\n t, p_t = ttest_ind_corrected(df['mae_test'][1], df['mae_test'][2], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n t, p_t = ttest_ind_corrected(df['mae_test'][0], df['mae_test'][2], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n t, p_t = ttest_ind_corrected(df['mae_test'][0], df['mae_test'][1], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n\n elif args.analysis == 'mutation':\n print('Mutation analysis')\n mutation_types = ['0.1_mut_0.9_cross',\n '0.5_mut_0.5_cross',\n '0.9_mut_0.1_cross']\n ind = np.arange(0, len(mutation_types))\n df = pd.DataFrame(columns=['mae_test', 'r_test', 'preprocessing',\n ])\n for mutation in mutation_types:\n save_path = '/code/BayOptPy/tpot_%s/Output/%s/age/%03d_generations/%s' \\\n %(args.model, args.analysis, args.generations, mutation)\n with open(os.path.join(save_path, 'tpot_all_seeds.pckl'), 'rb') as handle:\n tpot_results = pickle.load(handle)\n tpot_results['preprocessing'] = mutation\n tpot_results['mean_flatten'] = np.ndarray.flatten(tpot_results['mae_test'][:10, :])\n tpot_results['mae_test'] = tpot_results['mae_test'][:10, :]\n # save information to dataframe\n df = df.append(tpot_results, ignore_index=True)\n\n # Calculate mean for every\n # Plot MAE\n plt.figure(figsize=(10,15))\n plt.bar(ind,\n [np.mean(df['mean_flatten'][0]),\n np.mean(df['mean_flatten'][1]),\n np.mean(df['mean_flatten'][2])],\n yerr=[np.std(df['mean_flatten'][0]),\n np.std(df['mean_flatten'][1]),\n np.std(df['mean_flatten'][2]),\n ],\n color=['b', 'r', 'g']\n )\n plt.xticks(ind, (mutation_types))\n plt.ylim([4, 5])\n plt.yticks(np.arange(4, 5, .2))\n plt.ylabel('MAE')\n plt.savefig(os.path.join(save_path, 'MAE_preprocessinge.eps'))\n\n data = [df['mean_flatten'][0], df['mean_flatten'][1],\n df['mean_flatten'][2]]\n\n plt.figure()\n sns.swarmplot(data=data)\n plt.ylabel('MAE')\n plt.yticks(np.arange(4.3, 4.9, .1))\n plt.xticks(ind, (mutation_types))\n plt.savefig(os.path.join(save_path, 'MAE_preprocessing_box.eps'))\n\n # Print statistics\n f, p = friedmanchisquare(df['mean_flatten'][0], df['mean_flatten'][1],\n df['mean_flatten'][2])\n print('Friedman Statisitcs')\n print('F-value %.3f' %f)\n print('p-value: %.3f' %p)\n\n print('Try Bengio Test')\n t, p_t = ttest_ind_corrected(df['mae_test'][1], 
df['mae_test'][2], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n t, p_t = ttest_ind_corrected(df['mae_test'][0], df['mae_test'][2], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n t, p_t = ttest_ind_corrected(df['mae_test'][0], df['mae_test'][1], k=10,\n r=10)\n print('T: %.3f and p: %.f' %(t, p_t))\n\n else:\n # Load the dat from the saved pickle\n save_path = '/code/BayOptPy/tpot_%s/Output/%s/age/%03d_generations' \\\n %(args.model,args.analysis, args.generations)\n with open(os.path.join(save_path, 'tpot_all_seeds.pckl'), 'rb') as handle:\n tpot_results = pickle.load(handle)\n\n with open(os.path.join(save_path, 'rvr_all_seeds.pckl'), 'rb') as handle:\n rvr_results = pickle.load(handle)\n\n # MAE - Validation plot\n #----------------------------------------------------------------------------\n # Do some statistics to see if the results from tpot is significantly differen from rvr\n print('Test dataset')\n print('-------------------------------------------------------------------')\n print('MAE analysis')\n ind = np.arange(2)\n t, prob = ttest_ind_corrected(tpot_results['mae_test'][:10],\n rvr_results['mae_test'][:10], k=10, r=10)\n\n # Test how it would be with the standat TPOT\n seed_tpot_flatten = np.ndarray.flatten(tpot_results['mae_test'])\n seed_rvr_flatten = np.ndarray.flatten(rvr_results['mae_test'])\n\n t_old, prob_old = ttest_ind(seed_tpot_flatten, seed_rvr_flatten)\n print('T old method')\n print('T-statistics: %.3f, p-value: %.10f' %(t_old, prob_old))\n\n print('Mean over the different seeds')\n print('Mean %.3f Std %.5f MAE Test TPOT' %(np.mean(tpot_results['mae_test']),\n np.std(tpot_results['mae_test'])))\n print('Mean %.3f Std %.5f MAE Test RVR' %(np.mean(rvr_results['mae_test']),\n np.std(rvr_results['mae_test'])))\n print('T-statistics: %.3f, p-value: %.10f' %(t, prob))\n\n plt.figure(figsize=(10,15))\n plt.bar(ind,\n [np.mean(tpot_results['mae_test']), np.mean(rvr_results['mae_test'])],\n yerr=[np.std(tpot_results['mae_test']),\n np.std(tpot_results['mae_test'])],\n color=['b', 'r']\n )\n barplot_annotate_brackets(0, 1, '**', ind,\n height=[np.mean(tpot_results['mae_test']),\n np.mean(rvr_results['mae_test'])])\n plt.xticks(ind, ('TPOT', 'RVR'))\n plt.ylim([0, 6])\n plt.yticks(np.arange(0, 6, .5))\n plt.ylabel('MAE')\n plt.savefig(os.path.join(save_path, 'MAE_bootstrap_test.eps'))\n plt.close()\n\n\n # Pearsons Correlation Analysis\n #----------------------------------------------------------------------------\n # Pearsons Correlation - test plot\n print('Pearsons Correlation: Test dataset')\n # t, prob = ttest_ind(tpot_results['r_test'], rvr_results['r_test'])\n t, prob = ttest_ind_corrected(tpot_results['r_test'][:10],\n rvr_results['r_test'][:10],\n k=10, r=10)\n print('T-statistics: %.3f, p-value: %.25f' %(t, prob))\n print('Mean %.3f Std %.5f Pearsons TPOT' %(np.mean(tpot_results['r_test']),\n np.std(tpot_results['r_test'])))\n print('Mean %.3f Std %.5f Pearsons RVR' %(np.mean(rvr_results['r_test']),\n np.std(rvr_results['r_test'])))\n plt.figure(figsize=(10,15))\n plt.bar(ind,\n [np.mean(tpot_results['r_test']),\n np.mean(rvr_results['r_test'])],\n yerr=[np.std(tpot_results['r_test']),\n np.std(tpot_results['r_test'])],\n color=['b', 'r']\n )\n plt.xticks(ind, ('TPOT', 'RVR'))\n plt.ylim([0, 1.1])\n plt.yticks(np.arange(0, 1.1, .1))\n barplot_annotate_brackets(0, 1, 'p<.001', ind,\n height=[np.mean(tpot_results['r_test']),\n np.mean(rvr_results['r_test'])])\n plt.ylabel('Pearson\\'s Correlation')\n plt.savefig(os.path.join(save_path, 
'r_bootstrap_test.eps'))\n plt.close()\n\n\nelif args.model == 'classification':\n # Load the dat from the saved pickle\n save_path = '/code/BayOptPy/tpot_%s/Output/vanilla_combi/age/%03d_generations/' \\\n %(args.model, args.generations)\n\n with open(os.path.join(save_path, 'tpot_all_seeds.pckl'), 'rb') as handle:\n tpot_results = pickle.load(handle)\n\n with open(os.path.join(save_path, 'rvc_all_seeds.pckl'), 'rb') as handle:\n rvc_results = pickle.load(handle)\n\n # Do some statistics to see if the results from tpot is significantly differen from rvr\n print('--------------------------------------------------------')\n print('Confusion Matrix - Test dataset')\n print('--------------------------------------------------------')\n t, prob = ttest_ind(tpot_results['confusion_matrix_test'],\n rvc_results['confusion_matrix_test'], axis=0)\n print('T-statistics:')\n print(t)\n print('p-value: ')\n print(prob)\n\n\n plot_confusion_matrix_boosting(\n np.mean(tpot_results['confusion_matrix_test'], axis=0),\n np.std(tpot_results['confusion_matrix_test'], axis=0),\n classes=classes,\n title='TPOT_test')\n\n plt.savefig(os.path.join(save_path, 'tpot_test_boosting.eps'))\n\n plot_confusion_matrix_boosting(\n np.mean(rvc_results['confusion_matrix_test'], axis=0),\n np.std(rvc_results['confusion_matrix_test'], axis=0),\n classes=classes,\n title='RVC_test')\n plt.savefig(os.path.join(save_path, 'rvc_test_boosting.eps'))\n\n\n print('--------------------------------------------------------')\n print('Accuracy - Test dataset')\n print('--------------------------------------------------------')\n\n print('Mean Accuracy - tpot:')\n print(tpot_results['score_test'])\n print('Mean Accuracy - rvc:')\n print(rvc_results['score_test'])\n t, prob = ttest_ind(tpot_results['score_test'],\n rvc_results['score_test'], axis=0)\n print('TPOT - boostrap: %.3f +- %.3f' %(np.mean(tpot_results['score_test']),\n np.std(tpot_results['score_test'])))\n print('RVC - boostrap: %.3f +- %.3f' %(np.mean(rvc_results['score_test']),\n np.std(rvc_results['score_test'])))\n print('T-statistics:')\n print(t)\n print('p-value: ')\n print(prob)\n\n print('--------------------------------------------------------')\n print('Accuracy - Validation dataset')\n print('--------------------------------------------------------')\n print('Mean Accuracy - tpot: ')\n print(tpot_results['score_test'])\n print('Mean Accuracy - rvc:')\n print(rvc_results['score_test'])\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.use", "matplotlib.pyplot.ylim", "numpy.arange", "numpy.ndarray.flatten", "pandas.DataFrame", "matplotlib.pyplot.plot", "scipy.stats.friedmanchisquare", "matplotlib.pyplot.ylabel", "numpy.std", "numpy.mean", "matplotlib.pyplot.close", "matplotlib.pyplot.text", "numpy.array", "scipy.stats.ttest_ind", "matplotlib.pyplot.xticks", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "0.16", "1.8" ], "tensorflow": [] } ]
lantunes/cellpylib
[ "3868a79f02983e8eeb2ac51b1e2fc378acec5780" ]
[ "demos/wireworld_diodes_demo.py" ]
[ "import cellpylib as cpl\nimport numpy as np\nfrom matplotlib.colors import ListedColormap\n\n\ndef wireworld_rule(n, c, t):\n current_activity = n[1][1]\n if current_activity == 0: # empty\n return 0\n if current_activity == 1: # electron head\n return 2\n if current_activity == 2: # electron tail\n return 3\n if current_activity == 3: # conductor\n electron_head_count = np.count_nonzero(n == 1)\n return 1 if electron_head_count == 1 or electron_head_count == 2 else 3\n\n\ncellular_automata = np.array([[\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0],\n [2, 1, 3, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 3],\n [0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0],\n [2, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 3, 3, 3],\n [0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n]])\n\ncellular_automata = cpl.evolve2d(cellular_automata, timesteps=15,\n apply_rule=wireworld_rule, neighbourhood=\"Moore\")\n\ncpl.plot2d_animate(cellular_automata, show_grid=True, show_margin=False, scale=0.3,\n colormap=ListedColormap([\"black\", \"blue\", \"red\", \"yellow\"]))" ]
[ [ "numpy.array", "matplotlib.colors.ListedColormap", "numpy.count_nonzero" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
christofferaakre/shakespeare
[ "c2563d19232465edbda2edaeebb7b93f491512d2" ]
[ "shakespeare.py" ]
[ "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader, Dataset, WeightedRandomSampler\n\n# set to True to enable training\n# if set to False, training is skipped,\n# and the weights are loaded from the session storage.\n# this way, you can train the network once, and then\n# set train = False, and experiment with it without\n# training it again\n\ntrain = True\n\n# how many characters to predict at once when training\nseq_length = 100\n\nhidden_size = 250\nnum_layers = 1\n\n# training 10 epochs took me about 15 minutes with GPU acceleration\nn_epochs = 10\nlr = 0.01\n\n# path in session storage to save state to\nPATH = './shakespeare_net_pth'\n\n# use GPU if available, else use CPU\ndevice = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')\nprint(device)\n\n\nclass CustomDataset(Dataset):\n def __init__(self, data_file: str):\n self.data = open(data_file, 'r').read()\n vocab = sorted(set(self.data))\n self.vocab_size = len(vocab)\n self.char2idx = {ch: idx for idx, ch in enumerate(vocab)}\n self.idx2char = {idx: ch for idx, ch in enumerate(vocab)}\n def __len__(self):\n return len(self.data)\n def __getitem__(self, i):\n x = self.char2idx[self.data[i]]\n x = torch.tensor([x])\n x = F.one_hot(x, num_classes=self.vocab_size)\n \n # FloatTensor because the input needs to be type Float\n x = x.type(torch.FloatTensor)\n \n # return next character, or current character if there is no\n # next character\n t = self.char2idx[self.data[i + (i < (self.__len__() - 1))]]\n \n t = torch.tensor([t])\n return (x.to(device), t.to(device))\n\nclass Model(nn.Module):\n def __init__(self, vocab_size, hidden_size, num_layers = 1):\n super(Model, self).__init__()\n self.n_layers = num_layers\n self.vocab_size = vocab_size\n # input shape: (seq_length, 1, vocab_size)\n # output shape: (seq_length, 1, hidden_size)\n self.lstm = nn.LSTM(self.vocab_size,\n hidden_size,\n num_layers,\n batch_first=False\n )\n # input shape: (N, *, hidden_size)\n # output shape: (N, *, vocab_size)\n self.linear = nn.Linear(hidden_size, vocab_size, bias=True)\n\n def forward(self, input, states_0=None):\n output, (hn, cn) = self.lstm(input, states_0)\n scores = self.linear(output)\n return scores, (hn, cn)\n\n def generate_sample(self, x, length=500):\n x = x.view(1, 1, self.vocab_size)\n h = torch.zeros(self.n_layers, 1, hidden_size).to(device)\n c = torch.zeros(self.n_layers, 1, hidden_size).to(device)\n text = ''\n for i in range(length):\n scores, (h, c) = self.forward(x, (h, c))\n probs = F.softmax(scores, dim=2).view(self.vocab_size)\n pred = torch.tensor(list(WeightedRandomSampler(probs, 1, replacement=True)))\n x = F.one_hot(pred, num_classes=self.vocab_size)\n x = x.view(1, 1, self.vocab_size).type(torch.FloatTensor).to(device)\n next_character = idx2char[pred.item()]\n text += next_character\n return text\n\n def init_state(self):\n return (\n torch.zeros(num_layers, 1, hidden_size).to(device),\n torch.zeros(num_layers, 1, hidden_size).to(device)\n )\n\ndataset = CustomDataset(data_file='drive/MyDrive/colab/shakespeare.txt')\nchar2idx = dataset.char2idx\nidx2char = dataset.idx2char\nvocab_size = dataset.vocab_size\n\nloader = DataLoader(dataset=dataset,\n # we are not actually batching the data,\n # we are just doing one training example at a\n # time. 
this is just a trick\n                    # so we can use DataLoader to cut up\n                    # the data nicely for us so we don't have\n                    # to do it ourselves\n                    batch_size=seq_length,\n                    # data shuffling can be useful in many instances,\n                    # but in this case it would fuck up everything\n                    # since each example is a single character (or 100)\n                    # because of the 'batching', and we need to preserve\n                    # context\n                    shuffle=False\n                    )\n\n\n\nmodel = Model(vocab_size=vocab_size,\n              hidden_size=hidden_size,\n              num_layers=num_layers\n              ).to(device)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(params=model.parameters(), lr=lr)\n\n# TRAINING\n\nsample_input = None\n\nn_batches = len(dataset) // seq_length\nprint(f'n_batches: {n_batches}')\n\nif train:\n    for epoch in range(n_epochs):\n        state = model.init_state()\n        i = 0\n        for char, next_char in loader: \n            #x = x.view(seq_length, 1, vocab_size).to(device)\n            pred, (h, c) = model(char, state)\n            loss = criterion(pred.squeeze(dim=1), next_char.squeeze(dim=1))\n\n            loss.backward()\n            optimizer.step()\n            optimizer.zero_grad()\n\n            state = (h.detach(), c.detach())\n\n            i += 1\n\n            if i % 100 == 0:\n                print(f'Epoch: {epoch+1} / {n_epochs} batch {i+1} / {n_batches} Loss: {loss.item()}')\n            if i % 500 == 0:\n                sample_input = char[0]\n                sample = model.generate_sample(sample_input, length=500)\n                print(sample)\n\n\n    torch.save(model.state_dict(), PATH)\n    print(f'finished training and saved state to {PATH}')\n\n# load state\nmodel = Model(vocab_size=vocab_size,\n              hidden_size=hidden_size,\n              num_layers=num_layers\n              ).to(device)\nmodel.load_state_dict(torch.load(PATH, map_location=device))\n\n# generate a bunch of text and save it to output.txt\n# seed the generator with the first character of the dataset so this\n# also works when train = False (the training loop variables don't exist then)\nseed_char, _ = dataset[0]\nwith open('output.txt', 'w') as file:\n    for i in range(100):\n        sample = model.generate_sample(seed_char, length=1000)\n        file.write(sample)\n        file.write('\\n---------------------------------------\\n')\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.nn.functional.softmax", "torch.load", "torch.nn.LSTM", "torch.nn.functional.one_hot", "torch.zeros", "torch.utils.data.DataLoader", "torch.utils.data.WeightedRandomSampler", "torch.tensor", "torch.nn.Linear", "torch.cuda.is_available", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sanchit2843/Indian_LPR
[ "b817fe56ce325c5bd464904f8695b78a14487386" ]
[ "src/object_detection/demo_video.py" ]
[ "# test/inference script\nimport numpy as np\nimport torch\nfrom torch import nn\nimport argparse\nimport time\nimport os\nimport sys\nimport cv2\nfrom torchvision import models\nfrom tqdm import tqdm\nfrom model.fcos import FCOSDetector\nfrom torchvision import transforms\nimport matplotlib.pyplot as plt\n\n\ndef frame_extract(path):\n vidObj = cv2.VideoCapture(path)\n success = 1\n while success:\n success, image = vidObj.read()\n if success:\n yield image\n\n\ntransformation = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])\n\n\ndef normalize(image):\n return transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n )(transformation(image))\n\n\ndef preprocess_image(image):\n image = normalize(image)\n return torch.unsqueeze(image, dim=0)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--video_path\",\n type=str,\n required=True,\n default=\"./\",\n help=\"Path where all the test images are located or you can give path to video, it will break into each frame and write as a video\",\n )\n\n parser.add_argument(\n \"--weights_path\",\n type=str,\n required=True,\n default=\"./\",\n help=\"Path to weights for which inference needs to be done\",\n )\n\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"./\",\n help=\"path to output saved video\",\n )\n\n class Config:\n # backbone\n pretrained = True\n freeze_stage_1 = True\n freeze_bn = True\n\n # fpn\n fpn_out_channels = 256\n use_p5 = False\n\n # head\n class_num = 2\n use_GN_head = True\n prior = 0.01\n add_centerness = True\n cnt_on_reg = True\n\n strides = [4, 8, 16, 32, 64, 128]\n limit_range = [\n [-1, 32],\n [32, 64],\n [64, 128],\n [128, 256],\n [256, 512],\n [512, 999999],\n ]\n # inference\n score_threshold = 0.3\n nms_iou_threshold = 0.2\n max_detection_boxes_num = 150\n\n args = parser.parse_args()\n\n model = FCOSDetector(mode=\"inference\", config=Config)\n model.load_state_dict(\n torch.load(\n args.path_to_weights,\n map_location=torch.device(\"cpu\"),\n )\n )\n\n model = model.eval().cuda()\n\n current_video = cv2.VideoCapture(args.path_to_images)\n fps = current_video.get(cv2.CAP_PROP_FPS)\n\n for idx, frame in enumerate(tqdm(frame_extract(args.path_to_images))):\n if idx == 0:\n out_video = cv2.VideoWriter(\n os.path.join(\n args.output_dir, args.path_to_images.split(\"/\")[-1][:-3] + \"avi\"\n ),\n cv2.VideoWriter_fourcc(\"M\", \"J\", \"P\", \"G\"),\n fps,\n (\n frame.shape[0],\n frame.shape[1],\n ),\n )\n\n image = preprocess_image(frame)\n\n with torch.no_grad():\n out = model(image.cuda())\n scores, classes, boxes = out\n boxes = boxes[0].cpu().numpy().tolist()\n classes = classes[0].cpu().numpy().tolist()\n scores = scores[0].cpu().numpy().tolist()\n\n for box in boxes:\n pt1 = (int(box[0]), int(box[1]))\n pt2 = (int(box[2]), int(box[3]))\n frame = cv2.rectangle(frame, pt1, pt2, (255, 0, 0), thickness=3)\n out_video.write(frame)\n out_video.release()\n\n\nif __name__ == \"__main__\":\n main()" ]
[ [ "torch.device", "torch.no_grad", "torch.unsqueeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ArtificialZeng/pyswarms
[ "66b3cf1f5cf335f1297fe8d6098e44dbd109fb3f" ]
[ "pyswarms/utils/plotters/formatters.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nPlot Formatters\n\nThis module implements helpful classes to format your plots or create meshes.\n\"\"\"\n\n# Import modules\nimport numpy as np\nfrom attr import attrib, attrs\nfrom attr.validators import instance_of\nfrom matplotlib import cm, colors\n\n\n@attrs\nclass Designer(object):\n \"\"\"Designer class for specifying a plot's formatting and design\n\n You can use this class for specifying design-related customizations to\n your plot. This can be passed in various functions found in the\n :mod:`pyswarms.utils.plotters` module.\n\n .. code-block :: python\n\n from pyswarms.utils.plotters import plot_cost_history\n from pyswarms.utils.plotters.formatters import Designer\n\n # Set title_fontsize into 20\n my_designer = Designer(title_fontsize=20)\n\n # Assuming we already had an optimizer ready\n plot_cost_history(cost_history, designer=my_designer)\n\n Attributes\n ----------\n figsize : tuple (default is :code:`(10,8)`)\n Overall figure size.\n title_fontsize : str, int, or float (default is :code:`large`)\n Size of the plot's title.\n text_fontsize : str, int, or float (default is :code:`medium`)\n Size of the plot's labels and legend.\n legend : str (default is :code:`Cost`)\n Label to show in the legend. For cost histories, it states\n the label of the line plot.\n label : str, list, or tuple (default is :code:`['x-axis', 'y-axis', 'z-axis']`)\n Label to show in the x, y, or z-axis. For a 3D plot, please pass\n an iterable with three elements.\n limits : list (default is :code:`[(-1, 1), (-1, 1), (-1, 1)]`)\n The x-, y-, z- limits of the axes. Pass an iterable with the number of elements\n representing the number of axes.\n colormap : matplotlib.cm.Colormap (default is :code:`cm.viridis`)\n Colormap for contour plots\n \"\"\"\n\n # Overall plot design\n figsize = attrib(type=tuple, validator=instance_of(tuple), default=(10, 8))\n title_fontsize = attrib(\n validator=instance_of((str, int, float)), default=\"large\"\n )\n text_fontsize = attrib(\n validator=instance_of((str, int, float)), default=\"medium\"\n )\n legend = attrib(validator=instance_of(str), default=\"Cost\")\n label = attrib(\n validator=instance_of((str, list, tuple)),\n default=[\"x-axis\", \"y-axis\", \"z-axis\"],\n )\n limits = attrib(\n validator=instance_of((list, tuple)),\n default=[(-1, 1), (-1, 1), (-1, 1)],\n )\n colormap = attrib(\n validator=instance_of(colors.Colormap), default=cm.viridis\n )\n\n\n@attrs\nclass Animator(object):\n \"\"\"Animator class for specifying animation behavior\n\n You can use this class to modify options on how the animation will be run\n in the :func:`pyswarms.utils.plotters.plot_contour` and\n :func:`pyswarms.utils.plotters.plot_surface` methods.\n\n .. 
code-block :: python\n\n from pyswarms.utils.plotters import plot_contour\n from pyswarms.utils.plotters.formatters import Animator\n\n # Do not repeat animation\n my_animator = Animator(repeat=False)\n\n # Assuming we already had an optimizer ready\n plot_contour(pos_history, animator=my_animator)\n\n Attributes\n ----------\n interval : int (default is :code:`80`)\n Sets the interval or speed into which the animation is played.\n repeat_delay : int, float (default is :code:`None`)\n Sets the delay before repeating the animation again.\n repeat : bool (default is :code:`True`)\n Pass :code:`False` if you don't want to repeat the animation.\n \"\"\"\n\n interval = attrib(type=int, validator=instance_of(int), default=80)\n repeat_delay = attrib(default=None)\n repeat = attrib(type=bool, validator=instance_of(bool), default=True)\n\n\n@attrs\nclass Mesher(object):\n \"\"\"Mesher class for plotting contours of objective functions\n\n This class enables drawing a surface plot of a given objective function.\n You can customize how this plot is drawn with this class. Pass an instance\n of this class to enable meshing.\n\n .. code-block :: python\n\n from pyswarms.utils.plotters import plot_surface\n from pyswarms.utils.plotters.formatters import Mesher\n from pyswarms.utils.functions import single_obj as fx\n\n # Use sphere function\n my_mesher = Mesher(func=fx.sphere)\n\n # Assuming we already had an optimizer ready\n plot_surface(pos_history, mesher=my_mesher)\n\n Attributes\n ----------\n func : callable\n Objective function to plot a surface of.\n delta : float (default is :code:`0.001`)\n Number of steps when generating the surface plot\n limits : list, tuple (default is :code:`[(-1,1), (-1,1)]`)\n The range, in each axis, where the mesh will be drawn.\n levels : list or int (default is :code:`np.arange(-2.0, 2.0, 0.070)`)\n Levels on which the contours are shown. If :code:`int` is passed,\n then `matplotlib` automatically computes for the level positions.\n alpha : float (default is :code:`0.3`)\n Transparency of the surface plot\n limits : list (default is :code:`[(-1, 1), (-1, 1)]`)\n The x-, y-, z- limits of the axes. Pass an iterable with the number of elements\n representing the number of axes.\n \"\"\"\n\n func = attrib()\n # For mesh creation\n delta = attrib(type=float, default=0.001)\n limits = attrib(\n validator=instance_of((list, tuple)), default=[(-1, 1), (-1, 1)]\n )\n levels = attrib(type=list, default=np.arange(-2.0, 2.0, 0.070))\n # Surface transparency\n alpha = attrib(type=float, validator=instance_of(float), default=0.3)\n\n def compute_history_3d(self, pos_history):\n \"\"\"Compute a 3D position matrix\n\n The first two columns are the 2D position in the x and y axes\n respectively, while the third column is the fitness on that given\n position.\n\n Parameters\n ----------\n pos_history : numpy.ndarray\n Two-dimensional position matrix history of shape\n :code:`(iterations, n_particles, 2)`\n\n Returns\n -------\n numpy.ndarray\n 3D position matrix of shape :code:`(iterations, n_particles, 3)`\n \"\"\"\n fitness = np.array(list(map(self.func, pos_history)))\n return np.dstack((pos_history, fitness))\n" ]
[ [ "numpy.arange", "numpy.dstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
huangshunliang/Lasagne_h
[ "359ea1b9f12678c3523c0cb100f646528d49df9e" ]
[ "lasagne/tests/test_nonlinearities.py" ]
[ "import pytest\nimport numpy as np\nimport theano.tensor as T\n\n\nclass TestNonlinearities(object):\n def linear(self, x):\n return x\n\n def rectify(self, x):\n return x * (x > 0)\n\n def leaky_rectify(self, x):\n return x * (x > 0) + 0.01 * x * (x < 0)\n\n def leaky_rectify_0(self, x):\n return self.rectify(x)\n\n def elu(self, x, alpha=1):\n return np.where(x > 0, x, alpha * (np.expm1(x)))\n\n def selu(self, x, alpha=1, lmbda=1):\n return lmbda * np.where(x > 0, x, alpha * np.expm1(x))\n\n def selu_paper(self, x):\n return self.selu(x,\n alpha=1.6732632423543772848170429916717,\n lmbda=1.0507009873554804934193349852946)\n\n def selu_rect(self, x):\n return self.selu(x, alpha=0, lmbda=1)\n\n def selu_custom(self, x):\n return self.selu(x, alpha=0.12, lmbda=1.21)\n\n def softplus(self, x):\n return np.log1p(np.exp(x))\n\n def sigmoid(self, x):\n return 1 / (1 + np.exp(-x))\n\n def tanh(self, x):\n return np.tanh(x)\n\n def scaled_tanh(self, x):\n return np.tanh(x)\n\n def scaled_tanh_p(self, x):\n return 2.27 * np.tanh(0.5 * x)\n\n def softmax(self, x):\n return (np.exp(x).T / np.exp(x).sum(-1)).T\n\n @pytest.mark.parametrize('nonlinearity',\n ['linear', 'rectify',\n 'leaky_rectify', 'elu',\n 'selu', 'selu_paper',\n 'selu_rect', 'selu_custom',\n 'sigmoid',\n 'tanh', 'scaled_tanh',\n 'softmax', 'leaky_rectify_0',\n 'scaled_tanh_p', 'softplus'])\n def test_nonlinearity(self, nonlinearity):\n import lasagne.nonlinearities\n\n if nonlinearity == 'leaky_rectify_0':\n from lasagne.nonlinearities import LeakyRectify\n theano_nonlinearity = LeakyRectify(leakiness=0)\n elif nonlinearity == 'scaled_tanh':\n from lasagne.nonlinearities import ScaledTanH\n theano_nonlinearity = ScaledTanH()\n elif nonlinearity == 'scaled_tanh_p':\n from lasagne.nonlinearities import ScaledTanH\n theano_nonlinearity = ScaledTanH(scale_in=0.5, scale_out=2.27)\n elif nonlinearity.startswith('selu'):\n from lasagne.nonlinearities import SELU, selu\n if nonlinearity == 'selu':\n theano_nonlinearity = SELU()\n elif nonlinearity == 'selu_paper':\n theano_nonlinearity = selu\n elif nonlinearity == 'selu_rect':\n theano_nonlinearity = SELU(scale=1, scale_neg=0)\n elif nonlinearity == 'selu_custom':\n theano_nonlinearity = SELU(scale=1.21, scale_neg=0.12)\n else:\n theano_nonlinearity = getattr(lasagne.nonlinearities,\n nonlinearity)\n np_nonlinearity = getattr(self, nonlinearity)\n\n X = T.matrix()\n X0 = lasagne.utils.floatX(np.random.uniform(-3, 3, (10, 10)))\n\n theano_result = theano_nonlinearity(X).eval({X: X0})\n np_result = np_nonlinearity(X0)\n\n assert np.allclose(theano_result, np_result)\n" ]
[ [ "numpy.allclose", "numpy.expm1", "numpy.random.uniform", "numpy.tanh", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ImmersiveS/housing
[ "810272aaa79c70911dde621fa2df6288c9e5e384" ]
[ "housing.py" ]
[ "from sklearn.datasets import load_boston\r\nfrom sklearn.preprocessing import scale\r\nfrom sklearn.cross_validation import KFold\r\nfrom sklearn.cross_validation import cross_val_score\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nimport numpy as np\r\n\r\nboston = load_boston()\r\nboston.data = scale(boston.data)\r\n\r\nkf = KFold(506, n_folds=5, shuffle=True, random_state=42)\r\n\r\np = 0\r\nscore = -100\r\nfor i in np.linspace(1, 10, 200):\r\n neigh = KNeighborsRegressor(n_neighbors=5, weights='distance', metric='minkowski', p=i)\r\n current_score = (cross_val_score(neigh, boston.data, boston.target, scoring='mean_squared_error', cv=kf)).mean()\r\n\r\n if max(current_score, score) == current_score:\r\n score = current_score\r\n p = i\r\nprint('The best parameter p for Minkowski distance metric is ' + str(p))\r\n\r\n" ]
[ [ "sklearn.cross_validation.cross_val_score", "numpy.linspace", "sklearn.neighbors.KNeighborsRegressor", "sklearn.datasets.load_boston", "sklearn.preprocessing.scale", "sklearn.cross_validation.KFold" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
paquet-a/netptune_concon
[ "9b2867557eea847821934e712b80d5bdd2712bef" ]
[ "neptune/utils.py" ]
[ "#\n# Copyright (c) 2019, Neptune Labs Sp. z o.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport functools\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\n\nIS_WINDOWS = hasattr(sys, 'getwindowsversion')\n\n\ndef map_values(f_value, dictionary):\n return dict(\n (k, f_value(v)) for k, v in dictionary.items()\n )\n\n\ndef map_keys(f_key, dictionary):\n return dict(\n (f_key(k), v) for k, v in dictionary.items()\n )\n\n\ndef as_list(value):\n if value is None or isinstance(value, list):\n return value\n else:\n return [value]\n\n\ndef align_channels_on_x(dataframe):\n channel_dfs, common_x = _split_df_by_stems(dataframe)\n return merge_dataframes([common_x] + channel_dfs, on='x', how='outer')\n\n\ndef get_channel_name_stems(columns):\n return list(set([col[2:] for col in columns]))\n\n\ndef merge_dataframes(dataframes, on, how='outer'):\n merged_df = functools.reduce(lambda left, right: \\\n pd.merge(left, right, on=on, how=how), dataframes)\n return merged_df\n\n\ndef is_float(value):\n try:\n _ = float(value)\n except ValueError:\n return False\n else:\n return True\n\n\ndef file_contains(filename, text):\n for line in open(filename):\n if text in line:\n return True\n return False\n\n\ndef in_docker():\n cgroup_file = '/proc/self/cgroup'\n return os.path.exists('./dockerenv') or (os.path.exists(cgroup_file) and file_contains(cgroup_file, text='docker'))\n\ndef is_notebook():\n try:\n # pylint: disable=pointless-statement,undefined-variable\n get_ipython\n return True\n except Exception:\n return False\n\ndef _split_df_by_stems(df):\n channel_dfs, x_vals = [], []\n for stem in get_channel_name_stems(df.columns):\n channel_df = df[['x_{}'.format(stem), 'y_{}'.format(stem)]]\n channel_df.columns = ['x', stem]\n channel_df = channel_df.dropna()\n channel_dfs.append(channel_df)\n x_vals.extend(channel_df['x'].tolist())\n common_x = pd.DataFrame({'x': np.unique(x_vals)}, dtype=float)\n return channel_dfs, common_x\n" ]
[ [ "pandas.merge", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
ducky777/posenet-python
[ "e27d37f31f5402c0770fee8b944f69a7c0289d03" ]
[ "tfjs_graph_converter/optimization.py" ]
[ "# SPDX-License-Identifier: MIT\n# Copyright © 2020 Patrick Levin\n\"\"\"Graph optimization functions\"\"\"\n\nfrom typing import List\n\nimport tensorflow as tf\n\nfrom tensorflow.compat.v1 import GraphKeys\nfrom tensorflow.core.framework.graph_pb2 import GraphDef\nfrom tensorflow.core.protobuf.config_pb2 import ConfigProto\nfrom tensorflow.core.protobuf.meta_graph_pb2 import SignatureDef\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.grappler import tf_optimizer\nfrom tensorflow.python.training.saver import export_meta_graph\n\nfrom tfjs_graph_converter import common as c\nfrom tfjs_graph_converter.graph_rewrite_util import replace_matching_nodes\nfrom tfjs_graph_converter.util import get_input_nodes, get_output_nodes\n\n\ndef _build_signature_def(graph: tf.Graph,\n input_nodes: list,\n output_nodes: list) -> SignatureDef:\n \"\"\"Build model signature (input- and output descriptions) for a graph\"\"\"\n signature_def = SignatureDef()\n\n def add_tensor(nodes, info):\n nodes[info.name].name = info.name\n if info.dtype is not None:\n dtype = dtypes.as_dtype(info.dtype)\n shape = tf.TensorShape(info.shape)\n nodes[info.name].dtype = dtype.as_datatype_enum\n nodes[info.name].tensor_shape.CopyFrom(shape.as_proto())\n\n for input_info in input_nodes:\n op = graph.get_operation_by_name(input_info.name)\n if op.type != c.TFJS_NODE_CONST_KEY:\n add_tensor(signature_def.inputs, input_info)\n for output_info in output_nodes:\n add_tensor(signature_def.outputs, output_info)\n return signature_def\n\n\ndef _to_node_name(tensor_name: str) -> str:\n \"\"\"Remove port from tensor name to give node name\"\"\"\n return tensor_name.split(':')[0]\n\n\ndef _mark_outputs_as_train_op(graph: tf.Graph,\n signature_def: SignatureDef) -> None:\n \"\"\"Mark output nodes as training ops, so the optimizer ignores them\"\"\"\n train_op = GraphKeys.TRAIN_OP\n for _, tensor in signature_def.outputs.items():\n name = _to_node_name(tensor.name)\n graph.add_to_collection(train_op, graph.get_operation_by_name(name))\n\n\ndef _remove_unused_control_flow_inputs(graph_def: GraphKeys) -> GraphDef:\n \"\"\"The graph optimizer marks unsused nodes, which we can remove\n from the graph\n \"\"\"\n def is_unused(node):\n return (node.op == c.TFJS_NODE_PLACEHOLDER_KEY\n and node.name.startswith('unused_control_flow_input'))\n\n result, _ = replace_matching_nodes(graph_def, is_unused, lambda _: [])\n return result\n\n\ndef _run_tf_optimizer(config: ConfigProto,\n graph: tf.Graph,\n signature_def: SignatureDef) -> GraphDef:\n \"\"\"Run the TF optimizer (\"grappler\") on a graph\"\"\"\n graph_def = graph.as_graph_def()\n meta_graph = export_meta_graph(graph_def=graph_def, graph=graph)\n meta_graph.signature_def['not_used_key'].CopyFrom(signature_def)\n return tf_optimizer.OptimizeGraph(config, meta_graph)\n\n\ndef _set_optimization_options(config: ConfigProto, options: List[str]) -> None:\n \"\"\"Set options for the graph optimizer\"\"\"\n rewriter_config = config.graph_options.rewrite_options\n rewriter_config.optimizers[:] = options\n\n\ndef optimize_graph(graph: tf.Graph, level=None) -> GraphDef:\n \"\"\"Optimise a tensorflow graph for inference after modification\n\n This function optimises the given graph for inference after the graph\n may have been modified to replace known, but unsupported operations.\n Optimisation might use multiple passes and aim at CPUs or GPUs.\n\n Args:\n graph: Tensorflow v1 graph (or wrapped v2 function) to be optimised\n level: optional optimisation level; currently 
unsupported\n\n Returns:\n Optimised ``GraphDef`` message for inference or format conversion\n \"\"\"\n inputs = get_input_nodes(graph)\n outputs = get_output_nodes(graph)\n signature_def = _build_signature_def(graph, inputs, outputs)\n _mark_outputs_as_train_op(graph, signature_def)\n config = ConfigProto()\n _set_optimization_options(config, [\n 'debug_stripper', 'remap', 'constfold', 'arithmetic', 'dependency'\n ])\n optimised_graph = _run_tf_optimizer(config, graph, signature_def)\n optimised_graph = _remove_unused_control_flow_inputs(optimised_graph)\n return optimised_graph\n" ]
[ [ "tensorflow.python.grappler.tf_optimizer.OptimizeGraph", "tensorflow.TensorShape", "tensorflow.core.protobuf.meta_graph_pb2.SignatureDef", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.training.saver.export_meta_graph", "tensorflow.core.protobuf.config_pb2.ConfigProto" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AdiletGaparov/sentiment-based-song-recommender
[ "e2c74278a409dbd8494743d0081bd15131181923" ]
[ "streamlit_app/nlp_showcase.py" ]
[ "import pandas as pd\nimport numpy as np\nimport streamlit as st\nimport time\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\"\"\"\n# Sentiment-based Music Recommender\n\"\"\"\n\ndef present_subj(option):\n subject_list_dict = {'subjective-20': '0-20% / Python, R, SQL',\n 'subjective-40': '20-40% / Hadoop, Spark, Streaming',\n 'subjective-60': '40-60% /Machine Learning',\n 'subjective-80': '60-80% / Data Visualization',\n 'subjective-100': '80-100% / Ethics, Agile, Design Thinking',\n 'all': 'All'}\n\n return subject_list_dict[option]\n\ndef present_polarity(option):\n\n level_list_dict = {'very-low': 'very low / I am ready',\n 'low': 'low / Need to recap few concepts',\n 'average': 'average / I still have few more days',\n 'high': 'high / Proficiency is a good grade',\n 'very-high': 'very high / God bless Gaussian curve at IE',\n 'all': 'All'}\n\n return level_list_dict[option]\n\ndef get_polarity_threshold(polarity_scores, level):\n if level == 'very-low':\n t_max = np.percentile(polarity_scores, 20)\n t_min = np.percentile(polarity_scores, 0)\n elif level == 'low':\n t_max = np.percentile(polarity_scores, 40)\n t_min = np.percentile(polarity_scores, 20)\n elif level == 'average':\n t_max = np.percentile(polarity_scores, 60)\n t_min = np.percentile(polarity_scores, 40)\n elif level == 'high':\n t_max = np.percentile(polarity_scores, 80)\n t_min = np.percentile(polarity_scores, 60)\n elif level == 'very-high':\n t_max = np.percentile(polarity_scores, 100)\n t_min = np.percentile(polarity_scores, 80)\n else:\n t_max = np.percentile(polarity_scores, 100)\n t_min = np.percentile(polarity_scores, 0)\n\n return np.round(t_min, 4), np.round(t_max, 4)\n\ndef get_filter(df, subject, genre):\n\n if genre == 'All':\n genre_list = df.genre.unique()\n else:\n genre_list = [genre]\n\n if subject == 'subjective-20':\n filter_array = (df['subjectivity_avg'] <= 0.2) & (df['genre'].isin(genre_list))\n elif subject == 'subjective-40':\n filter_array = (df['subjectivity_avg'] > 0.2) & (df['subjectivity_avg'] <= 0.4) & (df['genre'].isin(genre_list))\n elif subject == 'subjective-60':\n filter_array = (df['subjectivity_avg'] > 0.4) & (df['subjectivity_avg'] <= 0.6) & (df['genre'].isin(genre_list))\n elif subject == 'subjective-80':\n filter_array = (df['subjectivity_avg'] > 0.6) & (df['subjectivity_avg'] <= 0.8) & (df['genre'].isin(genre_list))\n elif subject == 'subjective-100':\n filter_array = (df['subjectivity_avg'] > 0.6) & (df['subjectivity_avg'] <= 0.8) & (df['genre'].isin(genre_list))\n else:\n filter_array = df['genre'].isin(genre_list)\n\n return filter_array\n\nlyrics = pd.read_csv('lyrics_sentiment.csv')\n\nsubject_list = ['all', 'subjective-20', 'subjective-40', 'subjective-60', 'subjective-80', 'subjective-100']\nlevel_list = ['all', 'very-low', 'low', 'average', 'high', 'very-high']\n\ngenre_list = ['All', 'Pop', 'Hip-Hop', 'Metal', 'Rock', 'Country', 'Electronic', 'Folk', 'Jazz', 'R&B', 'Indie']\n\nsubject = st.sidebar.selectbox('Subjectivity level / Choose the subject?', subject_list, format_func = present_subj)\nlevel = st.sidebar.selectbox('Polarity level / How desperate are you?', level_list, format_func = present_polarity)\ngenre = st.sidebar.selectbox('What genre?', genre_list)\n\nsubj_genre_filter = get_filter(lyrics, subject, genre)\npolarity_scores = list(lyrics.loc[subj_genre_filter, 'polarity_avg'].unique())\nt_min, t_max = get_polarity_threshold(polarity_scores, level)\n\nst.write(f'Polarity ranges: from {t_min} to {t_max}')\n\nlyrics = 
lyrics[subj_genre_filter]\nlyrics = lyrics.loc[(lyrics.polarity_avg >= t_min) & (lyrics.polarity_avg <= t_max)]\n\nsns.scatterplot(data=lyrics, x='polarity_avg', y='subjectivity_avg', hue='genre')\nst.pyplot()\n\nst.table(lyrics[['artist', 'song', 'year', 'polarity_avg', 'subjectivity_avg']].sort_values('polarity_avg').head(20))\n" ]
[ [ "numpy.round", "pandas.read_csv", "numpy.percentile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ir5/onnx-chainer
[ "c4e4a900c612b3528df9ef7535b7f94c7eda2f8a" ]
[ "onnx_chainer/functions/normalization.py" ]
[ "import sys\n\nimport chainer\nimport numpy as np\n\nfrom onnx_chainer.functions.opset_version import support\nfrom onnx_chainer import onnx_helper\n\n\n@support((1, 6, 7))\ndef convert_BatchNormalization(\n func, opset_version, input_names, output_names, context):\n is_fixed_bn = len(func.inputs) > 3\n\n # NOTE(disktnk):\n # if `use_beta=False`, beta_param is None, `use_gamma=False` is same.\n beta_param = func.inputs[2].get_variable_or_none()\n gamma_param = func.inputs[1].get_variable_or_none()\n namedlink = context.get_link(beta_param) or context.get_link(gamma_param)\n\n if namedlink is not None:\n prefix, link = namedlink\n if is_fixed_bn:\n mean = link.avg_mean\n var = link.avg_var\n else:\n # on train mode, avg_mean would be updated, so make them from x\n x = func.inputs[0].get_variable().array\n mean = x.mean(axis=func.axis)\n var = x.var(axis=func.axis)\n else:\n prefix = None\n if is_fixed_bn:\n mean = func.inputs[3].get_variable().array\n var = func.inputs[4].get_variable().array\n else:\n x = func.inputs[0].get_variable().array\n mean = x.mean(axis=func.axis)\n var = x.var(axis=func.axis)\n\n def add_param(v, suffix):\n if prefix is None:\n return context.add_param(v, suffix)\n else:\n return context.add_param(\n v, '{}_{}'.format(prefix, suffix), use_original_name=True)\n\n maen_name = add_param(mean, 'avg_mean')\n var_name = add_param(var, 'avg_var')\n if is_fixed_bn:\n input_names[3] = maen_name\n input_names[4] = var_name\n else:\n input_names.extend([maen_name, var_name])\n\n if beta_param is None:\n beta_name = add_param(np.zeros_like(mean, dtype=mean.dtype), 'beta')\n input_names[2] = beta_name\n if gamma_param is None:\n gamma_name = add_param(np.ones_like(mean, dtype=mean.dtype), 'gamma')\n input_names[1] = gamma_name\n\n momentum = getattr(func, 'decay', 0.)\n\n # TODO(disktnk): On definition of ONNX's BatchNormalization operator,\n # outputs one required output and four optional outputs. 
This converter\n # must make 5 values for output and return them.\n\n if opset_version == 1:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n is_test=not chainer.config.train,\n consumed_inputs=[False, False, False, True, True],\n ),\n elif opset_version == 6:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n is_test=not chainer.config.train,\n ),\n elif opset_version == 7:\n return onnx_helper.make_node(\n 'BatchNormalization', input_names, output_names,\n epsilon=func.eps,\n momentum=momentum,\n ),\n\n\n@support((1, 6, 7))\ndef convert_FixedBatchNormalization(\n func, opset_version, input_names, output_names, context):\n return convert_BatchNormalization(\n func, opset_version, input_names, output_names, context)\n\n\ndef convert_LocalResponseNormalization(\n func, opset_version, input_names, output_names, context):\n size = int(func.n)\n return onnx_helper.make_node(\n 'LRN', input_names, output_names,\n alpha=float(func.alpha) * size,\n beta=float(func.beta),\n bias=float(func.k),\n size=size,\n ),\n\n\ndef convert_NormalizeL2(\n func, opset_version, input_names, output_names, context):\n if isinstance(func.axis, tuple) and len(func.axis) != 1:\n raise ValueError(\n 'Normalization along with multiple axes ({}) are not supported in '\n 'the ONNX\\'s LpNormalization operator.'.format(func.axis))\n if abs(func.eps - 1e-5) > sys.float_info.epsilon:\n # default value of F.normaize eps is 1e-5\n raise ValueError(\n '\\'eps\\' is not supported in the ONNX\\'s LpNormalization operator,'\n ' so that ONNX-Chainer does not accept custom values for \\'eps\\' '\n '({})'.format(func.eps))\n\n return onnx_helper.make_node(\n 'LpNormalization', input_names, output_names,\n axis=int(func.axis[0]),\n p=2,\n ),\n" ]
[ [ "numpy.ones_like", "numpy.zeros_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ysnan/motion_illusions
[ "1b7e8901cbd228a6bdfc8762f6d4756f62361b1f" ]
[ "examples/rotation_warp_image.py" ]
[ "###############################################################################\n#\n# File: rotation_warp_image.py\n#\n# Continuously warp an image by a small random rotation\n#\n# History:\n# 07-28-20 - Levi Burner - Created file\n#\n###############################################################################\n\nimport argparse\nimport time\nimport os\n\nimport cv2\nimport numpy as np\n\nimport motion_illusions.utils.flow_plot as flow_plot\nfrom motion_illusions.utils.image_tile import ImageTile\nfrom motion_illusions.utils.signal_plot import SignalPlot\nfrom motion_illusions.utils.rate_limit import RateLimit\nfrom motion_illusions.utils.time_iterator import TimeIterator\n\nfrom motion_illusions import rotation_translation_image_warp as warp\nfrom motion_illusions import opencv_optical_flow\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--image', dest='image_path', help='image to warp and display')\n parser.add_argument('--save_dir', dest='save_dir', help='folder to save generated image sequence')\n args = parser.parse_args()\n\n if args.image_path is None:\n raise ValueError('Program must be passed an image to warp')\n\n image = cv2.imread(args.image_path)\n\n # Assume an HFOV of 5 degrees\n hfov_deg = 5.0\n focal_length = (image.shape[0] / 2) / np.tan((hfov_deg/2)*180.0/np.pi)\n\n session_name = 'rotation_warp_image'\n tiler = ImageTile.get_instance(session=session_name)\n\n flow_vis_image = cv2.cvtColor(flow_plot.flow_direction_image(shape=image.shape), cv2.COLOR_HSV2BGR)\n\n # The visualization will be limited to this wall clock speed in hz\n rate_limit = RateLimit(limit_hz=60)\n\n last_wall_t = time.time()\n last_sim_t = time.time()\n\n frame_id = 0\n # The simulation will be limited to this virtual speed in hz\n for sim_t in iter(TimeIterator(sim_rate_hz=60)):\n sim_delta_t = sim_t-last_sim_t\n wall_t = time.time()\n wall_delta_t = wall_t - last_wall_t\n\n # Generate a random yaw, pitch, roll\n # It would be better to simulate a random walk on a sphere in a 4 dimensional space\n std_dev_deg = 0.5\n\n ypr = np.random.normal(loc=0.0, scale=std_dev_deg*np.pi/180.0, size=(3,))\n\n optical_flow_rot = warp.discrete_optical_flow_due_to_rotation(\n ypr[0], ypr[1], ypr[2],\n focal_length, image.shape)\n warped_image = warp.image_warp(image, optical_flow_rot)\n\n if args.save_dir:\n file_name = 'image_{:06d}.png'.format(frame_id)\n cv2.imwrite(os.path.join(args.save_dir, file_name), warped_image)\n frame_id += 1\n\n tiler.add_image(image)\n tiler.add_image(warped_image)\n\n optical_flow_rot_image = cv2.cvtColor(flow_plot.visualize_optical_flow(optical_flow_rot), cv2.COLOR_HSV2BGR)\n tiler.add_image(optical_flow_rot_image)\n\n tiler.add_image(flow_vis_image)\n\n flow_on_image = flow_plot.dense_flow_as_quiver_plot(optical_flow_rot, np.copy(image))\n tiler.add_image(flow_on_image)\n\n lk_flow_list = opencv_optical_flow.lucas_kanade(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY),\n cv2.cvtColor(warped_image, cv2.COLOR_BGR2GRAY))\n lk_flow_subtracted = flow_plot.subtract_dense_flow_from_sparse_flow(lk_flow_list, optical_flow_rot)\n\n flow_on_image = flow_plot.sparse_flow_as_quiver_plot(lk_flow_list, np.copy(image))\n tiler.add_image(flow_on_image)\n\n flow_on_image = flow_plot.sparse_flow_as_quiver_plot(lk_flow_subtracted, np.copy(image))\n tiler.add_image(flow_on_image)\n\n cv2.imshow(session_name, tiler.compose())\n cv2.setWindowTitle(session_name, session_name + ' real fps: {:.1f} sim fps: {:.1f}'.format(\n 1.0 / wall_delta_t,\n 1.0 / sim_delta_t))\n 
tiler.clear_scene()\n cv2.waitKey(1)\n\n rate_limit.sleep()\n last_sim_t = sim_t\n last_wall_t = wall_t\n" ]
[ [ "numpy.tan", "numpy.random.normal", "numpy.copy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
r4lv/VIP
[ "fa45f71e391a4ec84658e5be1d6a057cfafdcd6c" ]
[ "vip_hci/fm/negfc_speckle_noise.py" ]
[ "#! /usr/bin/env python\n\n\"\"\"\nModule with routines allowing for the estimation of the uncertainty on the \nparameters of an imaged companion associated to residual speckle noise.\n\"\"\"\n\n__author__ = 'O. Wertz, C. A. Gomez Gonzalez, V. Christiaens'\n__all__ = ['speckle_noise_uncertainty']\n\n#import itertools as itt\nfrom multiprocessing import cpu_count\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ..config.utils_conf import pool_map, iterable #eval_func_tuple\nfrom ..fm import cube_inject_companions \nfrom .negfc_simplex import firstguess_simplex\nfrom .negfc_fmerit import get_mu_and_sigma\nfrom .utils_negfc import cube_planet_free\nfrom .negfc_mcmc import confidence\n\n\ndef speckle_noise_uncertainty(cube, p_true, angle_range, derot_angles, algo, \n psfn, plsc, fwhm, aperture_radius, opp_ang=False,\n indep_ap=False, cube_ref=None, fmerit='sum', \n algo_options={}, transmission=None, mu_sigma=None, \n wedge=None, weights=None, force_rPA=False, \n nproc=None, simplex_options=None, bins=None, \n save=False, output=None, verbose=True, \n full_output=True, plot=False):\n \"\"\"\n Step-by-step procedure used to determine the speckle noise uncertainty \n associated to the parameters of a companion candidate.\n\n The steps 1 to 3 need to be performed for each angle.\n \n 1) At the true planet radial distance and for a given angle, we \\\n inject a fake companion in our planet-free cube.\n \n 2) Then, using the negative fake companion method, we determine the \\\n position and flux of the fake companion thanks to a Simplex \\\n Nelder-Mead minimization.\n \n 3) We calculate the offset between the true values of the position \\\n and the flux of the fake companion, and those obtained from the \\\n minimization. The results will be dependent on the angular \\\n position of the fake companion. \n \n The resulting distribution of deviations is then used to infer the \n 1-sigma uncertainty on each parameter by fitting a 1d-gaussian.\n \n Parameters\n ----------\n cube: numpy array\n The original ADI cube. \n p_true: tuple or numpy array with 3 elements\n The radial separation, position angle (from x=0 axis) and flux \n associated to a given companion candidate for which the speckle \n uncertainty is to be evaluated. The planet will first \n be subtracted from the cube, then used for test injections.\n angle_range: 1d numpy array\n Range of angles (counted from x=0 axis, counter-clockwise) at which the \n fake companions will be injected, in [0,360[.\n derot_angles: 1d numpy array\n Derotation angles for ADI. Length should match input cube.\n algo: python routine\n Routine to be used to model and subtract the stellar PSF. From an input\n cube, derotation angles, and optional arguments, it should return a \n post-processed frame.\n psfn: 2d numpy array\n 2d array with the normalized PSF template. The PSF image must be \n centered wrt to the array. Therefore, it is recommended to run the \n function ``metrics/normalize_psf()`` to generate a centered and \n flux-normalized PSF template. \n plsc : float\n Value of the plsc in arcsec/px. Only used for printing debug output when\n ``verbose=True``.\n fwhm: float\n FWHM of the PSF in pixels.\n aperture_radius: float\n Radius of the apertures used for NEGFC, in terms of FWHM.\n opp_ang: bool, opt\n Whether to also use opposite derotation angles to double sample size.\n Uses the same angle range.\n indep_ap: bool, opt.\n Whether to only consider independent apertures. 
If yes, will supersede\n        the range provided in angle_range, and only consider the first and last\n        values, then fit as many non-overlapping apertures as possible. \n        The empty cube will also be used with opposite derotation angles to \n        double the number of independent apertures.\n    algo_options: dict, opt.\n        Options for algo. To be provided as a dictionary. Can include ncomp \n        (for PCA), svd_mode, collapse, imlib, interpolation, scaling, delta_rot\n    transmission: numpy array, optional\n        Array with 2 columns. First column is the radial separation in pixels. \n        Second column is the off-axis transmission (between 0 and 1) at the \n        radial separation given in column 1.\n    mu_sigma: tuple of 2 floats, bool or None, opt\n        If set to None: not used, and falls back to the original version of the \n        algorithm, using fmerit.\n        If a tuple of 2 elements: should be the mean and standard deviation of \n        pixel intensities in an annulus centered on the location of the \n        companion candidate, excluding the area directly adjacent to the CC.\n        If set to anything else but None/False/tuple: will compute said mean \n        and standard deviation automatically.\n    force_rPA: bool, optional\n        Whether to only search for the optimal flux, provided (r,PA).\n    fmerit: None\n        Figure of merit to use, if mu_sigma is None.\n    simplex_options: dict\n        All the required simplex parameters, for instance {'tol':1e-08, \n        'max_iter':200}\n    bins: int or None, opt\n        Number of bins for the histogram of parameter deviations. If None, will \n        be determined automatically based on the number of injected fake \n        companions.\n    full_output: bool, optional\n        Whether to return more outputs.\n    output: str, optional\n        The name of the output file (if save is True).\n    save: bool, optional\n        If True, the results are pickled.\n    verbose: bool, optional\n        If True, information is displayed in the shell.\n    plot: bool, optional\n        Whether to plot the Gaussian fit to the distributions of parameter \n        deviations (between retrieved and injected).\n    \n    Returns:\n    --------\n    sp_unc: numpy ndarray of 3 elements\n        Uncertainties on the radius, position angle and flux of the companion,\n        respectively, associated to residual speckle noise. 
Only 1 element if\n force_rPA is set to True.\n If full_output, also returns:\n mean_dev: numpy ndarray of 3 elements\n Mean deviation for each of the 3 parameters\n p_simplex: numpy ndarray n_fc x 3\n Parameters retrieved by the simplex for the injected fake \n companions; n_fc is the number of injected \n offset: numpy ndarray n_fc x 3\n Deviations with respect to the values used for injection of the \n fake companions.\n chi2, nit, success: numpy ndarray of length n_fc \n Outputs from the simplex function for the retrieval of the \n parameters of each injected companion: chi square value, number of\n iterations and whether the simplex converged, respectively.\n \"\"\" \n \n if not nproc: # Hyper-threading \"duplicates\" the cores -> cpu_count/2\n nproc = (cpu_count()/2)\n \n if verbose:\n print('')\n print('#######################################################')\n print('### SPECKLE NOISE DETERMINATION ###')\n print('#######################################################')\n print('')\n \n r_true, theta_true, f_true = p_true\n\n if indep_ap:\n angle_span = angle_range[-1]-angle_range[0]\n n_ap = int(np.deg2rad(angle_span)*r_true/fwhm)\n delta_theta = angle_span/n_ap\n angle_range = np.linspace(angle_range[0]+delta_theta/2, \n angle_range[-1]+delta_theta/2, n_ap, \n endpoint=False)\n\n elif angle_range[0]%360 == angle_range[-1]%360:\n angle_range = angle_range[:-1]\n \n if verbose: \n print('Number of steps: {}'.format(angle_range.shape[0]))\n print('')\n \n imlib = algo_options.get('imlib','opencv')\n interpolation = algo_options.get('interpolation','lanczos4')\n \n # FIRST SUBTRACT THE TRUE COMPANION CANDIDATE\n planet_parameter = np.array([[r_true, theta_true, f_true]])\n cube_pf = cube_planet_free(planet_parameter, cube, derot_angles, psfn, plsc, \n imlib=imlib, interpolation=interpolation,\n transmission=transmission)\n\n # Measure mu and sigma once in the annulus (instead of each MCMC step)\n if isinstance(mu_sigma,tuple):\n if len(mu_sigma)!=2:\n raise TypeError(\"If a tuple, mu_sigma must have 2 elements\")\n elif mu_sigma is not None:\n ncomp = algo_options.get('ncomp', None)\n annulus_width = algo_options.get('annulus_width', int(fwhm))\n if weights is not None:\n if not len(weights)==cube.shape[0]:\n raise TypeError(\"Weights should have same length as cube axis 0\")\n norm_weights = weights/np.sum(weights)\n else:\n norm_weights=weights\n mu_sigma = get_mu_and_sigma(cube, derot_angles, ncomp, annulus_width, \n aperture_radius, fwhm, r_true, theta_true, \n cube_ref=cube_ref, wedge=wedge, algo=algo, \n weights=norm_weights, \n algo_options=algo_options)\n \n res = pool_map(nproc, _estimate_speckle_one_angle, iterable(angle_range), \n cube_pf, psfn, derot_angles, r_true, f_true, plsc, fwhm,\n aperture_radius, cube_ref, fmerit, algo, algo_options, \n transmission, mu_sigma, weights, force_rPA, simplex_options, \n imlib, interpolation, verbose=verbose)\n residuals = np.array(res)\n \n if opp_ang: # do opposite angles\n res = pool_map(nproc, _estimate_speckle_one_angle, \n iterable(angle_range), cube_pf, psfn, -derot_angles, \n r_true, f_true, plsc, fwhm, aperture_radius, cube_ref, \n fmerit, algo, algo_options, transmission, mu_sigma, \n weights, force_rPA, simplex_options, imlib, \n interpolation, verbose=verbose)\n residuals2 = np.array(res)\n residuals = np.concatenate((residuals,residuals2))\n \n if verbose: \n print(\"residuals (offsets): \", residuals[:,3], residuals[:,4],\n residuals[:,5])\n\n p_simplex = np.transpose(np.vstack((residuals[:,0], residuals[:,1],\n 
residuals[:,2])))\n offset = np.transpose(np.vstack((residuals[:,3],residuals[:,4],\n residuals[:,5])))\n print(offset)\n chi2 = residuals[:,6]\n nit = residuals[:,7]\n success = residuals[:,8]\n\n if save:\n speckles = {'r_true':r_true,\n 'angle_range': angle_range,\n 'f_true':f_true,\n 'r_simplex':residuals[:,0],\n 'theta_simplex':residuals[:,1],\n 'f_simplex':residuals[:,2],\n 'offset': offset,\n 'chi2': chi2,\n 'nit': nit,\n 'success': success}\n \n if output is None:\n output = 'speckles_noise_result'\n\n from pickle import Pickler\n with open(output,'wb') as fileSave:\n myPickler = Pickler(fileSave)\n myPickler.dump(speckles)\n\n\n # Calculate 1 sigma of distribution of deviations\n print(offset.shape)\n if force_rPA:\n offset = offset[:,2]\n print(offset.shape)\n if bins is None:\n bins = int(offset.shape[0]/10)\n mean_dev, sp_unc = confidence(offset, cfd=68.27, bins=bins, \n gaussian_fit=True, verbose=True, save=False, \n output_dir='', force=True)\n if plot:\n plt.show()\n\n if full_output:\n return sp_unc, mean_dev, p_simplex, offset, chi2, nit, success\n else:\n return sp_unc\n\n\ndef _estimate_speckle_one_angle(angle, cube_pf, psfn, angs, r_true, f_true, \n plsc, fwhm, aperture_radius, cube_ref, fmerit, \n algo, algo_options, transmission, mu_sigma, \n weights, force_rPA, simplex_options, imlib,\n interpolation, verbose=True):\n \n if verbose:\n print('Process is running for angle: {:.2f}'.format(angle))\n\n cube_fc = cube_inject_companions(cube_pf, psfn, angs, flevel=f_true, \n plsc=plsc, rad_dists=[r_true], \n n_branches=1, theta=angle, \n transmission=transmission, imlib=imlib,\n interpolation=interpolation, verbose=False)\n \n ncomp = algo_options.get('ncomp', None)\n annulus_width = algo_options.get('annulus_width', int(fwhm))\n \n res_simplex = firstguess_simplex((r_true,angle,f_true), cube_fc, angs, psfn, \n plsc, ncomp, fwhm, annulus_width, \n aperture_radius, cube_ref=cube_ref,\n fmerit=fmerit, algo=algo, \n algo_options=algo_options, imlib=imlib,\n interpolation=interpolation, \n transmission=transmission, \n mu_sigma=mu_sigma, weights=weights, \n force_rPA=force_rPA, \n options=simplex_options, \n verbose=False)\n\n if force_rPA:\n simplex_res_f, = res_simplex.x\n simplex_res_r, simplex_res_PA = r_true, angle\n else:\n simplex_res_r, simplex_res_PA, simplex_res_f = res_simplex.x\n offset_r = simplex_res_r - r_true\n offset_PA = simplex_res_PA - angle\n offset_f = simplex_res_f - f_true \n chi2 = res_simplex.fun\n nit = res_simplex.nit\n success = res_simplex.success \n \n return (simplex_res_r, simplex_res_PA, simplex_res_f, offset_r, offset_PA, \n offset_f, chi2, nit, success)" ]
[ [ "numpy.linspace", "matplotlib.pyplot.show", "numpy.concatenate", "numpy.deg2rad", "numpy.array", "numpy.sum", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
foroozandehgroup/mrpypulse
[ "6812ab8331321b46b4093514ddc8e07a19e20f81" ]
[ "tests/test_sequence.py" ]
[ "import numpy as np\nimport copy\nfrom mrpypulse import pulse, sequence, magnetization\n\n\ndef test_pc_rec():\n pc1 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n pc2 = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3])\n pc3 = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3])\n\n ph_rec = sequence.pc_rec([pc1, pc2, pc3], [-1, +2, -2])\n\n pc31 = np.array([0, 2, 0, 2, 2, 0, 2, 0, 0, 2, 0, 2, 2, 0, 2, 0])\n\n assert np.all(ph_rec == pc31)\n\n pc1 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2])\n pc2 = np.array([0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1])\n pc3 = np.array([0, 0, 1, 1, 2, 2, 3, 3, 0, 0, 1, 1, 2, 2, 3, 3])\n\n ph_rec = sequence.pc_rec([pc1, pc2, pc3], [-1, +2, -2])\n\n pc31 = np.array([0, 2, 2, 0, 0, 2, 2, 0, 2, 0, 0, 2, 2, 0, 0, 2])\n\n assert np.all(ph_rec == pc31)\n\n\ndef test_sequence_insert():\n t90min = 80e-6\n t180min = 101e-6\n bw = 506e3\n tres = 0.5e-6\n chorus = sequence.Exc_3fs(t90min, t180min, bw, tres,\n plot=False, polyfit=False, t_del=80e-6)\n chorus.insert(0, 100e-6)\n p = pulse.Parametrized(AM=\"sech\", FM=\"sech\",\n tp=500e-6, Q=5., bw=300e3, ns=1000)\n chorus.insert(1, p)\n\n chorus2 = sequence.Exc_3fs(t90min, t180min, bw, tres,\n plot=False, polyfit=False, t_del=0e-6)\n\n chorus2.insert(0, 100e-6)\n chorus2.insert(2, 80e-6)\n chorus2.insert(3, 80e-6)\n chorus2.insert(1, p)\n\n assert chorus == chorus2\n\n\ndef test_sequence_append():\n chorus = sequence.Exc_3fs(t90min=79e-6, t180min=180e-6, bw=487.52e3,\n tres=0.5e-6, plot=False, polyfit=False)\n p = pulse.Parametrized(AM=\"sech\", FM=\"sech\",\n tp=500e-6, Q=5., bw=300e3, ns=1000)\n chorus2 = copy.deepcopy(chorus)\n chorus2.insert(3, 100e-6)\n chorus2.insert(3, p)\n chorus.append(100e-6)\n chorus.append(p)\n\n assert chorus == chorus2\n\n\ndef test_Exc_3fs_init():\n \"\"\"\n Generates a short chorus with a low time resolution and test that the\n magnetization is grossly refocused on y.\n \"\"\"\n t90min = 80e-6\n t180min = 101e-6\n bw = 506e3\n tres = 2e-6\n chorus = sequence.Exc_3fs(t90min, t180min, bw, tres,\n plot=False, polyfit=True)\n\n # check that magnetization is recocused in centre part\n off = np.linspace(-0.25*bw, 0.25*bw, 10)\n magn = magnetization.simulate(\n chorus.pulses, off=off, pc=chorus.pc)\n assert np.all(magn[1, :] > 0.9)\n" ]
[ [ "numpy.all", "numpy.array", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vladimirwest/deepspeech.pytorch
[ "46e417d8cbdcf1b41f95a8aa7a76b89349f3424f" ]
[ "transcribe.py" ]
[ "import argparse\nimport warnings\n\nfrom opts import add_decoder_args, add_inference_args\nfrom utils import load_model\n\nwarnings.simplefilter('ignore')\n\nfrom decoder import GreedyDecoder\n\nimport torch\n\nfrom data.data_loader import SpectrogramParser\nfrom model import DeepSpeech\nimport os.path\nimport json\n\nparser = argparse.ArgumentParser(description='DeepSpeech transcription')\nparser = add_inference_args(parser)\nparser.add_argument('--audio-path', default='audio.wav',\n help='Audio file to predict on')\nparser.add_argument('--offsets', dest='offsets', action='store_true', help='Returns time offset information')\nparser = add_decoder_args(parser)\nargs = parser.parse_args()\n\ndef decode_results(model, decoded_output, decoded_offsets):\n results = {\n \"output\": [],\n \"_meta\": {\n \"acoustic_model\": {\n \"name\": os.path.basename(args.model_path)\n },\n \"language_model\": {\n \"name\": os.path.basename(args.lm_path) if args.lm_path else None,\n },\n \"decoder\": {\n \"lm\": args.lm_path is not None,\n \"alpha\": args.alpha if args.lm_path is not None else None,\n \"beta\": args.beta if args.lm_path is not None else None,\n \"type\": args.decoder,\n }\n }\n }\n\n for b in range(len(decoded_output)):\n for pi in range(min(args.top_paths, len(decoded_output[b]))):\n result = {'transcription': decoded_output[b][pi]}\n if args.offsets:\n result['offsets'] = decoded_offsets[b][pi].tolist()\n results['output'].append(result)\n results = u\"{}\".format(str(results))\n return results\n\n\ndef transcribe(audio_path, parser, model, decoder, device):\n spect = parser.parse_audio(audio_path).contiguous()\n spect = spect.view(1, 1, spect.size(0), spect.size(1))\n spect = spect.to(device)\n input_sizes = torch.IntTensor([spect.size(3)]).int()\n out, output_sizes = model(spect, input_sizes)\n decoded_output, decoded_offsets = decoder.decode(out, output_sizes)\n return decoded_output, decoded_offsets\n\n\nif __name__ == '__main__':\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n model = load_model(device, args.model_path, args.cuda)\n\n if args.decoder == \"beam\":\n from decoder import BeamCTCDecoder\n\n decoder = BeamCTCDecoder(model.labels, lm_path=args.lm_path, alpha=args.alpha, beta=args.beta,\n cutoff_top_n=args.cutoff_top_n, cutoff_prob=args.cutoff_prob,\n beam_width=args.beam_width, num_processes=args.lm_workers)\n else:\n decoder = GreedyDecoder(model.labels, blank_index=model.labels.index('_'))\n\n parser = SpectrogramParser(model.audio_conf, normalize=True)\n\n decoded_output, decoded_offsets = transcribe(args.audio_path, parser, model, decoder, device)\n print(decode_results(model, decoded_output, decoded_offsets))" ]
[ [ "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nmvbxcz/ddfnet
[ "777bdeb73ad00e4c8ce53323cb282073817badf9" ]
[ "train.py" ]
[ "#!/usr/bin/env python3\n\"\"\" ImageNet Training Script\n\nThis is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet\ntraining results with some of the latest networks and training techniques. It favours canonical PyTorch\nand standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed\nand training result improvements over the usual PyTorch example scripts. Repurpose as you see fit.\n\nThis script was started from an early version of the PyTorch ImageNet example\n(https://github.com/pytorch/examples/tree/master/imagenet)\n\nNVIDIA CUDA specific speedups adopted from NVIDIA Apex examples\n(https://github.com/NVIDIA/apex/tree/master/examples/imagenet)\n\nHacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman)\n\"\"\"\nimport argparse\nimport time\nimport yaml\nimport os\nimport logging\nfrom collections import OrderedDict\nfrom contextlib import suppress\nfrom datetime import datetime\n\nimport torch\nimport torch.nn as nn\nimport torchvision.utils\nfrom torch.nn.parallel import DistributedDataParallel as NativeDDP\n\nfrom timm.data import create_dataset, create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset\nfrom timm.models import create_model, resume_checkpoint, load_checkpoint, convert_splitbn_model, model_parameters\nfrom timm.utils import *\nfrom timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy, JsdCrossEntropy\nfrom timm.optim import create_optimizer\nfrom timm.scheduler import create_scheduler\nfrom timm.utils import ApexScaler, NativeScaler\n\ntry:\n from apex import amp\n from apex.parallel import DistributedDataParallel as ApexDDP\n from apex.parallel import convert_syncbn_model\n has_apex = True\nexcept ImportError:\n has_apex = False\n\nhas_native_amp = False\ntry:\n if getattr(torch.cuda.amp, 'autocast') is not None:\n has_native_amp = True\nexcept AttributeError:\n pass\n\n# register models\nimport ddf_resnet\n\ntorch.backends.cudnn.benchmark = True\n_logger = logging.getLogger('train')\n\n# The first arg parser parses out only the --config argument, this argument is used to\n# load a yaml file containing key-values that override the defaults for the main parser below\nconfig_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)\nparser.add_argument('-c', '--config', default='', type=str, metavar='FILE',\n help='YAML config file specifying default arguments')\n\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\n\n# Dataset / Model parameters\nparser.add_argument('data_dir', metavar='DIR',\n help='path to dataset')\nparser.add_argument('--dataset', '-d', metavar='NAME', default='',\n help='dataset type (default: ImageFolder/ImageTar if empty)')\nparser.add_argument('--train-split', metavar='NAME', default='train',\n help='dataset train split (default: train)')\nparser.add_argument('--val-split', metavar='NAME', default='validation',\n help='dataset validation split (default: validation)')\nparser.add_argument('--model', default='ddf_mul_resnet50', type=str, metavar='MODEL',\n help='Name of model to train (default: \"ddf_mul_resnet50\"')\nparser.add_argument('--pretrained', action='store_true', default=False,\n help='Start with pretrained version of specified network (if avail)')\nparser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',\n help='Initialize model from this checkpoint (default: none)')\nparser.add_argument('--resume', default='', 
type=str, metavar='PATH',\n help='Resume full model and optimizer state from checkpoint (default: none)')\nparser.add_argument('--no-resume-opt', action='store_true', default=False,\n help='prevent resume of optimizer state when resuming model')\nparser.add_argument('--num-classes', type=int, default=None, metavar='N',\n help='number of label classes (Model default if None)')\nparser.add_argument('--gp', default=None, type=str, metavar='POOL',\n help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.')\nparser.add_argument('--img-size', type=int, default=None, metavar='N',\n help='Image patch size (default: None => model default)')\nparser.add_argument('--input-size', default=None, nargs=3, type=int,\n metavar='N N N', help='Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty')\nparser.add_argument('--crop-pct', default=None, type=float,\n metavar='N', help='Input image center crop percent (for validation only)')\nparser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',\n help='Override mean pixel value of dataset')\nparser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',\n help='Override std deviation of of dataset')\nparser.add_argument('--interpolation', default='', type=str, metavar='NAME',\n help='Image resize interpolation type (overrides model)')\nparser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',\n help='input batch size for training (default: 32)')\nparser.add_argument('-vb', '--validation-batch-size-multiplier', type=int, default=1, metavar='N',\n help='ratio of validation batch size to training batch size (default: 1)')\n\n# Optimizer parameters\nparser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',\n help='Optimizer (default: \"sgd\"')\nparser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON',\n help='Optimizer Epsilon (default: None, use opt default)')\nparser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',\n help='Optimizer Betas (default: None, use opt default)')\nparser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='Optimizer momentum (default: 0.9)')\nparser.add_argument('--weight-decay', type=float, default=0.0001,\n help='weight decay (default: 0.0001)')\nparser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',\n help='Clip gradient norm (default: None, no clipping)')\nparser.add_argument('--clip-mode', type=str, default='norm',\n help='Gradient clipping mode. 
One of (\"norm\", \"value\", \"agc\")')\n\n\n# Learning rate schedule parameters\nparser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER',\n help='LR scheduler (default: \"step\"')\nparser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\nparser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',\n help='learning rate noise on/off epoch percentages')\nparser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',\n help='learning rate noise limit percent (default: 0.67)')\nparser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',\n help='learning rate noise std-dev (default: 1.0)')\nparser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT',\n help='learning rate cycle len multiplier (default: 1.0)')\nparser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N',\n help='learning rate cycle limit')\nparser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',\n help='warmup learning rate (default: 0.0001)')\nparser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',\n help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')\nparser.add_argument('--epochs', type=int, default=120, metavar='N',\n help='number of epochs to train (default: 120)')\nparser.add_argument('--start-epoch', default=None, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--decay-epochs', type=float, default=30, metavar='N',\n help='epoch interval to decay LR')\nparser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',\n help='epochs to warmup LR, if scheduler supports')\nparser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',\n help='epochs to cooldown LR at min_lr, after cyclic schedule ends')\nparser.add_argument('--patience-epochs', type=int, default=10, metavar='N',\n help='patience epochs for Plateau LR scheduler (default: 10')\nparser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',\n help='LR decay rate (default: 0.1)')\n\n# Augmentation & regularization parameters\nparser.add_argument('--no-aug', action='store_true', default=False,\n help='Disable all training augmentation, override other train aug args')\nparser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',\n help='Random resize scale (default: 0.08 1.0)')\nparser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO',\n help='Random resize aspect ratio (default: 0.75 1.33)')\nparser.add_argument('--hflip', type=float, default=0.5,\n help='Horizontal flip training aug probability')\nparser.add_argument('--vflip', type=float, default=0.,\n help='Vertical flip training aug probability')\nparser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',\n help='Color jitter factor (default: 0.4)')\nparser.add_argument('--aa', type=str, default='rand-m7-mstd0.5-inc1', metavar='NAME',\n help='Use AutoAugment policy. \"v0\" or \"original\". (default: rand-m7-mstd0.5-inc1)'),\nparser.add_argument('--aug-splits', type=int, default=0,\n help='Number of augmentation splits (default: 0, valid: 0 or >=2)')\nparser.add_argument('--jsd', action='store_true', default=False,\n help='Enable Jensen-Shannon Divergence + CE loss. 
Use with `--aug-splits`.')\nparser.add_argument('--reprob', type=float, default=0.4, metavar='PCT',\n help='Random erase prob (default: 0.4)')\nparser.add_argument('--remode', type=str, default='pixel',\n help='Random erase mode (default: \"pixel\")')\nparser.add_argument('--recount', type=int, default=3,\n help='Random erase count (default: 3)')\nparser.add_argument('--resplit', action='store_true', default=False,\n help='Do not random erase first (clean) augmentation split')\nparser.add_argument('--mixup', type=float, default=0.0,\n help='mixup alpha, mixup enabled if > 0. (default: 0.)')\nparser.add_argument('--cutmix', type=float, default=0.0,\n help='cutmix alpha, cutmix enabled if > 0. (default: 0.)')\nparser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,\n help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')\nparser.add_argument('--mixup-prob', type=float, default=1.0,\n help='Probability of performing mixup or cutmix when either/both is enabled')\nparser.add_argument('--mixup-switch-prob', type=float, default=0.5,\n help='Probability of switching to cutmix when both mixup and cutmix enabled')\nparser.add_argument('--mixup-mode', type=str, default='batch',\n help='How to apply mixup/cutmix params. Per \"batch\", \"pair\", or \"elem\"')\nparser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',\n help='Turn off mixup after this epoch, disabled if 0 (default: 0)')\nparser.add_argument('--smoothing', type=float, default=0.1,\n help='Label smoothing (default: 0.1)')\nparser.add_argument('--train-interpolation', type=str, default='random',\n help='Training interpolation (random, bilinear, bicubic default: \"random\")')\nparser.add_argument('--drop', type=float, default=0.0, metavar='PCT',\n help='Dropout rate (default: 0.)')\nparser.add_argument('--drop-connect', type=float, default=None, metavar='PCT',\n help='Drop connect rate, DEPRECATED, use drop-path (default: None)')\nparser.add_argument('--drop-path', type=float, default=None, metavar='PCT',\n help='Drop path rate (default: None)')\nparser.add_argument('--drop-block', type=float, default=None, metavar='PCT',\n help='Drop block rate (default: None)')\n\n# Batch norm parameters (only works with gen_efficientnet based models currently)\nparser.add_argument('--bn-tf', action='store_true', default=False,\n help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')\nparser.add_argument('--bn-momentum', type=float, default=None,\n help='BatchNorm momentum override (if not None)')\nparser.add_argument('--bn-eps', type=float, default=None,\n help='BatchNorm epsilon override (if not None)')\nparser.add_argument('--sync-bn', action='store_true',\n help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')\nparser.add_argument('--dist-bn', type=str, default='',\n help='Distribute BatchNorm stats between nodes after each epoch (\"broadcast\", \"reduce\", or \"\")')\nparser.add_argument('--split-bn', action='store_true',\n help='Enable separate BN layers per augmentation split.')\n\n# Model Exponential Moving Average\nparser.add_argument('--model-ema', action='store_true', default=False,\n help='Enable tracking moving average of model weights')\nparser.add_argument('--model-ema-force-cpu', action='store_true', default=False,\n help='Force ema to be tracked on CPU, rank=0 node only. 
Disables EMA validation.')\nparser.add_argument('--model-ema-decay', type=float, default=0.9998,\n help='decay factor for model weights moving average (default: 0.9998)')\n\n# Misc\nparser.add_argument('--seed', type=int, default=42, metavar='S',\n help='random seed (default: 42)')\nparser.add_argument('--log-interval', type=int, default=50, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--recovery-interval', type=int, default=0, metavar='N',\n help='how many batches to wait before writing recovery checkpoint')\nparser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N',\n help='number of checkpoints to keep (default: 10)')\nparser.add_argument('-j', '--workers', type=int, default=4, metavar='N',\n help='how many training processes to use (default: 4)')\nparser.add_argument('--save-images', action='store_true', default=False,\n help='save images of input batches every log interval for debugging')\nparser.add_argument('--amp', action='store_true', default=False,\n help='use NVIDIA Apex AMP or Native AMP for mixed precision training')\nparser.add_argument('--apex-amp', action='store_true', default=False,\n help='Use NVIDIA Apex AMP mixed precision')\nparser.add_argument('--native-amp', action='store_true', default=False,\n help='Use Native Torch AMP mixed precision')\nparser.add_argument('--channels-last', action='store_true', default=False,\n help='Use channels_last memory layout')\nparser.add_argument('--pin-mem', action='store_true', default=False,\n help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')\nparser.add_argument('--no-prefetcher', action='store_true', default=False,\n help='disable fast prefetcher')\nparser.add_argument('--output', default='', type=str, metavar='PATH',\n help='path to output folder (default: none, current dir)')\nparser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC',\n help='Best metric (default: \"top1\")')\nparser.add_argument('--tta', type=int, default=0, metavar='N',\n help='Test/inference time augmentation (oversampling) factor. 
0=None (default: 0)')\nparser.add_argument(\"--local_rank\", default=0, type=int)\nparser.add_argument('--use-multi-epochs-loader', action='store_true', default=False,\n help='use the multi-epochs-loader to save time at the beginning of every epoch')\nparser.add_argument('--torchscript', dest='torchscript', action='store_true',\n help='convert model torchscript for inference')\n\n\ndef _parse_args():\n # Do we have a config file to parse?\n args_config, remaining = config_parser.parse_known_args()\n if args_config.config:\n with open(args_config.config, 'r') as f:\n cfg = yaml.safe_load(f)\n parser.set_defaults(**cfg)\n\n # The main arg parser parses the rest of the args, the usual\n # defaults will have been overridden if config file specified.\n args = parser.parse_args(remaining)\n\n # Cache the args as a text string to save them in the output dir later\n args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)\n return args, args_text\n\n\ndef main():\n setup_default_logging()\n args, args_text = _parse_args()\n\n args.prefetcher = not args.no_prefetcher\n args.distributed = False\n if 'WORLD_SIZE' in os.environ:\n args.distributed = int(os.environ['WORLD_SIZE']) > 1\n args.device = 'cuda:0'\n args.world_size = 1\n args.rank = 0 # global rank\n if args.distributed:\n args.device = 'cuda:%d' % args.local_rank\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n args.world_size = torch.distributed.get_world_size()\n args.rank = torch.distributed.get_rank()\n _logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'\n % (args.rank, args.world_size))\n else:\n _logger.info('Training with a single process on 1 GPU.')\n assert args.rank >= 0\n\n # resolve AMP arguments based on PyTorch / Apex availability\n use_amp = None\n if args.amp:\n # `--amp` chooses native amp before apex (APEX ver not actively maintained)\n if has_native_amp:\n args.native_amp = True\n elif has_apex:\n args.apex_amp = True\n if args.apex_amp and has_apex:\n use_amp = 'apex'\n elif args.native_amp and has_native_amp:\n use_amp = 'native'\n elif args.apex_amp or args.native_amp:\n _logger.warning(\"Neither APEX nor native Torch AMP is available, using float32. 
\"\n \"Install NVIDA apex or upgrade to PyTorch 1.6\")\n\n torch.manual_seed(args.seed + args.rank)\n\n model = create_model(\n args.model,\n pretrained=args.pretrained,\n num_classes=args.num_classes,\n drop_rate=args.drop,\n drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path\n drop_path_rate=args.drop_path,\n drop_block_rate=args.drop_block,\n global_pool=args.gp,\n bn_tf=args.bn_tf,\n bn_momentum=args.bn_momentum,\n bn_eps=args.bn_eps,\n scriptable=args.torchscript,\n checkpoint_path=args.initial_checkpoint)\n if args.num_classes is None:\n assert hasattr(model, 'num_classes'), 'Model must have `num_classes` attr if not set on cmd line/config.'\n args.num_classes = model.num_classes # FIXME handle model default vs config num_classes more elegantly\n\n if args.local_rank == 0:\n _logger.info('Model %s created, param count: %d' %\n (args.model, sum([m.numel() for m in model.parameters()])))\n\n data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)\n\n # setup augmentation batch splits for contrastive loss or split bn\n num_aug_splits = 0\n if args.aug_splits > 0:\n assert args.aug_splits > 1, 'A split of 1 makes no sense'\n num_aug_splits = args.aug_splits\n\n # enable split bn (separate bn stats per batch-portion)\n if args.split_bn:\n assert num_aug_splits > 1 or args.resplit\n model = convert_splitbn_model(model, max(num_aug_splits, 2))\n\n # move model to GPU, enable channels last layout if set\n model.cuda()\n if args.channels_last:\n model = model.to(memory_format=torch.channels_last)\n\n # setup synchronized BatchNorm for distributed training\n if args.distributed and args.sync_bn:\n assert not args.split_bn\n if has_apex and use_amp != 'native':\n # Apex SyncBN preferred unless native amp is activated\n model = convert_syncbn_model(model)\n else:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n if args.local_rank == 0:\n _logger.info(\n 'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using '\n 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.')\n\n if args.torchscript:\n assert not use_amp == 'apex', 'Cannot use APEX AMP with torchscripted model'\n assert not args.sync_bn, 'Cannot use SyncBatchNorm with torchscripted model'\n model = torch.jit.script(model)\n\n optimizer = create_optimizer(args, model)\n\n # setup automatic mixed-precision (AMP) loss scaling and op casting\n amp_autocast = suppress # do nothing\n loss_scaler = None\n if use_amp == 'apex':\n model, optimizer = amp.initialize(model, optimizer, opt_level='O1')\n loss_scaler = ApexScaler()\n if args.local_rank == 0:\n _logger.info('Using NVIDIA APEX AMP. Training in mixed precision.')\n elif use_amp == 'native':\n amp_autocast = torch.cuda.amp.autocast\n loss_scaler = NativeScaler()\n if args.local_rank == 0:\n _logger.info('Using native Torch AMP. Training in mixed precision.')\n else:\n if args.local_rank == 0:\n _logger.info('AMP not enabled. 
Training in float32.')\n\n # optionally resume from a checkpoint\n resume_epoch = None\n if args.resume:\n resume_epoch = resume_checkpoint(\n model, args.resume,\n optimizer=None if args.no_resume_opt else optimizer,\n loss_scaler=None if args.no_resume_opt else loss_scaler,\n log_info=args.local_rank == 0)\n\n # setup exponential moving average of model weights, SWA could be used here too\n model_ema = None\n if args.model_ema:\n # Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper\n model_ema = ModelEmaV2(\n model, decay=args.model_ema_decay, device='cpu' if args.model_ema_force_cpu else None)\n if args.resume:\n load_checkpoint(model_ema.module, args.resume, use_ema=True)\n\n # setup distributed training\n if args.distributed:\n if has_apex and use_amp != 'native':\n # Apex DDP preferred unless native amp is activated\n if args.local_rank == 0:\n _logger.info(\"Using NVIDIA APEX DistributedDataParallel.\")\n model = ApexDDP(model, delay_allreduce=True)\n else:\n if args.local_rank == 0:\n _logger.info(\"Using native Torch DistributedDataParallel.\")\n model = NativeDDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1\n # NOTE: EMA model does not need to be wrapped by DDP\n\n # setup learning rate schedule and starting epoch\n lr_scheduler, num_epochs = create_scheduler(args, optimizer)\n start_epoch = 0\n if args.start_epoch is not None:\n # a specified start_epoch will always override the resume epoch\n start_epoch = args.start_epoch\n elif resume_epoch is not None:\n start_epoch = resume_epoch\n if lr_scheduler is not None and start_epoch > 0:\n lr_scheduler.step(start_epoch)\n\n if args.local_rank == 0:\n _logger.info('Scheduled epochs: {}'.format(num_epochs))\n\n # create the train and eval datasets\n dataset_train = create_dataset(\n args.dataset, root=args.data_dir, split=args.train_split, is_training=True, batch_size=args.batch_size)\n dataset_eval = create_dataset(\n args.dataset, root=args.data_dir, split=args.val_split, is_training=False, batch_size=args.batch_size)\n\n # setup mixup / cutmix\n collate_fn = None\n mixup_fn = None\n mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None\n if mixup_active:\n mixup_args = dict(\n mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,\n prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,\n label_smoothing=args.smoothing, num_classes=args.num_classes)\n if args.prefetcher:\n assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup)\n collate_fn = FastCollateMixup(**mixup_args)\n else:\n mixup_fn = Mixup(**mixup_args)\n\n # wrap dataset in AugMix helper\n if num_aug_splits > 1:\n dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)\n\n # create data loaders w/ augmentation pipeline\n train_interpolation = args.train_interpolation\n if args.no_aug or not train_interpolation:\n train_interpolation = data_config['interpolation']\n loader_train = create_loader(\n dataset_train,\n input_size=data_config['input_size'],\n batch_size=args.batch_size,\n is_training=True,\n use_prefetcher=args.prefetcher,\n no_aug=args.no_aug,\n re_prob=args.reprob,\n re_mode=args.remode,\n re_count=args.recount,\n re_split=args.resplit,\n scale=args.scale,\n ratio=args.ratio,\n hflip=args.hflip,\n vflip=args.vflip,\n color_jitter=args.color_jitter,\n auto_augment=args.aa,\n num_aug_splits=num_aug_splits,\n interpolation=train_interpolation,\n mean=data_config['mean'],\n std=data_config['std'],\n num_workers=args.workers,\n distributed=args.distributed,\n collate_fn=collate_fn,\n pin_memory=args.pin_mem,\n use_multi_epochs_loader=args.use_multi_epochs_loader\n )\n\n loader_eval = create_loader(\n dataset_eval,\n input_size=data_config['input_size'],\n batch_size=args.validation_batch_size_multiplier * args.batch_size,\n is_training=False,\n use_prefetcher=args.prefetcher,\n interpolation=data_config['interpolation'],\n mean=data_config['mean'],\n std=data_config['std'],\n num_workers=args.workers,\n distributed=args.distributed,\n crop_pct=data_config['crop_pct'],\n pin_memory=args.pin_mem,\n )\n\n # setup loss function\n if args.jsd:\n assert num_aug_splits > 1 # JSD only valid with aug splits set\n train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing).cuda()\n elif mixup_active:\n # smoothing is handled with mixup target transform\n train_loss_fn = SoftTargetCrossEntropy().cuda()\n elif args.smoothing:\n train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()\n else:\n train_loss_fn = nn.CrossEntropyLoss().cuda()\n validate_loss_fn = nn.CrossEntropyLoss().cuda()\n\n # setup checkpoint saver and eval metric tracking\n eval_metric = args.eval_metric\n best_metric = None\n best_epoch = None\n saver = None\n output_dir = ''\n if args.local_rank == 0:\n output_base = args.output if args.output else './output'\n exp_name = '-'.join([\n datetime.now().strftime(\"%Y%m%d-%H%M%S\"),\n args.model,\n str(data_config['input_size'][-1])\n ])\n output_dir = get_outdir(output_base, 'train', exp_name)\n decreasing = True if eval_metric == 'loss' else False\n saver = CheckpointSaver(\n model=model, optimizer=optimizer, args=args, model_ema=model_ema, amp_scaler=loss_scaler,\n checkpoint_dir=output_dir, recovery_dir=output_dir, decreasing=decreasing, max_history=args.checkpoint_hist)\n with open(os.path.join(output_dir, 'args.yaml'), 'w') as f:\n f.write(args_text)\n\n try:\n for epoch in range(start_epoch, num_epochs):\n if args.distributed and hasattr(loader_train.sampler, 'set_epoch'):\n loader_train.sampler.set_epoch(epoch)\n\n train_metrics = train_one_epoch(\n epoch, 
model, loader_train, optimizer, train_loss_fn, args,\n lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,\n amp_autocast=amp_autocast, loss_scaler=loss_scaler, model_ema=model_ema, mixup_fn=mixup_fn)\n\n if args.distributed and args.dist_bn in ('broadcast', 'reduce'):\n if args.local_rank == 0:\n _logger.info(\"Distributing BatchNorm running means and vars\")\n distribute_bn(model, args.world_size, args.dist_bn == 'reduce')\n\n eval_metrics = validate(model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast)\n\n if model_ema is not None and not args.model_ema_force_cpu:\n if args.distributed and args.dist_bn in ('broadcast', 'reduce'):\n distribute_bn(model_ema, args.world_size, args.dist_bn == 'reduce')\n ema_eval_metrics = validate(\n model_ema.module, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast, log_suffix=' (EMA)')\n eval_metrics = ema_eval_metrics\n\n if lr_scheduler is not None:\n # step LR for next epoch\n lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])\n\n update_summary(\n epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),\n write_header=best_metric is None)\n\n if saver is not None:\n # save proper checkpoint with eval metric\n save_metric = eval_metrics[eval_metric]\n best_metric, best_epoch = saver.save_checkpoint(epoch, metric=save_metric)\n\n except KeyboardInterrupt:\n pass\n if best_metric is not None:\n _logger.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))\n\n\ndef train_one_epoch(\n epoch, model, loader, optimizer, loss_fn, args,\n lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress,\n loss_scaler=None, model_ema=None, mixup_fn=None):\n\n if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:\n if args.prefetcher and loader.mixup_enabled:\n loader.mixup_enabled = False\n elif mixup_fn is not None:\n mixup_fn.mixup_enabled = False\n\n second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n losses_m = AverageMeter()\n\n model.train()\n\n end = time.time()\n last_idx = len(loader) - 1\n num_updates = epoch * len(loader)\n for batch_idx, (input, target) in enumerate(loader):\n last_batch = batch_idx == last_idx\n data_time_m.update(time.time() - end)\n if not args.prefetcher:\n input, target = input.cuda(), target.cuda()\n if mixup_fn is not None:\n input, target = mixup_fn(input, target)\n if args.channels_last:\n input = input.contiguous(memory_format=torch.channels_last)\n\n with amp_autocast():\n output = model(input)\n loss = loss_fn(output, target)\n\n if not args.distributed:\n losses_m.update(loss.item(), input.size(0))\n\n optimizer.zero_grad()\n if loss_scaler is not None:\n loss_scaler(\n loss, optimizer,\n clip_grad=args.clip_grad, clip_mode=args.clip_mode,\n parameters=model_parameters(model, exclude_head='agc' in args.clip_mode),\n create_graph=second_order)\n else:\n loss.backward(create_graph=second_order)\n if args.clip_grad is not None:\n dispatch_clip_grad(\n model_parameters(model, exclude_head='agc' in args.clip_mode),\n value=args.clip_grad, mode=args.clip_mode)\n optimizer.step()\n\n if model_ema is not None:\n model_ema.update(model)\n\n torch.cuda.synchronize()\n num_updates += 1\n batch_time_m.update(time.time() - end)\n if last_batch or batch_idx % args.log_interval == 0:\n lrl = [param_group['lr'] for param_group in optimizer.param_groups]\n lr = sum(lrl) / len(lrl)\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, 
args.world_size)\n losses_m.update(reduced_loss.item(), input.size(0))\n\n if args.local_rank == 0:\n _logger.info(\n 'Train: {} [{:>4d}/{} ({:>3.0f}%)] '\n 'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '\n 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '\n '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '\n 'LR: {lr:.3e} '\n 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(\n epoch,\n batch_idx, len(loader),\n 100. * batch_idx / last_idx,\n loss=losses_m,\n batch_time=batch_time_m,\n rate=input.size(0) * args.world_size / batch_time_m.val,\n rate_avg=input.size(0) * args.world_size / batch_time_m.avg,\n lr=lr,\n data_time=data_time_m))\n\n if args.save_images and output_dir:\n torchvision.utils.save_image(\n input,\n os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),\n padding=0,\n normalize=True)\n\n if saver is not None and args.recovery_interval and (\n last_batch or (batch_idx + 1) % args.recovery_interval == 0):\n saver.save_recovery(epoch, batch_idx=batch_idx)\n\n if lr_scheduler is not None:\n lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)\n\n end = time.time()\n # end for\n\n if hasattr(optimizer, 'sync_lookahead'):\n optimizer.sync_lookahead()\n\n return OrderedDict([('loss', losses_m.avg)])\n\n\ndef validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):\n batch_time_m = AverageMeter()\n losses_m = AverageMeter()\n top1_m = AverageMeter()\n top5_m = AverageMeter()\n\n model.eval()\n\n end = time.time()\n last_idx = len(loader) - 1\n with torch.no_grad():\n for batch_idx, (input, target) in enumerate(loader):\n last_batch = batch_idx == last_idx\n if not args.prefetcher:\n input = input.cuda()\n target = target.cuda()\n if args.channels_last:\n input = input.contiguous(memory_format=torch.channels_last)\n\n with amp_autocast():\n output = model(input)\n if isinstance(output, (tuple, list)):\n output = output[0]\n\n # augmentation reduction\n reduce_factor = args.tta\n if reduce_factor > 1:\n output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)\n target = target[0:target.size(0):reduce_factor]\n\n loss = loss_fn(output, target)\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n if args.distributed:\n reduced_loss = reduce_tensor(loss.data, args.world_size)\n acc1 = reduce_tensor(acc1, args.world_size)\n acc5 = reduce_tensor(acc5, args.world_size)\n else:\n reduced_loss = loss.data\n\n torch.cuda.synchronize()\n\n losses_m.update(reduced_loss.item(), input.size(0))\n top1_m.update(acc1.item(), output.size(0))\n top5_m.update(acc5.item(), output.size(0))\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):\n log_name = 'Test' + log_suffix\n _logger.info(\n '{0}: [{1:>4d}/{2}] '\n 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '\n 'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '\n 'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(\n log_name, batch_idx, last_idx, batch_time=batch_time_m,\n loss=losses_m, top1=top1_m, top5=top5_m))\n\n metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])\n\n return metrics\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.jit.script", "torch.cuda.synchronize", "torch.nn.CrossEntropyLoss", "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.manual_seed", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.no_grad", "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.nn.parallel.DistributedDataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
silverman/silverman.github.io
[ "346120239ed35008a48c296643e7d8233973b324" ]
[ "build-presets/MakePresetTree.py" ]
[ "\"\"\"\nThis code uses the PySAM wrapper for the SAM GUI to generate energy yield and create a new preset tree.\nIt loops through every combination of cell technology, package type, system type, inverter loading ratio\nand location to determine the energy yield with those settings.\n\nNote: this script runs PySAM 3300 times (for each preset combination) and takes ~30 mins to finish running.\n\"\"\"\nimport pandas as pd\nimport json\nimport PySAM.Pvwattsv7 as pvwatts\nimport glob\nimport PySAM.ResourceTools as tools\nimport PySAM.PySSC as pssc\nfrom pathlib import Path # for platform independent paths\n\n# to avoid rounding issues, the lat and lon returned by pysam are in this file\n# locations maps a lat/lon pair to the string name of the location\nlocations = {}\ndf = pd.read_csv('location_coordinates.csv')\nfor index, row in df.iterrows():\n locations[row['ID']] = 'USA ' + \\\n row['State'] + ' ' + row['Place']\n\n# Define feasible system configurations\ncell_technologies = ['mono-Si', 'multi-Si', 'CdTe']\n\npackage_types = {\n 'mono-Si': ['glass-polymer backsheet', 'glass-glass'],\n 'multi-Si': ['glass-polymer backsheet', 'glass-glass'],\n 'CdTe': ['glass-glass']\n}\n\nsystem_types = {\n 'mono-Si': ('fixed tilt, utility scale', 'single-axis tracked, utility scale', 'roof-mounted, residential scale', 'roof-mounted, commercial scale', 'fixed tilt, commercial scale'),\n 'multi-Si': ('fixed tilt, utility scale', 'single-axis tracked, utility scale', 'roof-mounted, commercial scale', 'fixed tilt, commercial scale'),\n 'CdTe': ('fixed tilt, utility scale', 'single-axis tracked, utility scale', 'roof-mounted, commercial scale', 'fixed tilt, commercial scale')\n}\n\n\n# Preset values for module parameters: costs are in USD per square meter, efficiency reported as a percentage\nmodule_details = {\n 'cost_front_layer': 3.5,\n 'cost_cell': {'mono-Si': 22.2, 'multi-Si': 19.4, 'CdTe': 21.3},\n 'cost_back_layer': {'glass-polymer backsheet': 2.4, 'glass-glass': 3},\n 'cost_noncell': 13.6,\n 'efficiency': {'mono-Si': 19.5, 'multi-Si': 17.5, 'CdTe': 18.0},\n}\n\n\n# Preset values for operation & maintenance costs, reported in USD/kW(DC) per year\ncost_om = {\n 'fixed tilt, utility scale': 16.32,\n 'single-axis tracked, utility scale': 17.46,\n 'roof-mounted, residential scale': 28.94,\n 'roof-mounted, commercial scale': 18.55,\n 'fixed tilt, commercial scale': 18.71\n}\n\n# WARNING: make sure these values are the same as in the MakeBOSTree.py file\ninverter_loading_ratio = [1.1, 1.3, 1.4]\n\n# creating PySAM model with default info from json file (that doesn't have location info)\njson_file = open(\"pvwatts_inputs.json\")\npvwatts_dict = json.load(json_file)\n\n# using the pvwatts model\npv_dat = pssc.dict_to_ssc_table(pvwatts_dict, \"pvwattsv7\")\njson_file.close()\njson_model = pvwatts.wrap(pv_dat)\n\n# convert weather files into format that can be used by PySAM\nweather_folder = \"weather_data\"\nweather_files = glob.glob(weather_folder + \"/*.csv\")\n\n# tilt angles reported in degrees, needed for running PySAM\ntilt = {'fixed tilt, utility scale': 33, 'single-axis tracked, utility scale': 0,\n 'roof-mounted, residential scale': 25, 'roof-mounted, commercial scale': 10, 'fixed tilt, commercial scale': 10}\n\n# 0: fixed rack, 1: fixed roof, 2: 1 axis, 3: backtracked; needed for running PySAM\n# backtracking for the silicon single-axis tracked systems but not for CdTe\narray_type = {'mono-Si': {'fixed tilt, utility scale': 0, 'single-axis tracked, utility scale': 3,\n 'roof-mounted, residential 
scale': 1, 'roof-mounted, commercial scale': 1, 'fixed tilt, commercial scale': 0},\n 'multi-Si': {'fixed tilt, utility scale': 0, 'single-axis tracked, utility scale': 3,\n 'roof-mounted, residential scale': 1, 'roof-mounted, commercial scale': 1, 'fixed tilt, commercial scale': 0},\n 'CdTe': {'fixed tilt, utility scale': 0, 'single-axis tracked, utility scale': 2,\n 'roof-mounted, residential scale': 1, 'roof-mounted, commercial scale': 1, 'fixed tilt, commercial scale': 0}}\n\n\n# between 0 and 1, needed for running PySAM\nground_coverage_ratio = {'fixed tilt, utility scale': 0.44, 'single-axis tracked, utility scale': 0.33,\n 'roof-mounted, residential scale': 0.44, 'roof-mounted, commercial scale': 0.44, 'fixed tilt, commercial scale': 0.44}\n\npreset_tree = {}\nfor cell_technology in cell_technologies:\n if cell_technology not in preset_tree:\n preset_tree[cell_technology] = {}\n for package_type in package_types[cell_technology]:\n if package_type not in preset_tree[cell_technology]:\n preset_tree[cell_technology][package_type] = {}\n for system_type in system_types[cell_technology]:\n if system_type not in preset_tree[cell_technology][package_type]:\n preset_tree[cell_technology][package_type][system_type] = {}\n for ilr in inverter_loading_ratio:\n if ilr not in preset_tree[cell_technology][package_type][system_type]:\n preset_tree[cell_technology][package_type][system_type][ilr] = {}\n for f in weather_files:\n json_model.SolarResource.solar_resource_data = tools.SAM_CSV_to_solar_data(\n f)\n # remove folder name from file name\n f = f.replace(weather_folder + '/', '')\n # get the id from the beginning of the string\n location_id = f.split('_')[0]\n # string name of location\n location = locations[int(location_id)]\n\n # set specific inputs of PySAM model based on system type and ILR\n json_model.SystemDesign.gcr = ground_coverage_ratio[system_type]\n json_model.SystemDesign.array_type = array_type[cell_technology][system_type]\n json_model.SystemDesign.tilt = tilt[system_type]\n json_model.SystemDesign.dc_ac_ratio = ilr\n\n json_model.execute(0) # run the model\n # get the energy yield from the outputs\n energy_yield = json_model.Outputs.kwh_per_kw\n\n preset_tree[cell_technology][package_type][system_type][ilr][location] = {\n 'cost_front_layer': module_details['cost_front_layer'],\n 'cost_cell': module_details['cost_cell'][cell_technology],\n 'cost_back_layer': module_details['cost_back_layer'][package_type],\n 'cost_noncell': module_details['cost_noncell'],\n 'cost_om': cost_om[system_type],\n 'efficiency': module_details['efficiency'][cell_technology],\n 'energy_yield': energy_yield,\n 'degradation_rate': 0.7,\n 'state': location.split(' ')[1]\n }\n\n\nwith open(Path('../js/PresetTree.js'), 'w') as file:\n file.write('var preset_tree = ' + json.dumps(preset_tree,\n indent=2, separators=(',', ': ')))\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
ChetGoerzen/rockhound
[ "28bf3247752ffcf470cdb4b7d782eb5e1706f008" ]
[ "examples/stw105.py" ]
[ "r\"\"\"\nSTW105\n======\n\nThe STW105 Earth model [Kustowski2008]_ is a one-dimensional model representing average Earth\nproperties as a function of depth. The model includes the radius, density, seismic velocities,\nattenuation (Q), and anisotropic parameter (:math:`\\eta`) on the boundaries of several Earth layers.\nIt's available through IRIS Data Services Products [IRIS2011]_ in a txt file (text).\nThe data is loaded into :class:`pandas.DataFrame` objects.\n\"\"\"\n\nimport rockhound as rh\nimport matplotlib.pyplot as plt\n\n# load STW105 into a DataFrame\nstw105 = rh.fetch_stw105()\n\n# Plot density and velocities\nfig, axes = plt.subplots(1, 2, figsize=(9, 5), sharey=True)\nfig.suptitle(\"STW105\")\nax = axes[0]\nstw105.plot(\"density\", \"radius\", legend=False, ax=ax)\nax.set_xlabel(\"Density [g/cm³]\")\nax.set_ylabel(\"Radius [m]\")\nax.grid()\nax = axes[1]\nfor velocity in [\"Vpv\", \"Vsv\", \"Vph\", \"Vsh\"]:\n stw105.plot(velocity, \"radius\", legend=False, ax=ax, label=velocity)\nax.grid()\nax.legend()\nax.set_xlabel(\"Velocity [km/s]\")\nplt.show()\n\nfig, ax = plt.subplots()\nstw105.plot(\"eta\", \"radius\", legend=False, ax=ax)\nax.set_xlabel(\"eta\")\nax.set_ylabel(\"Radius [m]\")\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
skant626/attention-is-all-you-need-pytorch
[ "d4092de8589d463ad7cac222c2293375f5ca1879" ]
[ "transformer/SubLayers.py" ]
[ "''' Define the sublayers in encoder/decoder layer '''\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom transformer.Modules import ScaledDotProductAttention\nimport torch \nimport copy, math\ndef clones(module, N):\n \"Produce N identical layers.\"\n return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n__author__ = \"Yu-Hsiang Huang\"\n\n\n\ndef attention(query, key, value, mask=None, dropout=None):\n \"Compute 'Scaled Dot Product Attention'\"\n d_k = query.size(-1)\n scores = torch.matmul(query, key.transpose(-2, -1)) \\\n / math.sqrt(d_k)\n if mask is not None:\n scores = scores.masked_fill(mask == 0, -1e9)\n p_attn = F.softmax(scores, dim = -1)\n if dropout is not None:\n p_attn = dropout(p_attn)\n return torch.matmul(p_attn, value), p_attn\n\nclass MultiHeadAttention(nn.Module):\n def __init__(self, h, d_model, d_k, d_v, dropout=0.1):\n \"Take in model size and number of heads.\"\n super(MultiHeadAttention, self).__init__()\n assert d_model % h == 0\n # We assume d_v always equals d_k\n self.d_k = d_model // h\n self.h = h\n self.linears = clones(nn.Linear(d_model, d_k * h , bias=False), 4)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n \n def forward(self, query, key, value, mask=None):\n \"Implements Figure 2\"\n if mask is not None:\n # Same mask applied to all h heads.\n mask = mask.unsqueeze(1)\n nbatches = query.size(0)\n residual = query\n # 1) Do all the linear projections in batch from d_model => h x d_k \n query, key, value = \\\n [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n for l, x in zip(self.linears, (query, key, value))]\n \n # 2) Apply attention on all the projected vectors in batch. \n x, self.attn = attention(query, key, value, mask=mask, \n dropout=self.dropout)\n \n # 3) \"Concat\" using a view and apply a final linear. 
\n x = x.transpose(1, 2).contiguous() \\\n .view(nbatches, -1, self.h * self.d_k)\n x = self.linears[-1](x)\n x = self.dropout(x)\n x += residual\n #return self.linears[-1](x), self.attn\n return x, self.attn\n\"\"\"\nclass MultiHeadAttention(nn.Module):\n ''' Multi-Head Attention module '''\n\n def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):\n super().__init__()\n\n self.n_head = n_head\n self.d_k = d_k\n self.d_v = d_v\n\n self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)\n self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)\n self.fc = nn.Linear(n_head * d_v, d_model, bias=False)\n\n self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)\n\n self.dropout = nn.Dropout(dropout)\n self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)\n\n\n def forward(self, q, k, v, mask=None):\n\n d_k, d_v, n_head = self.d_k, self.d_v, self.n_head\n sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)\n\n residual = q\n\n # Pass through the pre-attention projection: b x lq x (n*dv)\n # Separate different heads: b x lq x n x dv\n q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)\n k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)\n v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)\n\n # Transpose for attention dot product: b x n x lq x dv\n q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)\n\n if mask is not None:\n mask = mask.unsqueeze(1) # For head axis broadcasting.\n\n q, attn = self.attention(q, k, v, mask=mask)\n\n # Transpose to move the head dimension back: b x lq x n x dv\n # Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)\n q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)\n q = self.dropout(self.fc(q))\n q += residual\n\n q = self.layer_norm(q)\n\n return q, attn\n\"\"\"\nclass PositionwiseFeedForward(nn.Module):\n ''' A two-feed-forward-layer module '''\n\n def __init__(self, d_in, d_hid, dropout=0.1):\n super().__init__()\n self.w_1 = nn.Linear(d_in, d_hid) # position-wise\n self.w_2 = nn.Linear(d_hid, d_in) # position-wise\n self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n\n residual = x\n\n x = self.w_2(F.relu(self.w_1(x)))\n x = self.dropout(x)\n x += residual\n\n x = self.layer_norm(x)\n\n return x\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.nn.LayerNorm", "torch.matmul", "torch.nn.Linear" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marta-0/hippocampal_volume_quantification
[ "387dcb402b6cac3019fa922663a4f7a86a944d4c" ]
[ "section2/src/data_prep/SlicesDataset.py" ]
[ "\"\"\"\nModule for Pytorch dataset representations\n\"\"\"\n\nimport torch\nfrom torch.utils.data import Dataset\n\nclass SlicesDataset(Dataset):\n \"\"\"\n This class represents an indexable Torch dataset\n which could be consumed by the PyTorch DataLoader class\n \"\"\"\n def __init__(self, data):\n self.data = data\n\n self.slices = []\n\n for i, d in enumerate(data):\n for j in range(d[\"image\"].shape[0]):\n self.slices.append((i, j))\n\n def __getitem__(self, idx):\n \"\"\"\n This method is called by PyTorch DataLoader class to return a sample with id idx\n\n Arguments: \n idx {int} -- id of sample\n\n Returns:\n Dictionary of 2 Torch Tensors of dimensions [1, W, H]\n \"\"\"\n slc = self.slices[idx]\n sample = dict()\n sample[\"id\"] = idx\n\n # You could implement caching strategy here if dataset is too large to fit\n # in memory entirely\n # Also this would be the place to call transforms if data augmentation is used\n \n # TASK: Create two new keys in the \"sample\" dictionary, named \"image\" and \"seg\"\n # The values are 3D Torch Tensors with image and label data respectively. \n # First dimension is size 1, and last two hold the voxel data from the respective\n # slices. Write code that stores the 2D slice data in the last 2 dimensions of the 3D Tensors. \n # Your tensor needs to be of shape [1, patch_size, patch_size]\n # Don't forget that you need to put a Torch Tensor into your dictionary element's value\n # Hint: your 3D data sits in self.data variable, the id of the 3D volume from data array\n # and the slice number are in the slc variable. \n # Hint2: You can use None notation like so: arr[None, :] to add size-1 \n # dimension to a Numpy array\n \n sample['image'] = torch.from_numpy(self.data[slc[0]]['image'][slc[1]]).unsqueeze(0)\n sample['seg'] = torch.from_numpy(self.data[slc[0]]['seg'][None, slc[1]])\n\n return sample\n\n def __len__(self):\n \"\"\"\n This method is called by PyTorch DataLoader class to return number of samples in the dataset\n\n Returns:\n int\n \"\"\"\n return len(self.slices)" ]
[ [ "torch.from_numpy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
xiaoyi-Zeng/nerf_pl
[ "53f5fcfe911a1248963f6f82ada1e0361bb7e296" ]
[ "models/nerf.py" ]
[ "import torch\nfrom torch import nn\n\nclass Embedding(nn.Module):\n def __init__(self, in_channels, N_freqs, logscale=True):\n \"\"\"\n Defines a function that embeds x to (x, sin(2^k x), cos(2^k x), ...)\n in_channels: number of input channels (3 for both xyz and direction)\n \"\"\"\n super(Embedding, self).__init__()\n self.N_freqs = N_freqs\n self.in_channels = in_channels\n self.funcs = [torch.sin, torch.cos]\n self.out_channels = in_channels*(len(self.funcs)*N_freqs+1)\n\n if logscale:\n self.freq_bands = 2**torch.linspace(0, N_freqs-1, N_freqs)\n else:\n self.freq_bands = torch.linspace(1, 2**(N_freqs-1), N_freqs)\n\n def forward(self, x):\n \"\"\"\n Embeds x to (x, sin(2^k x), cos(2^k x), ...) \n Different from the paper, \"x\" is also in the output\n See https://github.com/bmild/nerf/issues/12\n\n Inputs:\n x: (B, self.in_channels)\n\n Outputs:\n out: (B, self.out_channels)\n \"\"\"\n out = [x]\n for freq in self.freq_bands:\n for func in self.funcs:\n out += [func(freq*x)]\n\n return torch.cat(out, -1)\n\n\nclass NeRF(nn.Module):\n def __init__(self,\n D=10, W=256,\n in_channels_xyz=63, in_channels_dir=27, \n skips=[4,6,8]):\n \"\"\"\n D: number of layers for density (sigma) encoder\n W: number of hidden units in each layer\n in_channels_xyz: number of input channels for xyz (3+3*10*2=63 by default)\n in_channels_dir: number of input channels for direction (3+3*4*2=27 by default)\n skips: add skip connection in the Dth layer\n \"\"\"\n super(NeRF, self).__init__()\n self.D = D\n self.W = W\n self.in_channels_xyz = in_channels_xyz\n self.in_channels_dir = in_channels_dir\n self.skips = skips\n\n # xyz encoding layers\n for i in range(D):\n if i == 0:\n layer = nn.Linear(in_channels_xyz, W)\n elif i in skips:\n layer = nn.Linear(W+in_channels_xyz, W)\n else:\n layer = nn.Linear(W, W)\n layer = nn.Sequential(layer, nn.ReLU(True))\n setattr(self, f\"xyz_encoding_{i+1}\", layer)\n self.xyz_encoding_final = nn.Linear(W, W)\n\n # direction encoding layers\n self.dir_encoding = nn.Sequential(\n nn.Linear(W+in_channels_dir, W//2),\n nn.ReLU(True))\n\n self.sigmaone=nn.Linear(W,1)\n self.rgbone=nn.Linear(W//2,3)\n self.xyz_encoding_final_one = nn.Linear(W, W)\n self.dir_encoding_one = nn.Sequential(\n nn.Linear(W+in_channels_dir, W//2),\n nn.ReLU(True))\n\n self.sigmatwo=nn.Linear(W,1)\n self.rgbtwo=nn.Linear(W//2,3)\n self.xyz_encoding_final_two = nn.Linear(W, W)\n self.dir_encoding_two = nn.Sequential(\n nn.Linear(W+in_channels_dir, W//2),\n nn.ReLU(True))\n\n self.sigmathree=nn.Linear(W,1)\n self.rgbthree=nn.Linear(W//2,3)\n self.xyz_encoding_final_three = nn.Linear(W, W)\n self.dir_encoding_three = nn.Sequential(\n nn.Linear(W+in_channels_dir, W//2),\n nn.ReLU(True))\n \n # output layers\n self.sigma = nn.Linear(W, 1)\n self.rgb = nn.Sequential(\n nn.Linear(W//2, 3),\n nn.Sigmoid())\n\n def forward(self, x, sigma_only=False):\n \"\"\"\n Encodes input (xyz+dir) to rgb+sigma (not ready to render yet).\n For rendering this ray, please see rendering.py\n\n Inputs:\n x: (B, self.in_channels_xyz(+self.in_channels_dir))\n the embedded vector of position and direction\n sigma_only: whether to infer sigma only. 
If True,\n x is of shape (B, self.in_channels_xyz)\n\n Outputs:\n if sigma_only:\n sigma: (B, 1) sigma\n else:\n out: (B, 4), rgb and sigma\n \"\"\"\n if not sigma_only:\n input_xyz, input_dir = \\\n torch.split(x, [self.in_channels_xyz, self.in_channels_dir], dim=-1)\n else:\n input_xyz = x\n\n xyz_ = input_xyz\n \n for i in range(self.D):\n if i==4:\n xyz_encoding_final_one = self.xyz_encoding_final_one(xyz_)\n dir_encoding_input_one = torch.cat([xyz_encoding_final_one, input_dir], -1)\n dir_encoding_one = self.dir_encoding_one(dir_encoding_input_one)\n sigma_one = self.sigmaone(xyz_)\n rgb_one = self.rgbone(dir_encoding_one)\n elif i==6:\n xyz_encoding_final_two = self.xyz_encoding_final_two(xyz_)\n dir_encoding_input_two = torch.cat([xyz_encoding_final_two, input_dir], -1)\n dir_encoding_two = self.dir_encoding_two(dir_encoding_input_two)\n sigma_two = self.sigmatwo(xyz_)\n rgb_two = self.rgbtwo(dir_encoding_two)\n elif i==8:\n xyz_encoding_final_three = self.xyz_encoding_final_three(xyz_)\n dir_encoding_input_three = torch.cat([xyz_encoding_final_three, input_dir], -1)\n dir_encoding_three = self.dir_encoding_three(dir_encoding_input_three)\n sigma_three = self.sigmathree(xyz_)\n rgb_three = self.rgbthree(dir_encoding_three)\n \n if i in self.skips:\n xyz_ = torch.cat([input_xyz, xyz_], -1)\n xyz_ = getattr(self, f\"xyz_encoding_{i+1}\")(xyz_)\n \n \n \n sigma = self.sigma(xyz_) + sigma_one + sigma_two + sigma_three\n if sigma_only:\n return sigma\n\n xyz_encoding_final = self.xyz_encoding_final(xyz_)\n\n dir_encoding_input = torch.cat([xyz_encoding_final, input_dir], -1)\n dir_encoding = self.dir_encoding(dir_encoding_input)\n rgb = self.rgb(dir_encoding) + rgb_one + rgb_two + rgb_three\n\n out = torch.cat([rgb, sigma], -1)\n\n return out\n" ]
[ [ "torch.linspace", "torch.cat", "torch.nn.Sigmoid", "torch.nn.Linear", "torch.split", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LeianChen/GUI_new2
[ "4bb97f982c86bc71caa12e03691b3448094e18b5" ]
[ "model.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n__version__ 1.0.0\n\nBasic agent-based modelling. Builds agents into a 2D plane represented by a\nraster environment. Scrapes some web data and use it to initialise the model.\nAgents have methods of move, eat, and share. When running the model, a new\nwindow will appear. Please find this window and select model from the menu bar\nand select Run model.\n\n\"\"\"\n\n# In Spyder set display to Inline!\n\nimport random\n#import operator\nimport matplotlib\nimport tkinter\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot\nimport matplotlib.animation\nimport matplotlib.backends.backend_tkagg\nimport agentframework\nimport csv\nimport requests\nimport bs4\n\n\n\n#######################################################\n#########Step 1: Initialise parameters##################\n########################################################\nprint('Step 1: Initialise parameters.')\n\nnum_of_agents = 10\nnum_of_iterations = 100\nneighbourhood = 20\n\n\nprint(\"num_of_agents\", str(num_of_agents))\nprint(\"num_of_iterations\", str(num_of_iterations))\nprint(\"neighbourhood\", str(neighbourhood))\n\n\n\n#################################################\n########Step 2: Initialise GUI.#################\n#################################################\nprint('Step 2: Initialise GUI.')\n#root = tkinter.Tk()\n#root.wm_title(\"Model\")\n\n\n\n#######################################################\n######Step 3: Get data from the web.#################\n#####################################################\nprint('Step 3: Get data from the web.')\nurl = 'http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html'\nr = requests.get('http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')\ncontent = r.text\nsoup = bs4.BeautifulSoup(content, 'html.parser')\ntd_ys = soup.find_all(attrs={\"class\" : \"y\"})\ntd_xs = soup.find_all(attrs={\"class\" : \"x\"})\ntd_zs = soup.find_all(attrs={\"class\" : \"z\"})\nfor td in td_xs:\n print(td.text)\n#print(td_ys)\n#print(td_xs)\n\n\n\n###############################################################################################################\n######Step 4: Initialise environment containing data about the spatial environment in which agents act#########\n###############################################################################################################\nprint('Step 4: Initialise environment containing data about the spatial environment in which agents act.')\n\nenvironment = []\n\nwith open('in.txt', newline='') as f:\n reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)\n for row in reader:\n rowlist = []\n for value in row:\n rowlist.append(value)\n #print(value)\n environment.append(rowlist)\n\n\n\n################################################\n###########Step 5: Initialise agents.###########\n################################################\nprint('Step 5: Initialise agents.')\n\nagents = []\n\nfor i in range(num_of_agents):\n y = int(td_ys[i].text)\n x = int(td_xs[i].text)\n agents.append(agentframework.Agent(environment, agents, x, y))\n\n\n#####################################\n######Step 6: Initialise the GUI.####\n#####################################\nprint('Step 6: Initialise the GUI.')\n\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\ncarry_on = True\n\nprint(\"A GUI window will appear. 
Please select \\\"Run Model\\\" from the \\\"Model\\\" menu to run the model.\")\n\n\n####################################################\n###############Step 7: Animation.#################\n##################################################\nprint('Step 7: Animation.')\n\ndef update(frame_number):\n global carry_on\n \n fig.clear()\n\n if (carry_on):\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n else:\n carry_on = False\n print(\"stopping condition\")\n\n\n #Plot the environment\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment)\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].y,agents[i].x)\n #print(agents[i].y,agents[i].x)\n\ndef gen_function(b = [0]):\n a = 0\n global carry_on\n while (a < 10) & (carry_on) :\n yield a\t\t\t# Returns control and waits next call.\n a = a + 1\n\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)\n#matplotlib.pyplot.show()\n\n\n\n#Display the plot\n\ndef run():\n global animation\n animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)\n canvas.draw()\n\n\nroot = tkinter.Tk() \nroot.wm_title(\"Model\")\ncanvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)\ncanvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)\n\n\nmenu_bar = tkinter.Menu(root)\nroot.config(menu=menu_bar)\nmodel_menu = tkinter.Menu(menu_bar)\nmenu_bar.add_cascade(label=\"Model\", menu=model_menu)\nmodel_menu.add_command(label=\"Run model\", command=run)\n\n\n\n\n####################################################\n###############Shuffle and Store Checks.############\n####################################################\n\n##Check if shuffle works\n#for j in range(num_of_iterations):\n#\n# for k in range(num_of_agents):\n# print(agents[k].x,agents[k].y)\n# print (\"shuffling...\")\n#\n# random.shuffle(agents)\n#\n# for k in range(num_of_agents):\n# print(agents[k].x,agents[k].y)\n# print(\"----\")\n# \n#\n##Check store of all agents\n#for i in range(num_of_agents):\n# print(agents[i].store)\n\n\n\n#################################################################\n###########Calculate the distance between agents.################\n#################################################################\n\n#def distance_between(agents_row_a, agents_row_b):\n# return (((agents_row_a.x - agents_row_b.x)**2) + \n# ((agents_row_a.y - agents_row_b.y)**2))**0.5\n#\n#\n#for agents_row_a in agents:\n# for agents_row_b in agents:\n# distance = distance_between(agents_row_a, agents_row_b)\n\n\n\nroot.mainloop()\n#tkinter.mainloop()\nprint(\"Thank you for using the model!\")\n\n\n\n\n\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.scatter", "matplotlib.use", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylim", "matplotlib.pyplot.xlim", "matplotlib.animation.FuncAnimation", "matplotlib.backends.backend_tkagg.FigureCanvasTkAgg" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
agermanidis/glow
[ "2b8fe7d46892d4ad727b9b54adf72754c3350486" ]
[ "demo/runway_model.py" ]
[ "import runway\nfrom model import *\nfrom imutils.face_utils.helpers import FACIAL_LANDMARKS_68_IDXS\nfrom imutils.face_utils.helpers import FACIAL_LANDMARKS_5_IDXS\nfrom imutils.face_utils.helpers import shape_to_np\nimport numpy as np\nimport cv2\nimport dlib\n\n\nclass FaceWarper:\n def __init__(self, predictor, desiredLeftEye=(0.35, 0.35),\n desiredFaceWidth=512, desiredFaceHeight=None):\n # store the facial landmark predictor, desired output left\n # eye position, and desired output face width + height\n self.predictor = predictor\n self.desiredLeftEye = desiredLeftEye\n self.desiredFaceWidth = desiredFaceWidth\n self.desiredFaceHeight = desiredFaceHeight\n\n # if the desired face height is None, set it to be the\n # desired face width (normal behavior)\n if self.desiredFaceHeight is None:\n self.desiredFaceHeight = self.desiredFaceWidth\n\n def align(self, image, gray, rect, z_addition):\n # convert the landmark (x, y)-coordinates to a NumPy\n h1, w1 = image.shape[:2]\n shape = self.predictor(gray, rect)\n shape = shape_to_np(shape)\n\n #simple hack ;)\n if (len(shape)==68):\n # extract the left and right eye (x, y)-coordinates\n (lStart, lEnd) = FACIAL_LANDMARKS_68_IDXS[\"left_eye\"]\n (rStart, rEnd) = FACIAL_LANDMARKS_68_IDXS[\"right_eye\"]\n else:\n (lStart, lEnd) = FACIAL_LANDMARKS_5_IDXS[\"left_eye\"]\n (rStart, rEnd) = FACIAL_LANDMARKS_5_IDXS[\"right_eye\"]\n\n leftEyePts = shape[lStart:lEnd]\n rightEyePts = shape[rStart:rEnd]\n\n # compute the center of mass for each eye\n leftEyeCenter = leftEyePts.mean(axis=0).astype(\"int\")\n rightEyeCenter = rightEyePts.mean(axis=0).astype(\"int\")\n\n # compute the angle between the eye centroids\n dY = rightEyeCenter[1] - leftEyeCenter[1]\n dX = rightEyeCenter[0] - leftEyeCenter[0]\n angle = np.degrees(np.arctan2(dY, dX)) - 180\n\n # compute the desired right eye x-coordinate based on the\n # desired x-coordinate of the left eye\n desiredRightEyeX = 1.0 - self.desiredLeftEye[0]\n\n # determine the scale of the new resulting image by taking\n # the ratio of the distance between eyes in the *current*\n # image to the ratio of distance between eyes in the\n # *desired* image\n dist = np.sqrt((dX ** 2) + (dY ** 2))\n desiredDist = (desiredRightEyeX - self.desiredLeftEye[0])\n desiredDist *= self.desiredFaceWidth\n scale = desiredDist / dist\n\n # compute center (x, y)-coordinates (i.e., the median point)\n # between the two eyes in the input image\n eyesCenter = ((leftEyeCenter[0] + rightEyeCenter[0]) // 2,\n (leftEyeCenter[1] + rightEyeCenter[1]) // 2)\n\n # grab the rotation matrix for rotating and scaling the face\n M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)\n\n # update the translation component of the matrix\n tX = self.desiredFaceWidth * 0.5\n tY = self.desiredFaceHeight * self.desiredLeftEye[1]\n M[0, 2] += (tX - eyesCenter[0])\n M[1, 2] += (tY - eyesCenter[1])\n\n # apply the affine transformation\n (w, h) = (self.desiredFaceWidth, self.desiredFaceHeight)\n output = cv2.warpAffine(image, M, (w, h),\n flags=cv2.INTER_CUBIC)\n\n # invert the previous affine transformation for later\n Mi = cv2.invertAffineTransform(M)\n\n # BGR -> RGB\n output = output[:,:,::-1]\n\n # encode with GLOW, do operations on z\n z = encode(output)\n z[0] += z_addition\n\n # decode back to image and back to BGR\n output = decode(z)[0]\n output = output[:,:,::-1]\n\n # invert the affine transformation on output\n output = cv2.warpAffine(output, Mi, (w1, h1),\n flags=cv2.INTER_CUBIC)\n\n # overwrite original image with masked output\n mask = 
np.sum(output, axis=2) == 0.0\n image = np.multiply(mask.reshape((h1, w1, 1)), image)\n image += output\n\n return image\n\n\n\n# load face detection and warping\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\nwarper = FaceWarper(predictor, desiredFaceWidth=256, desiredLeftEye=(0.371, 0.480))\n\n# tags that can be modified\ntags = \"5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young\"\ntags = tags.split()\n\ncommand_inputs = {\n 'image': runway.image,\n 'feature': runway.category(choices=tags, default=tags[2]),\n 'amount': runway.number(default=0.5, min=0, max=1, step=0.1)\n}\n\[email protected]('manipulate', inputs=command_inputs, outputs={'output': runway.image})\ndef detect(sess, inp):\n img = np.array(inp['image'])\n amount = inp['amount']\n feature = inp['feature']\n z_addition = amount * z_manipulate[tags.index(feature)]\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n rects = detector(gray, 2)\n if len(rects) == 0:\n print('nothing found')\n return dict(output=img)\n img = warper.align(img[:, :, ::-1], gray, rects[0], z_addition)[:, :, ::-1]\n img = np.array(Image.fromarray(img).convert('RGB'))\n output = np.clip(img, 0, 255).astype(np.uint8)\n return dict(output=output)\n\n\nif __name__ == '__main__':\n runway.run()\n" ]
[ [ "numpy.sqrt", "numpy.clip", "numpy.arctan2", "numpy.array", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Kexiii/DenseNet-Cifar10
[ "440e7ffb059be720eb35cdb9092c6c521ea9dd2b" ]
[ "cifar10_train.py" ]
[ "# -*- coding:utf-8 -*-\nimport keras\nimport numpy as np\nimport os\nimport matplotlib \nmatplotlib.use('Agg') \nimport matplotlib.pyplot as plt\nfrom keras.optimizers import SGD\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\nfrom keras import backend as K\nfrom keras.datasets import cifar10\nfrom keras.models import load_model\nfrom data_input.data_input import getDataGenerator\nfrom model.DenseNet import createDenseNet\n\n#define DenseNet parms\nROWS = 32\nCOLS = 32\nCHANNELS = 3\nnb_classes = 10\nbatch_size = 32\nnb_epoch = 40\nimg_dim = (ROWS,COLS,CHANNELS)\ndensenet_depth = 40\ndensenet_growth_rate = 12\n\n#define filepath parms\ncheck_point_file = r\"./densenet_check_point.h5\"\nloss_trend_graph_path = r\"./loss.jpg\"\nacc_trend_graph_path = r\"./acc.jpg\"\n\ndef main(resume=False):\n print('Now,we start compiling DenseNet model...')\n model = createDenseNet(nb_classes=nb_classes,img_dim=img_dim,depth=densenet_depth,\n growth_rate = densenet_growth_rate)\n if resume == True: \n model.load_weights(check_point_file)\n \n optimizer = Adam()\n #optimizer = SGD(lr=0.001)\n \n model.compile(loss='categorical_crossentropy',optimizer=optimizer,metrics=['accuracy'])\n \n print('Now,we start loading data...')\n (x_train,y_train),(x_test,y_test) = cifar10.load_data()\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n y_train = keras.utils.to_categorical(y_train, nb_classes)\n y_test= keras.utils.to_categorical(y_test, nb_classes)\n train_datagen = getDataGenerator(train_phase=True)\n train_datagen = train_datagen.flow(x_train,y_train,batch_size = batch_size)\n validation_datagen = getDataGenerator(train_phase=False)\n validation_datagen = validation_datagen.flow(x_test,y_test,batch_size = batch_size)\n \n print('Now,we start defining callback functions...')\n \"\"\"\n lr_reducer = ReduceLROnPlateau(monitor='val_acc', factor=np.sqrt(0.1),\n cooldown=0, patience=3, min_lr=1e-6)\n \"\"\"\n model_checkpoint = ModelCheckpoint(check_point_file, monitor=\"val_acc\", save_best_only=True,\n save_weights_only=True, verbose=1)\n \n #callbacks=[lr_reducer,model_checkpoint]\n callbacks=[model_checkpoint]\n \n print(\"Now,we start training...\")\n history = model.fit_generator(generator=train_datagen,\n steps_per_epoch= x_train.shape[0] // batch_size,\n epochs=nb_epoch,\n callbacks=callbacks,\n validation_data=validation_datagen,\n validation_steps = x_test.shape[0] // batch_size,\n verbose=1)\n \n print(\"Now,we start drawing the loss and acc trends graph...\")\n #summarize history for accuracy \n fig = plt.figure(1)\n plt.plot(history.history[\"acc\"]) \n plt.plot(history.history[\"val_acc\"]) \n plt.title(\"Model accuracy\") \n plt.ylabel(\"accuracy\") \n plt.xlabel(\"epoch\") \n plt.legend([\"train\",\"test\"],loc=\"upper left\") \n plt.savefig(acc_trend_graph_path) \n plt.close(1)\n \n #summarize history for loss\n fig = plt.figure(2) \n plt.plot(history.history[\"loss\"]) \n plt.plot(history.history[\"val_loss\"]) \n plt.title(\"Model loss\") \n plt.ylabel(\"loss\") \n plt.xlabel(\"epoch\") \n plt.legend([\"train\",\"test\"],loc=\"upper left\") \n plt.savefig(loss_trend_graph_path)\n plt.close(2)\n \n print(\"We are done, everything seems OK...\")\n \nif __name__ == '__main__':\n K.set_image_data_format('channels_last')\n #set_max_gpu_memory()\n main(resume=True)" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.use", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mr-love/openpyxl
[ "31a51044f936e16bea4cc353fe99a5aff6fec7d0" ]
[ "openpyxl/utils/dataframe.py" ]
[ "from __future__ import absolute_import\n# Copyright (c) 2010-2018 openpyxl\n\nimport numpy\nfrom pandas import Timestamp\n\n\ndef dataframe_to_rows(df, index=True, header=True):\n \"\"\"\n Convert a Pandas dataframe into something suitable for passing into a worksheet\n \"\"\"\n blocks = df._data.blocks\n ncols = sum(b.shape[0] for b in blocks)\n data = [None] * ncols\n\n for b in blocks:\n values = b.values\n\n if b.dtype.type == numpy.datetime64:\n values = numpy.array([Timestamp(v) for v in values.ravel()])\n values = values.reshape(b.shape)\n\n result = values.tolist()\n\n for col_loc, col in zip(b.mgr_locs, result):\n data[col_loc] = col\n\n if header:\n values = list(df.columns.values)\n if df.columns.dtype.type == numpy.datetime64:\n values = [Timestamp(v) for v in values]\n yield [None]*index + values\n\n for idx, v in enumerate(df.index):\n yield [v]*index + [data[j][idx] for j in range(ncols)]\n" ]
[ [ "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bmhopkinson/pytorch-deeplab-xception
[ "f652206cfa07363018c1879de496da49358ba289" ]
[ "dataloaders/datasets/cityscapes.py" ]
[ "import os\nimport numpy as np\nimport scipy.misc as m\nfrom PIL import Image\nfrom torch.utils import data\nfrom myinfo import Path\nfrom torchvision import transforms\nfrom dataloaders import custom_transforms as tr\n\nclass CityscapesSegmentation(data.Dataset):\n NUM_CLASSES = 19\n\n def __init__(self, args, root=Path.db_root_dir('cityscapes'), split=\"train\"):\n\n self.root = root\n self.split = split\n self.args = args\n self.files = {}\n\n self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)\n self.annotations_base = os.path.join(self.root, 'gtFine_trainvaltest', 'gtFine', self.split)\n\n self.files[split] = self.recursive_glob(rootdir=self.images_base, suffix='.png')\n\n self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]\n self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]\n self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence', \\\n 'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain', \\\n 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \\\n 'motorcycle', 'bicycle']\n\n self.ignore_index = 255\n self.class_map = dict(zip(self.valid_classes, range(self.NUM_CLASSES)))\n\n if not self.files[split]:\n raise Exception(\"No files for split=[%s] found in %s\" % (split, self.images_base))\n\n print(\"Found %d %s images\" % (len(self.files[split]), split))\n\n def __len__(self):\n return len(self.files[self.split])\n\n def __getitem__(self, index):\n\n img_path = self.files[self.split][index].rstrip()\n lbl_path = os.path.join(self.annotations_base,\n img_path.split(os.sep)[-2],\n os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png')\n\n _img = Image.open(img_path).convert('RGB')\n _tmp = np.array(Image.open(lbl_path), dtype=np.uint8)\n _tmp = self.encode_segmap(_tmp)\n _target = Image.fromarray(_tmp)\n\n sample = {'image': _img, 'label': _target}\n\n if self.split == 'train':\n return self.transform_tr(sample)\n elif self.split == 'val':\n return self.transform_val(sample)\n elif self.split == 'test':\n return self.transform_ts(sample)\n\n def encode_segmap(self, mask):\n # Put all void classes to zero\n for _voidc in self.void_classes:\n mask[mask == _voidc] = self.ignore_index\n for _validc in self.valid_classes:\n mask[mask == _validc] = self.class_map[_validc]\n return mask\n\n def recursive_glob(self, rootdir='.', suffix=''):\n \"\"\"Performs recursive glob with given suffix and rootdir\n :param rootdir is the root directory\n :param suffix is the suffix to be searched\n \"\"\"\n return [os.path.join(looproot, filename)\n for looproot, _, filenames in os.walk(rootdir)\n for filename in filenames if filename.endswith(suffix)]\n\n def transform_tr(self, sample):\n composed_transforms = transforms.Compose([\n tr.RandomHorizontalFlip(),\n tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),\n tr.RandomGaussianBlur(),\n tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n tr.ToTensor()])\n\n return composed_transforms(sample)\n\n def transform_val(self, sample):\n\n composed_transforms = transforms.Compose([\n tr.FixScaleCrop(crop_size=self.args.crop_size),\n tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n tr.ToTensor()])\n\n return composed_transforms(sample)\n\n def transform_ts(self, sample):\n\n composed_transforms = transforms.Compose([\n tr.FixedResize(size=self.args.crop_size),\n tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n 
tr.ToTensor()])\n\n        return composed_transforms(sample)\n\nif __name__ == '__main__':\n    from dataloaders.utils import decode_segmap\n    from torch.utils.data import DataLoader\n    import matplotlib.pyplot as plt\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    args = parser.parse_args()\n    args.base_size = 513\n    args.crop_size = 513\n\n    cityscapes_train = CityscapesSegmentation(args, split='train')\n\n    dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=2)\n\n    for ii, sample in enumerate(dataloader):\n        for jj in range(sample[\"image\"].size()[0]):\n            img = sample['image'].numpy()\n            gt = sample['label'].numpy()\n            tmp = np.array(gt[jj]).astype(np.uint8)\n            segmap = decode_segmap(tmp, dataset='cityscapes')\n            img_tmp = np.transpose(img[jj], axes=[1, 2, 0])\n            img_tmp *= (0.229, 0.224, 0.225)\n            img_tmp += (0.485, 0.456, 0.406)\n            img_tmp *= 255.0\n            img_tmp = img_tmp.astype(np.uint8)\n            plt.figure()\n            plt.title('display')\n            plt.subplot(211)\n            plt.imshow(img_tmp)\n            plt.subplot(212)\n            plt.imshow(segmap)\n\n        if ii == 1:\n            break\n\n    plt.show(block=True)\n\n" ]
[ [ "matplotlib.pyplot.imshow", "matplotlib.pyplot.title", "torch.utils.data.DataLoader", "matplotlib.pyplot.subplot", "numpy.transpose", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
weberlo/tvm
[ "b076cad542524cb3744149d953c341b5815f6474" ]
[ "topi/tests/python/test_topi_conv2d_winograd.py" ]
[ "\"\"\"Example code to do convolution.\"\"\"\n\nimport numpy as np\nimport tvm\nfrom tvm import autotvm\nfrom tvm.autotvm.task.space import FallbackConfigEntity\nimport topi\nimport topi.testing\nfrom tvm.contrib.pickle_memoize import memoize\nfrom topi.util import get_const_tuple\n\n\ndef verify_conv2d_nchw(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1, add_bias=False, add_relu=False):\n print(\"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)\" % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation))\n\n in_height = in_width = in_size\n\n A = tvm.placeholder((batch, in_channel, in_height, in_width), name='A')\n W = tvm.placeholder((num_filter, in_channel, kernel, kernel), name='W')\n bias = tvm.placeholder((num_filter, 1, 1), name='bias')\n\n a_shape = get_const_tuple(A.shape)\n w_shape = get_const_tuple(W.shape)\n bias_shape = get_const_tuple(bias.shape)\n dtype = A.dtype\n\n @memoize(\"topi.tests.test_topi_conv2d_nchw.verify_conv2d_nchw\")\n def get_ref_data():\n a_np = np.random.uniform(size=a_shape).astype(dtype)\n w_np = np.random.uniform(size=w_shape).astype(dtype)\n b_np = np.random.uniform(size=bias_shape).astype(dtype)\n dw_np = topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))\n c_np = topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)\n if add_bias:\n b_np = np.random.uniform(size=bias_shape).astype(dtype)\n c_np += b_np\n if add_relu:\n c_np = np.maximum(c_np, 0)\n return a_np, w_np, b_np, c_np\n\n a_np, w_np, b_np, c_np = get_ref_data()\n\n def check_device(device):\n ctx = tvm.context(device, 0)\n if not ctx.exist:\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n with tvm.target.create(device):\n C = topi.nn.conv2d(A, W, stride, padding, dilation, layout='NCHW', out_dtype=dtype)\n if add_bias:\n C = topi.add(C, bias)\n if add_relu:\n C = topi.nn.relu(C)\n s = topi.generic.schedule_conv2d_nchw([C])\n\n a = tvm.nd.array(a_np, ctx)\n w = tvm.nd.array(w_np, ctx)\n b = tvm.nd.array(b_np, ctx)\n c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)\n if add_bias:\n func = tvm.build(s, [A, W, bias, C], device, name=\"relu_%d_%d_%d_%d_%d_%d_%d_%d\" % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation))\n func(a, w, b, c)\n else:\n func = tvm.build(s, [A, W, C], device, name=\"relu_%d_%d_%d_%d_%d_%d_%d_%d\" % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation))\n func(a, w, c)\n tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)\n\n\n for device in ['cuda', 'llvm -device=arm_cpu', 'opencl -device=mali']:\n check_device(device)\n\n\nclass WinogradFallback(autotvm.FallbackContext):\n def _query_inside(self, target, workload):\n key = (target, workload)\n if key in self.memory:\n return self.memory[key]\n cfg = FallbackConfigEntity()\n cfg.template_key = 'winograd'\n self.memory[key] = cfg\n return cfg\n\n\ndef test_conv2d_nchw():\n autotvm.DispatchContext.current.silent = True\n\n with WinogradFallback():\n # resnet 18 workloads\n verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1)\n verify_conv2d_nchw(1, 128, 28, 128, 3, 1, 1)\n verify_conv2d_nchw(1, 256, 14, 256, 3, 1, 1)\n verify_conv2d_nchw(1, 512, 7, 512, 3, 1, 1)\n\n # batch size = 2\n verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1)\n\n # relu, bias\n verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_bias=True)\n verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_relu=True)\n verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_relu=True, 
add_bias=True)\n\n        # weird workloads\n        verify_conv2d_nchw(1, 1, 1, 1, 3, 1, 1)\n        verify_conv2d_nchw(3, 3, 3, 3, 3, 1, 1)\n        verify_conv2d_nchw(2, 13, 71, 59, 3, 1, 1)\n\nif __name__ == \"__main__\":\n    test_conv2d_nchw()\n" ]
[ [ "numpy.random.uniform", "numpy.maximum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
simon-donike/MoransI_tiff
[ "1b362487fe96282820dde93906681c8d289f6ee6" ]
[ "data/MoransI.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 5 18:32:44 2020\n\n@author: simondonike\n\"\"\"\n\nimport pysal\nfrom skimage.io import imread\nfrom libpysal.weights import lat2W\nimport pandas as pd\nfrom esda.moran import Moran\nfrom skimage.color import rgb2gray\nfrom splot.esda import moran_scatterplot\nimport matplotlib.pyplot as Mplt\n#import matplotlib.image as mamag\n\n# I started off with a tiff image, converted it to greyscale and\n# then proceeded to Moran I value without converting it into shapefile.\n# Because a shape file has a lot of info including tags lat, long. population and much more meant for geography\n\nrasterImage = imread(r'images/sbg.tif')\n\ndef rgb2gray(rgb):\n import numpy as np\n return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])\n\n\n\n#RasterIamgeGrey is data as pd. dataframe from here on\nrasterImageGrey = rgb2gray(rasterImage)\n\ncol,row = rasterImageGrey.shape[:2]\ndf = pd.DataFrame(rasterImageGrey.flatten()) #What does this do?\nWeightMatrix= lat2W(row,col)\nWeightMatrix = lat2W(rasterImageGrey.shape[0],rasterImageGrey.shape[1])\nMoranM= Moran(rasterImageGrey,WeightMatrix)\nfig, ax = moran_scatterplot(MoranM, aspect_equal=True)\n\n#print(\"Raster Dimension:{}\".format(rasterImageGrey.shape))\nprint(\"Moran's I Value:%f\"%MoranM.I)\nMplt.show()" ]
[ [ "numpy.dot", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
catalystneuro/nwb-conversion-tools
[ "84ca038d99f7928d6c2a15686c9fa3a719466d9e" ]
[ "nwb_conversion_tools/utils/conversion_tools.py" ]
[ "\"\"\"Authors: Cody Baker, Alessio Buccino.\"\"\"\nfrom pathlib import Path\nimport numpy as np\nimport uuid\nfrom datetime import datetime\nfrom warnings import warn\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nfrom time import perf_counter\nfrom typing import Optional\n\nfrom pynwb import NWBFile\nfrom pynwb.file import Subject\nfrom spikeextractors import RecordingExtractor, SubRecordingExtractor\n\nfrom .json_schema import dict_deep_update\nfrom .spike_interface import write_recording\n\n\ndef get_module(nwbfile: NWBFile, name: str, description: str = None):\n \"\"\"Check if processing module exists. If not, create it. Then return module.\"\"\"\n if name in nwbfile.processing:\n if description is not None and nwbfile.modules[name].description != description:\n warn(\n \"Custom description given to get_module does not match existing module description! \"\n \"Ignoring custom description.\"\n )\n return nwbfile.processing[name]\n else:\n if description is None:\n description = \"No description.\"\n return nwbfile.create_processing_module(name=name, description=description)\n\n\ndef get_default_nwbfile_metadata():\n \"\"\"\n Return structure with defaulted metadata values required for a NWBFile.\n\n These standard defaults are\n metadata[\"NWBFile\"][\"session_description\"] = \"no description\"\n metadata[\"NWBFile\"][\"session_description\"] = datetime(1970, 1, 1)\n\n Proper conversions should override these fields prior to calling NWBConverter.run_conversion()\n \"\"\"\n metadata = dict(\n NWBFile=dict(\n session_description=\"no description\",\n session_start_time=datetime(1970, 1, 1).isoformat(),\n identifier=str(uuid.uuid4()),\n )\n )\n return metadata\n\n\ndef make_nwbfile_from_metadata(metadata: dict):\n \"\"\"Make NWBFile from available metadata.\"\"\"\n metadata = dict_deep_update(get_default_nwbfile_metadata(), metadata)\n nwbfile_kwargs = metadata[\"NWBFile\"]\n if \"Subject\" in metadata:\n # convert ISO 8601 string to datetime\n if \"date_of_birth\" in metadata[\"Subject\"] and isinstance(metadata[\"Subject\"][\"date_of_birth\"], str):\n metadata[\"Subject\"][\"date_of_birth\"] = datetime.fromisoformat(metadata[\"Subject\"][\"date_of_birth\"])\n nwbfile_kwargs.update(subject=Subject(**metadata[\"Subject\"]))\n # convert ISO 8601 string to datetime\n if isinstance(nwbfile_kwargs.get(\"session_start_time\", None), str):\n nwbfile_kwargs[\"session_start_time\"] = datetime.fromisoformat(metadata[\"NWBFile\"][\"session_start_time\"])\n return NWBFile(**nwbfile_kwargs)\n\n\ndef check_regular_timestamps(ts):\n \"\"\"Check whether rate should be used instead of timestamps.\"\"\"\n time_tol_decimals = 9\n uniq_diff_ts = np.unique(np.diff(ts).round(decimals=time_tol_decimals))\n return len(uniq_diff_ts) == 1\n\n\ndef estimate_recording_conversion_time(\n recording: RecordingExtractor, mb_threshold: float = 100.0, write_kwargs: Optional[dict] = None\n) -> (float, float):\n \"\"\"\n Test the write speed of recording data to NWB on this system.\n\n recording : RecordingExtractor\n The recording object to be written.\n mb_threshold : float\n Maximum amount of data to test with. 
Defaults to 100, which is just over 2 seconds of standard SpikeGLX data.\n\n Returns\n -------\n total_time : float\n Estimate of total time (in minutes) to write all data based on speed estimate and known total data size.\n speed : float\n Speed of the conversion in MB/s.\n \"\"\"\n if write_kwargs is None:\n write_kwargs = dict()\n\n temp_dir = Path(mkdtemp())\n test_nwbfile_path = temp_dir / \"recording_speed_test.nwb\"\n\n num_channels = recording.get_num_channels()\n itemsize = recording.get_dtype().itemsize\n total_mb = recording.get_num_frames() * num_channels * itemsize / 1e6\n if total_mb > mb_threshold:\n truncation = (mb_threshold * 1e6) // (num_channels * itemsize)\n test_recording = SubRecordingExtractor(parent_recording=recording, end_frame=truncation)\n else:\n test_recording = recording\n\n actual_test_mb = test_recording.get_num_frames() * num_channels * itemsize / 1e6\n start = perf_counter()\n write_recording(recording=test_recording, save_path=test_nwbfile_path, overwrite=True, **write_kwargs)\n end = perf_counter()\n delta = end - start\n speed = actual_test_mb / delta\n total_time = (total_mb / speed) / 60\n\n rmtree(temp_dir)\n return total_time, speed\n" ]
[ [ "numpy.diff" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
taylorguo/nsfw-Detection-Segmentation
[ "0befa479af0d1ea790953e840d9ee026aa74b842" ]
[ "depth-stream/ex6_ird_stream.py" ]
[ "'''\nhttps://github.com/elmonkey/Python_OpenNI2/tree/master/samples\nOfficial primense openni2 and nite2 python bindings.\nStreams infra-red camera\nref:\n http://www.eml.ele.cst.nihon-u.ac.jp/~momma/wiki/wiki.cgi/OpenNI/Python.html\n@author: Carlos Torres <[email protected]>\n'''\n\nimport cv2\n\nfrom primesense import openni2#, nite2\nimport numpy as np\nfrom primesense import _openni2 as c_api\n\n#import matplotlib.pyplot as plt\n\n\n## Directory where OpenNI2.so is located\n#dist = '/home/carlos/Install/kinect/OpenNI2-Linux-Arm-2.2/Redist/'\ndist ='/home/simon/deeplearning/openni/Install/kinect/openni2/OpenNI2-x64/Redist'\n## Initialize openni and check\nopenni2.initialize(dist)#'C:\\Program Files\\OpenNI2\\Redist\\OpenNI2.dll') # accepts the path of the OpenNI redistribution\nif (openni2.is_initialized()):\n print(\"openNI2 initialized\")\nelse:\n print(\"openNI2 not initialized\")\n\n#### initialize nite and check\n##nite2.initialize()\n##if (nite2.is_initialized()):\n## print \"nite2 initialized\"\n##else:\n## print \"nite2 not initialized\"\n#### ===============================\n\n\ndev = openni2.Device.open_any()\nprint('Some Device Information')\nprint('\\t', dev.get_sensor_info(openni2.SENSOR_DEPTH))\nprint('\\t', dev.get_sensor_info(openni2.SENSOR_IR))\nprint('\\t', dev.get_sensor_info(openni2.SENSOR_COLOR))\n#ut = nite2.UserTracker(dev)\n\n## streams\n# Depth stream\ndepth_stream = dev.create_depth_stream()\n\n# IR stream\nir_stream = dev.create_ir_stream()\n\n## Set stream speed and resolution\n#w = 640\nw = 640\n#w = 320\n#h = 480\nh = 480\n#h = 240\n#fps=30\nfps = 30\n\n## Set the video properties\n#print 'Get b4 video mode', depth_stream.get_video_mode()\n#depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_100_UM, resolutionX=w, resolutionY=h, fps=fps))\ndepth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=w, resolutionY=h, fps=fps))\ndepth_stream.set_mirroring_enabled(False)\n#depth_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_DEPTH_1_MM, resolutionX=640, resolutionY=480, fps=30))\n#print 'Get after video mode', depth_stream.get_video_mode()\nir_stream.set_video_mode(c_api.OniVideoMode(pixelFormat=c_api.OniPixelFormat.ONI_PIXEL_FORMAT_GRAY16, resolutionX=w, resolutionY=h, fps=fps))\nir_stream.set_mirroring_enabled(False)\n\n## Start the streams\ndepth_stream.start()\nir_stream.start()\n\n\ndef get_depth1():\n \"\"\"\n Returns numpy ndarrays representing raw and ranged depth images.\n Outputs:\n depth:= raw depth, 1L ndarray, dtype=uint16, min=0, max=2**12-1\n d4d := depth for dislay, 3L ndarray, dtype=uint8, min=0, max=255\n Note1:\n fromstring is faster than asarray or frombuffer\n Note2:\n depth = depth.reshape(120,160) #smaller image for faster response\n NEEDS default video configuration\n depth = depth.reshape(240,320) # Used to MATCH RGB Image (OMAP/ARM)\n \"\"\"\n depth_frame = depth_stream.read_frame()\n depth = np.fromstring(depth_frame().get_buffer_as_uint16(),dtype=np.uint16).reshape(h,w) # Works & It's FAST\n d4d = np.uint8(depth.astype(float) *255/ 2**12-1) # Correct the range. 
Depth images are 12bits\n    #d4d = cv2.cvtColor(d4d,cv2.COLOR_GRAY2RGB)\n    d4d = np.dstack((d4d,d4d,d4d)) # faster than cv2 conversion\n    return depth, d4d\n#get_depth\n\n\ndef get_depth():\n    \"\"\"\n    Returns numpy ndarrays representing the raw and ranged depth images.\n    Outputs:\n        dmap:= distancemap in mm, 1L ndarray, dtype=uint16, min=0, max=2**12-1\n        d4d := depth for display, 3L ndarray, dtype=uint8, min=0, max=255\n    Note1:\n        fromstring is faster than asarray or frombuffer\n    Note2:\n        .reshape(120,160) #smaller image for faster response\n                OMAP/ARM default video configuration\n        .reshape(240,320) # Used to MATCH RGB Image (OMAP/ARM)\n                Requires .set_video_mode\n    \"\"\"\n    dmap = np.fromstring(depth_stream.read_frame().get_buffer_as_uint16(),dtype=np.uint16).reshape(h,w)  # Works & It's FAST\n    d4d = np.uint8(dmap.astype(float) *255/(2**12-1)) # Correct the range. Depth images are 12bits\n    #d4d = cv2.cvtColor(d4d,cv2.COLOR_GRAY2RGB)\n    d4d = np.dstack((d4d,d4d,d4d)) # faster than cv2 conversion\n    return dmap, d4d\n#get_depth\n\ndef get_ir():\n    \"\"\"\n    Returns numpy ndarrays representing raw and ranged infra-red(IR) images.\n    Outputs:\n        ir    := raw IR, 1L ndarray, dtype=uint16, min=0, max=2**12-1\n        ir4d  := IR for display, 3L ndarray, dtype=uint8, min=0, max=255\n    \"\"\"\n    ir_frame = ir_stream.read_frame()\n    ir_frame_data = ir_frame.get_buffer_as_uint16()\n    ir4d = np.ndarray((ir_frame.height, ir_frame.width),dtype=np.uint16, buffer = ir_frame_data).astype(np.float32)\n    ir4d = np.uint8((ir4d/ir4d.max()) * 255)\n    ir4d = cv2.cvtColor(ir4d,cv2.COLOR_GRAY2RGB)\n    return ir_frame, ir4d\n#get_ir\n\nframe_idx = 0\n\n## main loop\ndone = False\nwhile not done:\n    key = cv2.waitKey(1)\n    if (key&255) == 27:\n        done = True\n    ## Read in the streams\n    # Depth\n    dmap,d4d = get_depth()\n    # Infrared\n    ir_frame, ir4d = get_ir()\n    cv2.imshow(\"Depth||IR\", np.hstack((d4d, ir4d)))\n    cv2.imwrite('checkboard.png',ir4d)\n\n    #cv2.imshow(\"Depth\", d4d)\n    #cv2.imshow(\"IR\", ir4d)\n    frame_idx+=1\n# end while\n\n## Release resources and terminate\ncv2.destroyAllWindows()\ndepth_stream.stop()\nopenni2.unload()\nprint (\"Terminated\")\n" ]
[ [ "numpy.hstack", "numpy.ndarray", "numpy.dstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fcomitani/simpsom
[ "a1c48f4e9ab13727cf213ce756cd4848a1d34ae7" ]
[ "simpsom/cluster/density_peak.py" ]
[ "\"\"\"\nDensity Peak Clustering\n\nA Rodriguez, A Laio,\nClustering by fast search and find of density peaks\nSCIENCE, 1492, vol 322 (2014) \n\nF. Comitani @2017-2021\n\"\"\"\n\nimport sys\nimport warnings\n\nfrom operator import attrgetter\n\nfrom math import sqrt, exp\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nclass Point:\n \"\"\" Class for the points to cluster. \"\"\"\n\n def __init__(self, coordinates):\n\n \"\"\"Initialise the point.\n\n Args:\n coordinates (np.array): Array containing the point coordinates in N dimensions.\n\n \"\"\"\n \n self.coor = []\n for c in coordinates:\n self.coor.append(c)\n\n \"\"\" Initialise empty density (rho), higher-density distance (delta), list of distances, \n the nearest neighbour of higher density, and the cluster.\"\"\"\n\n self.rho = 0\n self.delta = sys.maxsize\n self.dists = {}\n self.nneigh = None\n self.cl = [0]\n self.core = True\n\n\n def set_dist(self, coll):\n \n \"\"\"Calculate the distances from all other points in a Collection. [Deprecated]\n\n Args:\n coll (Collection): Collection containing all the points of the dataset used to calculate the distances. \n\n \"\"\"\n\n warnings.warn('Setting individual distances is deprecated, use the Collection.set_dists() instead!', DeprecationWarning)\n\n for p2 in coll.points:\n if self != p2: self.dists[p2] = dist(self,p2)\n\n\n def set_rho(self, coll, type_func='step'):\n\n \"\"\"Calculate the density of the single point for a given dataset. [Deprecated]\n\n Args:\n coll (Collection): Collection containing all the points of the dataset used to calculate the density. \n type_func (str): step function type (step, gaussian kernel or logistic).\n\n \"\"\"\n\n warnings.warn('Setting individual rhos is deprecated, use the Collection.set_rhos() instead!', DeprecationWarning)\n\n if self not in coll.points:\n print('WARNING: calculating the density for a point that was not found in the dataset, make sure to be consistent with your data!')\n \n for p2 in coll.points:\n if self != p2: \n if type_func == 'step':\n self.rho = self.rho+step(self,p2,self.refd)\n elif type_func == 'gaussian':\n self.rho = self.rho+gaussian(self,p2,self.refd)\n elif type_func == 'logistic':\n self.rho = self.rho+logistic(self,p2,self.refd)\n else:\n \"\"\" Raise exception if the selected function is not available is used. \"\"\"\n raise NotImplementedError('Only step, gaussian kernel or logistic functions are implemented')\n\n\n def set_delta(self, coll):\n\n \"\"\"Calculate the distance of the point from higher density points and set the nearest neighbour. 
[Deprecated]\n\n        Args:\n            coll (Collection): Collection containing all the points of the dataset used to calculate the distance.\n\n        \"\"\"\n        \n        warnings.warn('Setting individual deltas is deprecated, use the Collection.set_deltas() instead!', DeprecationWarning)\n        \n        if self not in coll.points:\n            print('WARNING: calculating the distance for a point that was not found in the dataset, make sure to be consistent with your data!')\n\n        mind = sys.maxsize\n        dist_high, dist_low = [], []\n        \n        for p2 in coll.points:\n            if self != p2:\n                d = dist(self,p2)\n                if self.rho < p2.rho:\n                    dist_high.append(d)\n                    \"\"\" Choose nearest neighbour among higher-density points \"\"\"\n                    if d <= min(dist_high): self.nneigh=p2\n                else:\n                    dist_low.append(d)\n        \n        \"\"\" If the point has maximal rho, then return max distance \"\"\"\n\n        self.delta = min(dist_high) if len(dist_high) > 0 \\\n                     else max(dist_low)  \n\n\nclass Collection:\n\n    \"\"\"Class for a collection of point objects. \"\"\"\n\n    def __init__(self, coor_array, type_func='gaussian', percent=0.02, PBC=False, net_height=0, net_width=0):\n        \n        \"\"\" Generate a collection of point objects from an array containing their coordinates.\n        \n        Args:\n            coor_array (array): Array containing the coordinates of the points to cluster.\n            type_func (str): step function for calculating rho (step, gaussian kernel or logistic).\n            percent (float): average percentage of neighbours.\n            PBC (bool, optional): Activate/deactivate Periodic Boundary Conditions.\n            net_height (int, optional): Number of nodes along the first dimension, required for PBC.\n            net_width (int, optional): Number of nodes along the second dimension, required for PBC.\n\n        \"\"\"\n        \n        self.points = []\n        for coors in coor_array:\n            self.points.append(Point(coors)) \n\n        index=int(round(len(self.points)*percent))\n\n        self.PBC=PBC\n        self.net_height = net_height\n        self.net_width = net_width\n\n        self.all_dists = []\n        self.set_dists()\n\n        self.all_dists.sort()\n        self.refd = self.all_dists[index]\n        \n        \"\"\" Make sure rhos are set before setting deltas \"\"\"\n\n        self.set_rhos(type_func)\n        self.set_deltas()  \n\n        self.clusters = {}\n        \n\n    def set_dists(self):\n        \n        \"\"\"Calculate the distance matrix for all points. \"\"\"\n\n        for p1 in self.points:\n            for p2 in self.points:\n                if self.points.index(p1) < self.points.index(p2):    \n                    d = dist(p1,p2, 'euclid', self.PBC, self.net_height, self.net_width)\n                    self.all_dists.append(d)  \n                    p1.dists[p2] = d  \n                    p2.dists[p1] = d\n\n\n    def set_rhos(self, type_func='step'):\n        \n        \"\"\"Calculate the density for each point in the dataset. 
\n\n        Args:\n            type_func (str): step function type (step, gaussian or logistic)\n\n        \"\"\"\n\n        for p1 in self.points:\n            for p2 in self.points:\n                if self.points.index(p1) < self.points.index(p2):    \n                    if type_func == 'step':\n                        p1.rho = p1.rho+step(p1,p2,self.refd,PBC=self.PBC,net_height=self.net_height,net_width=self.net_width)\n                        p2.rho = p2.rho+step(p1,p2,self.refd,PBC=self.PBC,net_height=self.net_height,net_width=self.net_width)\n                    elif type_func == 'gaussian':\n                        p1.rho = p1.rho+gaussian(p1,p2,self.refd,PBC=self.PBC,net_height=self.net_height,net_width=self.net_width)\n                        p2.rho = p2.rho+gaussian(p1,p2,self.refd,PBC=self.PBC,net_height=self.net_height,net_width=self.net_width)\n                    elif type_func == 'logistic':\n                        p1.rho = p1.rho+sigmoid(p1,p2,self.refd,PBC=self.PBC,net_height=self.net_height,net_width=self.net_width)\n                        p2.rho = p2.rho+sigmoid(p1,p2,self.refd,PBC=self.PBC,net_height=self.net_height,net_width=self.net_width)\n                    else:\n                        raise NotImplementedError('Only step, gaussian kernel or logistic functions are implemented')\n\n\n    def set_deltas(self):\n\n        \"\"\"Calculate the distance from higher density points for each point in the dataset. \"\"\"\n\n        for p1 in self.points:\n            for p2 in self.points:\n                if self.points.index(p1) < self.points.index(p2):    \n                    d = p1.dists[p2]\n                    if p1.rho < p2.rho and d < p1.delta: \n                        p1.delta = d\n                        p1.nneigh = p2\n                    elif p1.rho > p2.rho and d < p2.delta: \n                        p2.delta = d\n                        p2.nneigh = p1\n        \n        \"\"\" If the point has maximal rho, then return max distance \"\"\"\n\n        pmax = max(self.points, key=attrgetter('rho'))\n        pmax.delta = max(pmax.dists.values())\n\n\n    def decision_graph(self, show=False, printout=True):\n\n        \"\"\"Calculate the decision graph, delta vs rho for the points belonging to the Collection \n            and find the cluster centers.\n\n        Args:\n            show (bool, optional): Choose to display the plot.\n            printout (bool, optional): Choose to save the plot to a file.\n\n        \"\"\"\n\n        fig, ax = plt.subplots()\n        p_rhos, p_deltas = [p.rho for p in self.points], [p.delta for p in self.points]\n\n        mean_deltas, stdev_deltas = np.mean(p_deltas), np.std(p_deltas)\n        self.ctrs, ctr_rhos, ctr_deltas = [], [], []\n        \n        i=1\n        for p in self.points:\n            if p.delta > mean_deltas+1.5*stdev_deltas:\n                self.ctrs.append(p)  \n                p.cl[0] = i\n                i += 1\n                ctr_rhos.append(p.rho), ctr_deltas.append(p.delta)  \n                p_rhos.pop(p_rhos.index(p.rho)), p_deltas.pop(p_deltas.index(p.delta))\n\n        plt.scatter(p_rhos, p_deltas, \\\n            alpha=0.8, s=100, edgecolors='none', color='#3333AA')\n\n        plt.scatter(ctr_rhos, ctr_deltas, \\\n            alpha=0.8, s=100, edgecolors='none', color='#AA3333')\n\n        if printout is True:\n            plt.savefig('decision_graph.png', bbox_inches='tight', dpi=600)\n        if show is True:\n            plt.show()\n        plt.clf()\n\n\n    def cluster_assign(self):\n\n        \"\"\"Assign a cluster to each point according to its nearest neighbour with higher density.\"\"\"\n\n        \"\"\" Temporary workaround until I can make mutable p.cl work, SLOW! 
\"\"\"\n\n while [0] in [p.cl for p in self.points]: \n for p in self.points: \n if p.cl == [0]: p.cl = p.nneigh.cl\n\n\n def core_assign(self):\n \n \"\"\"Assign points as belonging to the core or the halo of a cluster.\"\"\"\n\n for c in range(1,len(self.ctrs)+1):\n self.clusters[c] = []\n\n border = []\n for p in self.points:\n if p.cl == [c]:\n self.clusters[c].append(p)\n for key,value in p.dists.items():\n if key.cl != c and value < self.refd:\n border.append(p)\n continue\n\n if border != []:\n pmax = max(border, key=attrgetter('rho')) \n ref_rho = pmax.rho\n\n for p in self.clusters[c]:\n if p.rho < ref_rho: \n p.core=False \n\n\n def get_clusterList(self):\n\n \"\"\" Returns the indeces of the clustered points as a list.\n \n Returns:\n clusters (list, int): a list of lists containing the points indices belonging to each cluster\n \"\"\" \n\n clusters = []\n\n for val in self.clusters.values():\n inds = []\n for p in val:\n inds.append(self.points.index(p))\n clusters.append(inds)\n\n return clusters \n\n\ndef dist(p1,p2, metric='euclid', PBC=False, net_height=0, net_width=0):\n\n \"\"\"Calculate the distance between two point objects in a N dimensional space according to a given metric.\n\n Args:\n p1 (point): First point object for the distance.\n p2 (point): Second point object for the distance.\n metric (string): Metric to use. For now only euclidean distance is implemented.\n PBC (bool, optional): Activate/deactivate Periodic Boundary Conditions.\n net_height (int, optional): Number of nodes along the first dimension, required for PBC.\n net_width (int, optional): Numer of nodes along the second dimension, required for PBC.\n\n Returns:\n (float): The distance between the two points.\n\n \"\"\"\n\n if metric == 'euclid':\n if len(p1.coor) != len(p2.coor): raise ValueError('Points must have the same dimensionality!')\n else:\n if PBC is True:\n \"\"\" Hexagonal Periodic Boundary Conditions \"\"\"\n\n if net_height % 2 == 0:\n offset = 0\n else: \n offset = 0.5\n \n return min([sqrt((p1.coor[0]-p2.coor[0])*(p1.coor[0]-p2.coor[0])\\\n +(p1.coor[1]-p2.coor[1])*(p1.coor[1]-p2.coor[1])),\n #right\n sqrt((p1.coor[0]-p2.coor[0]+net_width)*(p1.coor[0]-p2.coor[0]+net_width)\\\n +(p1.coor[1]-p2.coor[1])*(p1.coor[1]-p2.coor[1])),\n #bottom \n sqrt((p1.coor[0]-p2.coor[0]+offset)*(p1.coor[0]-p2.coor[0]+offset)\\\n +(p1.coor[1]-p2.coor[1]+net_height*2/sqrt(3)*3/4)*(p1.coor[1]-p2.coor[1]+net_height*2/sqrt(3)*3/4)),\n #left\n sqrt((p1.coor[0]-p2.coor[0]-net_width)*(p1.coor[0]-p2.coor[0]-net_width)\\\n +(p1.coor[1]-p2.coor[1])*(p1.coor[1]-p2.coor[1])),\n #top \n sqrt((p1.coor[0]-p2.coor[0]-offset)*(p1.coor[0]-p2.coor[0]-offset)\\\n +(p1.coor[1]-p2.coor[1]-net_height*2/sqrt(3)*3/4)*(p1.coor[1]-p2.coor[1]-net_height*2/sqrt(3)*3/4)),\n #bottom right\n sqrt((p1.coor[0]-p2.coor[0]+net_width+offset)*(p1.coor[0]-p2.coor[0]+net_width+offset)\\\n +(p1.coor[1]-p2.coor[1]+net_height*2/sqrt(3)*3/4)*(p1.coor[1]-p2.coor[1]+net_height*2/sqrt(3)*3/4)),\n #bottom left\n sqrt((p1.coor[0]-p2.coor[0]-net_width+offset)*(p1.coor[0]-p2.coor[0]-net_width+offset)\\\n +(p1.coor[1]-p2.coor[1]+net_height*2/sqrt(3)*3/4)*(p1.coor[1]-p2.coor[1]+net_height*2/sqrt(3)*3/4)),\n #top right\n sqrt((p1.coor[0]-p2.coor[0]+net_width-offset)*(p1.coor[0]-p2.coor[0]+net_width-offset)\\\n +(p1.coor[1]-p2.coor[1]-net_height*2/sqrt(3)*3/4)*(p1.coor[1]-p2.coor[1]-net_height*2/sqrt(3)*3/4)),\n #top left\n sqrt((p1.coor[0]-p2.coor[0]-net_width-offset)*(p1.coor[0]-p2.coor[0]-net_width-offset)\\\n 
+(p1.coor[1]-p2.coor[1]-net_height*2/sqrt(3)*3/4)*(p1.coor[1]-p2.coor[1]-net_height*2/sqrt(3)*3/4))])\n\n            else:\n                diffs = 0\n                for i in range(len(p1.coor)): \n                    diffs = diffs+((p1.coor[i]-p2.coor[i])*(p1.coor[i]-p2.coor[i]))\n                return sqrt(diffs)\n\n    else:\n        \n        \"\"\" Raise exception if metric other than euclidean is used \"\"\"\n        \n        raise NotImplementedError('PBC are implemented only with Euclidean distance')\n\n\ndef step(p1, p2, cutoff, PBC=False, net_height=0, net_width=0):\n\n    \"\"\"Step function activated when the distance of two points is less than the cutoff.\n\n    Args:\n        p1 (point): First point object for the distance.\n        p2 (point): Second point object for the distance.\n        cutoff (float): The cutoff to define the proximity of the points.\n        PBC (bool, optional): Activate/deactivate Periodic Boundary Conditions.\n        net_height (int, optional): Number of nodes along the first dimension, required for PBC.\n        net_width (int, optional): Number of nodes along the second dimension, required for PBC.\n\n    Returns:\n        (int): 1 if the points are closer than the cutoff, 0 otherwise.\n\n    \"\"\"\n\n    if dist(p1,p2, 'euclid', PBC, net_height, net_width)<cutoff: \n        return 1\n\n    return 0  \n\n\ndef gaussian(p1, p2, sigma, PBC=False, net_height=0, net_width=0):\n\n    \"\"\"Gaussian function of the distance between two points scaled with sigma.\n\n    Args:\n        p1 (point): First point object for the distance.\n        p2 (point): Second point object for the distance.\n        sigma (float): The scaling factor for the distance.\n        PBC (bool, optional): Activate/deactivate Periodic Boundary Conditions.\n        net_height (int, optional): Number of nodes along the first dimension, required for PBC.\n        net_width (int, optional): Number of nodes along the second dimension, required for PBC.\n\n    Returns:\n        (float): value of the gaussian function.\n\n    \"\"\"\n\n    return exp(-1.0*dist(p1,p2, 'euclid', PBC, net_height, net_width)*\\\n                    dist(p1,p2, 'euclid', PBC, net_height, net_width)/(sigma*sigma))\n\n\ndef sigmoid(p1, p2, sigma, PBC=False, net_height=0, net_width=0):\n\n    \"\"\"Logistic function of the distance between two points scaled with sigma.\n\n    Args:\n        p1 (point): First point object for the distance.\n        p2 (point): Second point object for the distance.\n        sigma (float): The scaling factor for the distance.\n        PBC (bool, optional): Activate/deactivate Periodic Boundary Conditions.\n        net_height (int, optional): Number of nodes along the first dimension, required for PBC.\n        net_width (int, optional): Number of nodes along the second dimension, required for PBC.\n\n    Returns:\n        (float): value of the logistic function.\n\n    \"\"\"\n\n    return exp(-1.0*(1.0+exp((dist(p1,p2, 'euclid', PBC, net_height, net_width))/sigma)))\n\n\ndef density_peak(sample, show=False, printout=False, percent=0.02, PBC=False, net_height=0, net_width=0):\n\n    \"\"\" Run the complete clustering algorithm in one go and returns the clustered indices as a list.\n\n    Args:\n        sample (array): The input dataset\n        show (bool, optional): Choose to display the decision graph.\n        printout (bool, optional): Choose to save the decision graph to a file.\n        \n    Returns:\n        clusters (list, int): a list of lists containing the points indices belonging to each cluster\n    \"\"\"  \n    \n    pts = Collection(sample, percent=percent, PBC=PBC, net_height=net_height, net_width=net_width)\n    pts.decision_graph(show=show, printout=printout)\n    pts.cluster_assign()\n    pts.core_assign()\n\n    return pts.get_clusterList()\n\n\ndef dp_test(out_path='./'):\n\n    import os\n    import errno\n    import numpy as np\n\n    \"\"\" Run the complete clustering 
algorithm on a test case and print the clustered points graph. \n    \n    Args:\n        out_path (str, optional): path to the output folder.\n    \"\"\"\n    \n    \"\"\" Set up output folder. \"\"\"\n    \n    if out_path != './':\n        try:\n            os.makedirs(out_path)\n        except OSError as exception:\n            if exception.errno != errno.EEXIST:\n                raise\n\n    print(\"Testing Density Peak...\")\n\n    np.random.seed(100)\n    samples1 = np.random.multivariate_normal([0, 0], [[1, 0.1],[0.1, 1]], 100)\n    samples2 = np.random.multivariate_normal([10, 10], [[2, 0.5],[0.5, 2]], 100)\n    samples3 = np.random.multivariate_normal([0, 10], [[2, 0.5],[0.5, 2]], 100)\n    samples4 = np.random.uniform(0, 14, [50,2])\n    samplesTmp = np.concatenate((samples1,samples2), axis=0)\n    samplesTmp2 = np.concatenate((samplesTmp,samples3), axis=0)\n    samples = np.concatenate((samplesTmp2,samples4), axis=0)\n\n    pts = Collection(samples)\n\n    pts.decision_graph(printout=False)\n    pts.cluster_assign()\n    pts.core_assign()\n\n    print(pts.get_clusterList())\n\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==0], [p.coor[1] for p in pts.points if p.cl[0]==0], 'o', c='black')\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==1 and p.core==True], [p.coor[1] for p in pts.points if p.cl[0]==1 and p.core==True], 'o', c=\"#ff0000\")\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==1 and p.core==False], [p.coor[1] for p in pts.points if p.cl[0]==1 and p.core==False], 'o', c=\"#ffaaaa\")\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==2 and p.core==True], [p.coor[1] for p in pts.points if p.cl[0]==2 and p.core==True], 'o', c=\"#00ff00\")\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==2 and p.core==False], [p.coor[1] for p in pts.points if p.cl[0]==2 and p.core==False], 'o', c=\"#aaffaa\")\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==3 and p.core==True], [p.coor[1] for p in pts.points if p.cl[0]==3 and p.core==True], 'o', c=\"#ffff00\")\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==3 and p.core==False], [p.coor[1] for p in pts.points if p.cl[0]==3 and p.core==False], 'o', c=\"#ffffaa\")\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==4 and p.core==True], [p.coor[1] for p in pts.points if p.cl[0]==4 and p.core==True], 'o', c=\"#0000ff\")\n    plt.plot([p.coor[0] for p in pts.points if p.cl[0]==4 and p.core==False], [p.coor[1] for p in pts.points if p.cl[0]==4 and p.core==False], 'o', c=\"#aaaaff\")\n\n    plt.savefig(os.path.join(out_path,'dp_test_out.png'), bbox_inches='tight', dpi=200)\n\n    \n    print(\"Done!\")\n\n\nif __name__ == \"__main__\":\n\n    dp_test()\n" ]
[ [ "numpy.random.seed", "matplotlib.pyplot.scatter", "numpy.random.multivariate_normal", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "numpy.concatenate", "matplotlib.pyplot.plot", "numpy.std", "matplotlib.pyplot.clf", "numpy.mean", "numpy.random.uniform", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ANUPAMA1028/FACEMASKDETECTION
[ "ecca5e986e3fb2101448d655636f64e9b3b996b6" ]
[ "detect_mask_image.py" ]
[ "# USAGE\n# python detect_mask_image.py --image images/pic1.jpeg\n\n# import the necessary packages\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.models import load_model\nimport numpy as np\nimport argparse\nimport cv2\nimport os\nimport numpy as np\nimport pandas as pd\nimport cv2\nimport imutils\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport math\nfrom sklearn.metrics import f1_score\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras import optimizers\n\n#from keras.layers import Flatten, Dense, Conv2D, MaxPooling2D, Input, Dropout\n#from keras.models import Model, Sequential\n#from keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.optimizers import Adam\nfrom tkinter import*\nfrom PIL import Image, ImageTk\nfrom tkinter import filedialog\nfrom shutil import copyfile\nglobal filename\n\ndef quitgui():\n root.destroy()\ndef DetectFaceMask():\n #root.destroy()\n filename = filedialog.askopenfilename(initialdir = \"/\",title = \"select a file\", filetypes = ((\"Text files\",\"*.txt*\"),(\"all files\",\"*.*\")))\n mask_image(filename)\ndef mask_image(filename):\n\t# construct the argument parser and parse the arguments\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"-i\", \"--image\", required=False,\n\t\thelp=\"path to input image\")\n\tap.add_argument(\"-f\", \"--face\", type=str,\n\t\tdefault=\"face_detector\",\n\t\thelp=\"path to face detector model directory\")\n\tap.add_argument(\"-m\", \"--model\", type=str,\n\t\tdefault=\"mask_detector.model\",\n\t\thelp=\"path to trained face mask detector model\")\n\tap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\t\thelp=\"minimum probability to filter weak detections\")\n\targs = vars(ap.parse_args())\n\n\t# load our serialized face detector model from disk\n\tprint(\"[INFO] loading face detector model...\")\n\tprototxtPath = os.path.sep.join([args[\"face\"], \"deploy.prototxt\"])\n\tweightsPath = os.path.sep.join([args[\"face\"],\n\t\t\"res10_300x300_ssd_iter_140000.caffemodel\"])\n\tnet = cv2.dnn.readNet(prototxtPath, weightsPath)\n\n\t# load the face mask detector model from disk\n\tprint(\"[INFO] loading face mask detector model...\")\n\tmodel = load_model(args[\"model\"])\n\n\t# load the input image from disk, clone it, and grab the image spatial\n\t# dimensions\n\timage = cv2.imread(filename)\n\torig = image.copy()\n\t(h, w) = image.shape[:2]\n\n\t# construct a blob from the image\n\tblob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),\n\t\t(104.0, 177.0, 123.0))\n\n\t# pass the blob through the network and obtain the face detections\n\tprint(\"[INFO] computing face detections...\")\n\tnet.setInput(blob)\n\tdetections = net.forward()\n\n\t# loop over the detections\n\tfor i in range(0, detections.shape[2]):\n\t\t# extract the confidence (i.e., probability) associated with\n\t\t# the detection\n\t\tconfidence = detections[0, 0, i, 2]\n\n\t\t# filter out weak detections by ensuring the confidence is\n\t\t# greater than the minimum confidence\n\t\tif confidence > args[\"confidence\"]:\n\t\t\t# compute the (x, y)-coordinates of the bounding box for\n\t\t\t# the object\n\t\t\tbox = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n\t\t\t(startX, startY, endX, endY) = box.astype(\"int\")\n\n\t\t\t# ensure the bounding boxes fall within the dimensions of\n\t\t\t# the frame\n\t\t\t(startX, startY) = (max(0, 
startX), max(0, startY))\n\t\t\t(endX, endY) = (min(w - 1, endX), min(h - 1, endY))\n\n\t\t\t# extract the face ROI, convert it from BGR to RGB channel\n\t\t\t# ordering, resize it to 224x224, and preprocess it\n\t\t\tface = image[startY:endY, startX:endX]\n\t\t\tface = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n\t\t\tface = cv2.resize(face, (224, 224))\n\t\t\tface = img_to_array(face)\n\t\t\tface = preprocess_input(face)\n\t\t\tface = np.expand_dims(face, axis=0)\n\n\t\t\t# pass the face through the model to determine if the face\n\t\t\t# has a mask or not\n\t\t\t(mask, withoutMask) = model.predict(face)[0]\n\n\t\t\t# determine the class label and color we'll use to draw\n\t\t\t# the bounding box and text\n\t\t\tlabel = \"Mask\" if mask > withoutMask else \"No Mask\"\n\t\t\tcolor = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\n\t\t\t# include the probability in the label\n\t\t\tlabel = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\n\t\t\t# display the label and bounding box rectangle on the output\n\t\t\t# frame\n\t\t\tcv2.putText(image, label, (startX, startY - 10),\n\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n\t\t\tcv2.rectangle(image, (startX, startY), (endX, endY), color, 2)\n\n\t# show the output image\n\tcv2.imshow(\"Output\", image)\n\tcv2.waitKey(0)\n#if __name__ == \"__main__\":\n#mask_image()\n\nglobal root\nroot=Tk()\nroot.title(\"Face Mask Detection\")\n#root.geometry(\"500x500\")\nf1=Frame(root,bg=\"lightblue\",borderwidth=6,relief=GROOVE)\nf1.pack(side=TOP,fill=X)\nf2=Frame(root,bg=\"lightblue\",borderwidth=6,relief=GROOVE)\nf2.pack(side=TOP,fill=X)\nLabel(f1,text=\"Welcome to Face Mask Detection\",fg=\"black\",bg=\"white\",font=\"Timesnewroman 20 bold\").pack()\n\nbtn1=Button(root,text=\"Detect Face Mask\",command= DetectFaceMask,height=2,width=30,bg='pink',font=\"Timesnewroman 16 bold\",pady=10)\nbtn1.pack()\n\nButton(root,text=\"Quit\",command=quitgui,height=2,width=20,bg='red',font=\"Timesnewroman 16 bold\",pady=10).pack()\nroot.mainloop()\n\n\n" ]
[ [ "tensorflow.keras.models.load_model", "numpy.expand_dims", "tensorflow.keras.applications.mobilenet_v2.preprocess_input", "numpy.array", "tensorflow.keras.preprocessing.image.img_to_array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
Totilarson/CorePy
[ "9691f89692a552c1527d6c2fce74ace268d20b58" ]
[ "CorePycodes/NN_model_build.py" ]
[ "import pandas as pd\nimport numpy as np\n#from sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport pickle\nimport seaborn as sns\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport os\nimport json\n\n\n\nRoot_path = os.path.dirname(os.getcwd())\nRun_settings=json.load(open(os.path.join(Root_path + '/CorePycodes/' + 'Run_settings' + '.json')))\nCorebeta=json.load(open(os.path.join(Root_path + '/CoreData/CoreBeta/' + Run_settings['CoreOfStudy'] +'.json')))\n\n\nFormation_names = '-'.join(Run_settings[\"Formation\"]+Run_settings[\"Formation_2\"]) # Would like to have Formation_names defined in Corebeta\n\nRoot_path = os.path.dirname(os.getcwd())\nNeuralModel_TrainingDataSet = os.path.join(Root_path + '/CoreData/CoreNeuralModel/' + Formation_names + '_TrainingDataset.csv')\nNeuralModel_TrainingDataSet = pd.read_csv(NeuralModel_TrainingDataSet).sort_values(by=[Run_settings[\"Depth_model\"]], ascending=False)\n\n\n# Making training dataset for Neural model\ny=NeuralModel_TrainingDataSet['Chemofacies_train']\nX = NeuralModel_TrainingDataSet[Run_settings[\"elements\"]].values #converts X from a df to an array\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20,random_state=0)\n\n\n# scale the data\nscaler = StandardScaler()\nscaler.fit(X_train) # check this. why is it here and does it need to be used by all datasets\nX_train = scaler.transform(X_train)\nX_test = scaler.transform(X_test)\nX_total = scaler.transform(X)\n\n# apply the neural network to the scaled data\n# solver{‘lbfgs’, ‘sgd’, ‘adam’} sgd: stochastic gradient descent. adam: stochastic gradient-based optimizer. lbfgs: an optimizer in the family of quasi-Newton methods\n# activation{‘identity’, ‘logistic’, ‘tanh’, ‘relu’}\nmlp = MLPClassifier(hidden_layer_sizes=(10,10,10),random_state=1,activation = 'relu',solver='sgd', max_iter=2000)\nmlp.fit(X_train, y_train)\n\n#run the model off the test data\ny_pred = mlp.predict(X_test)\nprint(classification_report(y_test,y_pred))\n\ncnf_matrix = metrics.confusion_matrix(y_test, y_pred)\nsns.heatmap(cnf_matrix, annot=True)\nplt.show()\n\n#fig, ((ax1)) = plt.subplots(nrows=1, ncols=1, figsize=(5,5)) #sharex=True, sharey=True,\n#plt.subplot(1,1, 1)\n#plt.savefig(os.path.join(dirName + '/' + Run_settings['CoreOfStudy'] + '_' + Formation_names + '_PCA' + '.png'),dpi = 300)\n\n#####run the model across the entire dataset to add a column of predicted chemofacies to the exported data sheet\n\nchemo_predict=mlp.predict(X_total) #scaled original dataset. \nchemo_predict=chemo_predict.reshape(-1, 1)\n#chemo_predict=pd.DataFrame(data=chemo_predict,columns=[\"predicted\"])\nchemo_prob=mlp.predict_proba(X_total)\n\n## Issues here. THe number of Probabilities should be dependant on on number of chemifacies defined for Formation\n\nChemofacies_count=np.sort(NeuralModel_TrainingDataSet['Chemofacies_train'].unique())\n\nPrediction_matrix_headings=['Chemo_pred']\n\nfor i in range(len(Chemofacies_count)):\n Prediction_matrix_headings.append('Prob'+str(Chemofacies_count[i]))\n\n\ndata=pd.DataFrame(np.concatenate((chemo_predict,chemo_prob),axis=1),columns = Prediction_matrix_headings)\n\nNN_file=os.path.join(Root_path + '/CoreData/CoreNeuralModel/' + 'NN_model_' + Formation_names)\n\noutfile = open(NN_file,'wb')\npickle.dump(mlp,outfile)\noutfile.close()\n\n\n \n\n\n" ]
[ [ "sklearn.neural_network.MLPClassifier", "pandas.read_csv", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "numpy.concatenate", "sklearn.preprocessing.StandardScaler", "matplotlib.pyplot.show", "sklearn.metrics.classification_report" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
usc-ee250-spring2021/lab02-haotianxu2021
[ "1be1f20f03d0f9da3f732db7777eb72b7da6ed9f" ]
[ "Software/Python/grove_hightemperature_sensor/grove_hightemperature_sensor.py" ]
[ "import grovepi\nimport math\nimport json\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\n# Library written for Python 3!\n\n# take a look in the datasheet\n# http://www.mouser.com/catalog/specsheets/Seeed_111020002.pdf\n\n# class for the K-Type temperature sensor (w/ long probe/sonde)\nclass HighTemperatureSensor:\n\n # initialize the object with the appropriate sensor pins on the GrovePi and configuration JSON\n def __init__(self, _temperature_pin, _thermocouple_pin, _json_path = None):\n \n if(_json_path is None):\n _json_path = 'thermocouple_table.json'\n\n try:\n with open(_json_path) as table_file:\n table = json.load(table_file)\n\n self.__interpolateTable(table)\n self.__amp_av = table[\"amp_factor\"]\n self.__vol_offset = table[\"amp_offset\"]\n\n except:\n self.sensor_table = None\n self.__amp_av = 1\n self.__vol_offset = 1\n self.voltage_to_degrees_table = None\n\n # save the variables inside the object\n self.temperature_pin = _temperature_pin\n self.thermocouple_pin = _thermocouple_pin\n\n # set the pins as INPUT\n # this sensor outputs analog values so you can\n # use one of the 3 analog ports on the GrovePi\n grovepi.pinMode(self.temperature_pin, \"INPUT\")\n grovepi.pinMode(self.thermocouple_pin, \"INPUT\")\n\n # function for retrieving the room temperature_pin\n # if values exceed what's written in the datasheet\n # then it throws a ValueError exception\n def getRoomTemperature(self):\n # ratio for translating from 3.3V to 5.0V (what we read is in the range of 0 -> 3.3V)\n voltage_ratio = 5.0 / 3.3\n\n # and multiply what we read by that ratio\n # and read it for about 12 times -> this way we get smoother readings\n # the reason we average it is because the table we provided isn't big enough\n # and as a consequence you'd get values like (20 degrees, 24 degrees and so on)\n analog_sum = 0\n for step in range(12):\n analog_sum += grovepi.analogRead(self.temperature_pin)\n pass\n analog_value = (analog_sum / 12) * voltage_ratio\n # see the datasheet for more information\n\n try:\n calculated_resistance = (1023 - analog_value) * 10000 / analog_value\n calculated_temperature = 1 / (math.log(calculated_resistance / 10000) / 3975 + 1 / 298.15) - 273.15\n\n # if the values exceed a certain threshold\n # then raise a ValueError exception\n if not (calculated_temperature >= -50.0 and calculated_temperature <= 145.0):\n raise ValueError('temperature out of range')\n\n # and return what we got calculated\n return calculated_temperature\n\n except ZeroDivisionError:\n\n return 0\n\n # function for retrieving the temperature at the tip of the probe / sonde\n # only the temperature of the tip of the probe is measured\n # the rest of the K-Type sensor is for reaching the hot environment you want to measure\n # so you don't get burned\n def getProbeTemperature(self):\n\n if not self.voltage_to_degrees_table is None:\n probe_tip_voltage = self.__getThermocoupleVoltage()\n degrees_from_table = self.voltage_to_degrees_table(probe_tip_voltage)\n\n return float(degrees_from_table)\n\n else:\n\n return None\n\n # private function which can't be accessed from the outside\n # this is an imperitave solution - it was found through experiments\n # basically it calculates the voltage of the K-type sensor\n # before it gets into the amplifier - so the voltage is between -6.48 mV to 54.9 mV\n def __getThermocoupleVoltage(self):\n analog_value = grovepi.analogRead(self.thermocouple_pin);\n probe_tip_voltage = (analog_value - self.__vol_offset) / self.__amp_av\n\n return 
probe_tip_voltage\n\n\n    # function for interpolating values from [table] array\n    def __interpolateTable(self, table):\n\n        degrees_keys_list = list(table[\"degrees_table\"].keys())\n        degrees_list = [int(x) for x in degrees_keys_list]\n        voltages_list = []\n\n        for degrees in degrees_keys_list:\n            voltage_correspondent = table[\"degrees_table\"][degrees]\n            voltages_list.append(voltage_correspondent)\n\n        self.voltage_to_degrees_table = interp1d(voltages_list, degrees_list)\n" ]
[ [ "scipy.interpolate.interp1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
tdurham86/L2_sci-ATAC-seq
[ "6d6e3be43d26ef2534b538021edf02cc2e59ea83" ]
[ "scripts/lda_clustering/split_bed_script.py" ]
[ "#! /usr/bin/env python\n\n#Author: Timothy Durham (c) 2018\n\nimport argparse\nimport numpy\nimport os\nfrom plumbum import local\nimport sys\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--cell_name_to_cluster_map')\n parser.add_argument('--cuts_bed_file')\n parser.add_argument('--out_dir')\n args = parser.parse_args()\n\n #get cell names for each cluster\n cluster_map = numpy.loadtxt(args.cell_name_to_cluster_map, delimiter='\\t', dtype=object)\n for clust_num in set(cluster_map[:,1]):\n clust_out_dir = os.path.join(args.out_dir, clust_num)\n if not os.path.isdir(clust_out_dir):\n os.makedirs(clust_out_dir)\n with open(os.path.join(clust_out_dir, '{!s}.indextable.txt'.format(clust_num)), 'w') as out:\n out.write('\\n'.join(['\\t'.join(cluster_map[idx]) for idx in numpy.where(cluster_map[:,1] == clust_num)[0]]) + '\\n')\n cell_names_to_clusters = dict([tuple(cluster_map[idx]) for idx in range(cluster_map.shape[0])])\n\n# with open(args.cell_name_to_cluster_map) as map_in:\n# cell_names_to_clusters = dict([(elt.strip().split()[0], \n# elt.strip().split()[1]) for elt in map_in])\n\n #parse bam file to make per-cluster bam files\n total_cuts_bed = args.cuts_bed_file\n out_dir = args.out_dir\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n bed_paths = {elt:os.path.join(out_dir, elt, '{!s}.cuts.bed'.format(elt)) for elt in set(cell_names_to_clusters.values())}\n bed_files = {}\n for elt, path in bed_paths.items():\n if not os.path.isdir(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n bed_files[elt] = open(path, 'w')\n# bed_files = {elt:open(path, 'w') for elt,path in bed_paths.items()}\n not_assignable_path = os.path.join(out_dir, 'unassignable.cuts.bed')\n not_assignable = open(not_assignable_path, 'w')\n with open(total_cuts_bed) as lines_in:\n for idx, line in enumerate(lines_in):\n if idx and not idx % 500000:\n sys.stdout.write('Processed {!s} BED records.\\n'.format(idx))\n sys.stdout.flush()\n cell_id = line.split()[3].split(':')[0]\n try:\n bed_files[cell_names_to_clusters[cell_id]].write(line)\n except KeyError:\n not_assignable.write(line)\n sys.stdout.write('Processed {!s} BED records.\\n'.format(idx))\n sys.stdout.flush()\n not_assignable.close()\n for bed in bed_files.values(): \n bed.close()\n\n sys.stdout.write('Splitting BED file complete.\\n')\n sys.stdout.flush()\n" ]
[ [ "numpy.where", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wangxihao/rgbd-kinect-pose
[ "03180723c99759ba2500bcd42b5fe7a1d26eb507" ]
[ "extern/face_expression/face_expression/utils/misc.py" ]
[ "import collections\nimport pydoc\n\nimport torch\nimport torch.nn.functional as F\n\nfrom face_expression import utils\n\n\ndef infer_smplx(smplx_model, expression, pose, beta):\n batch_size = expression.shape[0]\n device = expression.device\n \n # extract\n jaw_pose = pose[:, 63:66]\n translation = pose[:, 84:87]\n root_orientation = pose[:, 87:90]\n\n eye_pose = torch.zeros(batch_size, 6).to(device)\n body_pose = torch.zeros(batch_size, 63).to(device)\n hand_pose = torch.zeros(batch_size, 12).to(device)\n \n # infer SMPLX model\n keypoints_3d, rotation_matrices, verts = smplx_model(\n root_orient=root_orientation,\n pose_body=body_pose,\n pose_hand=hand_pose,\n pose_jaw=jaw_pose,\n pose_eye=eye_pose,\n betas=beta,\n trans=translation,\n expression=expression\n )\n\n return keypoints_3d, rotation_matrices, verts\n\n\ndef project_keypoints_3d(keypoints_3d, projection_matrix):\n keypoints_3d_homo = F.pad(keypoints_3d, pad=[0, 1], mode='constant', value=0.0)\n\n keypoints_2d_homo_proj = torch.bmm(keypoints_3d_homo, projection_matrix.transpose(1, 2))\n keypoints_2d_proj = keypoints_2d_homo_proj[:, :, :2] / keypoints_2d_homo_proj[:, :, 2:]\n\n return keypoints_2d_proj\n\n\ndef infer_smplx_keypoints_2d(smplx_model, expression, pose, beta, projection_matrix):\n keypoints_3d, rotation_matrices, verts = infer_smplx(smplx_model, expression, pose, beta)\n keypoints_2d = project_keypoints_3d(keypoints_3d, projection_matrix)\n\n return keypoints_2d\n\n\ndef get_dataloaders(config, splits=('train', 'val')):\n dataloaders = collections.OrderedDict()\n \n for dataset_type in splits:\n data_config = config.data[dataset_type]\n\n dataset_cls = pydoc.locate(data_config.dataset.cls)\n dataset = dataset_cls(**data_config.dataset.args)\n\n dataloader_args = data_config.dataloader.args\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=dataloader_args.batch_size,\n num_workers=dataloader_args.num_workers,\n sampler=utils.common.get_data_sampler(dataset, shuffle=dataloader_args.shuffle, is_distributed=False), ## TODO: check distributed\n pin_memory=True,\n drop_last=dataloader_args.drop_last\n )\n\n dataloaders[dataset_type] = dataloader\n\n return dataloaders\n\n\ndef get_logger(config):\n logger_cls = pydoc.locate(config.log.logger.cls)\n logger = logger_cls(config, config.log.project_dir, config.log.project_name, config.log.experiment_name)\n\n return logger\n" ]
[ [ "torch.nn.functional.pad", "torch.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vacancy/TensorArtist
[ "7654eb026f6d87f64e28ca152d006ef7625b0f45" ]
[ "tartist/nn/tfutils.py" ]
[ "# -*- coding:utf8 -*-\n# File : tfutils.py\n# Author : Jiayuan Mao\n# Email : [email protected]\n# Date : 1/31/17\n# \n# This file is part of TensorArtist.\n\nimport re\nimport tensorflow as tf\n\n\nclass TArtGraphKeys:\n PLACEHOLDERS = 'placeholders'\n TART_VARIABLES = 'tart_variables'\n INFERENCE_SUMMARIES = 'inference_summaries'\n SCALAR_VARIABLES = 'scalar_variables'\n OPTIMIZER_VARIABLES = 'optimizer_variables'\n\n # DEPRECATED: (2017-12-02)\n TART_OPERATORS = 'tart_operators'\n\n\ndef clean_name(tensor, suffix=':0'):\n name = tensor.name\n if name.endswith(suffix):\n name = name[:-len(suffix)]\n return name\n\n\ndef escape_name(tensor):\n name = tensor.name\n return re.sub(':|/', '_', name)\n\n\ndef clean_summary_suffix(name):\n return re.sub('_\\d+$', '', name)\n\n\ndef remove_tower_name(name):\n return re.sub('^tower/\\d+/', '', name)\n\n\ndef format_summary_name(name):\n name = clean_summary_suffix(name)\n name = remove_tower_name(name)\n if 'train/' in name:\n name = name.replace('train/', '')\n name = 'train/' + name\n return name\n\n\ndef assign_variable(var, value, session=None, use_locking=False):\n from .graph.env import get_default_env\n session = session or get_default_env().session\n session.run(var.assign(value, use_locking=use_locking))\n\n\ndef fetch_variable(var, session=None):\n from .graph.env import get_default_env\n session = session or get_default_env().session\n try:\n return session.run(var)\n except tf.errors.FailedPreconditionError:\n session.run(var.initializer)\n return session.run(var)\n\n\ndef fetch_variables(var_list, session=None):\n from .graph.env import get_default_env\n session = session or get_default_env().session\n try:\n return session.run(var_list)\n except tf.errors.FailedPreconditionError as e:\n raise ValueError('Uninitialized variable(s) encountered in fetch_variables') from e\n\n\ndef assign_variables(var_list_or_dict, value_list=None, session=None, use_locking=False):\n from .graph.env import get_default_env\n session = session or get_default_env().session\n\n assigns = []\n if isinstance(var_list_or_dict, dict):\n iterator = var_list_or_dict.items()\n else:\n iterator = zip(var_list_or_dict, value_list)\n\n for var, value in iterator:\n assigns.append(tf.assign(var, value, use_locking=use_locking, name='assign_{}'.format(escape_name(var))))\n\n session.run(tf.group(*assigns))\n\n\ndef extend_collection_list(base, *others):\n if base is None:\n return others\n if type(base) is str:\n return (base, ) + others\n assert isinstance(base, (tuple, list))\n return tuple(base) + others\n" ]
[ [ "tensorflow.group" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] } ]
rileymcdowell/snakenet
[ "21e06a8491c1897630cbe95add157e29063a0f1f" ]
[ "snakenet/model_player.py" ]
[ "import pygame\nimport pickle\nimport numpy as np\n\nfrom snakenet.game_constants \\\n import DOWN, UP, LEFT, RIGHT, MOVE_TO_KEYPRESS, VALID_MOVES\n\n_MODEL = None\ndef get_model():\n global _MODEL\n if _MODEL is None:\n with open('model.pkl', 'rb') as f:\n _MODEL = pickle.load(f)\n return _MODEL\n\ndef warmup_model_player():\n get_model() \n\ndef get_action_values(image, model, target_network):\n \"\"\"\n Derive action values for the current state.\n If `target_network`, derive the values from the target\n network instead of the online network. This should only\n be set to true during model training.\n \"\"\"\n image = image[np.newaxis,...] # Add color channel.\n image = image[np.newaxis,...] # Make it a singleton list.\n # `target_network` is forwarded to the model, which selects the target\n # network internally, so a single call handles both cases.\n action_values = model.predict([image], target_network)[0] # Extract the values.\n return action_values\n\ndef get_model_prediction_idx(game, model, target_network):\n \"\"\"\n This is the equivalent of the policy (pi). It converts the\n output of the Q-function to an action. In this case, the\n action is selecting the index of a move to perform.\n \"\"\"\n image = game.state.plane\n action_values = get_action_values(image, model, target_network)\n max_prediction_idx = np.argmax(action_values)\n return max_prediction_idx, action_values \n\ndef get_model_keypress(game):\n model = get_model()\n predicted_best_move_idx, action_values = get_model_prediction_idx(game, model, target_network=False)\n predicted_best_move = VALID_MOVES[predicted_best_move_idx]\n return MOVE_TO_KEYPRESS[predicted_best_move], action_values\n" ]
[ [ "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
parthpatwa/Persona-Dialogue-Generation
[ "909b32d0b84b6e34de68745391f3fbfb4711b4d8" ]
[ "train_transmitter.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"Train model for ppl metric with pre-selected parameters.\nThese parameters have some variance in their final perplexity, but they were\nused to achieve the pre-trained model.\n\"\"\"\nimport os\nimport random\nimport torch\nfrom agents.transmitter.transmitter import ARCH_CHOICE\nfrom parlai.scripts.train_model import setup_args as setup_dict_args, TrainLoop\n\n# if IS_ORIGINAL is true, train model on original data; otherwise on revised data.\nIS_ORIGINAL = False\n\nTRANSMITTER_DIR = './tmp/transmitter'\nVERSION = \"transmitter_revised\"\n\n\ndef setup_task():\n if IS_ORIGINAL:\n task_name = 'tasks.convai2transmitter.agents:SelfOriginalTeacher'\n else:\n task_name = 'tasks.convai2transmitter.agents:SelfRevisedTeacher'\n return task_name\n\n\ndef setup_seed(seed=1706123):\n # random seed, to evaluate the performance\n torch.random.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n random.seed(seed)\n\n\ndef gpt_setting():\n return 10, 1e-4, 'gpt_custom', 1.0\n\n\ndef lstm_setting():\n return 64, 3, 'sgd', 0.1\n\n\ndef setup_args():\n \"\"\"\n Create the test env settings\n :return: opt\n \"\"\"\n parser = setup_dict_args()\n exp_name = VERSION\n beam_size = 2\n encode_layers = 2\n decode_layers = 2\n embedding_size = 256\n turn_embed_size = 50\n encoder_turn_use = False\n encoder_dis_use = False\n encoder_hidden_size = 1024\n decoder_hidden_size = 1024\n encode_max_seq_len = 256\n decode_max_seq_len = 32\n smoothing = 0.05\n dropout = 0.1\n embedding_type = 'glove'\n momentum = 0.9\n persona_append_strategy = 'concat'\n history_append_strategy = -1\n select_persona = False\n shuffle_persona = True\n share_decoder_input_output_embed = False\n num_train_epochs = 4\n\n if ARCH_CHOICE == 'gpt':\n batchsize, lr, optimizer, gradient_clip = gpt_setting()\n else:\n batchsize, lr, optimizer, gradient_clip = lstm_setting()\n\n task_name = setup_task()\n parser.set_defaults(\n task=task_name,\n rank_candidates=False,\n # task='tasks.convai2transmitter.agents:SelfRevisedTeacher:no_cands',\n model='agents.transmitter.transmitter:TransformerAgent',\n model_file='./tmp/transmitter/{}.model'.format(exp_name),\n dict_tokenizer='split',\n datatype='train',\n gpt_lr=6.25e-5,\n n_epoches=num_train_epochs,\n num_epochs=num_train_epochs,\n batchsize=batchsize,\n beam_size=beam_size,\n encoder_layers=encode_layers,\n decoder_layers=decode_layers,\n encoder_embed_dim=embedding_size,\n encoder_turn_dim=turn_embed_size,\n encoder_turn_use=encoder_turn_use,\n encoder_dis_use=encoder_dis_use,\n decoder_embed_dim=embedding_size,\n encode_max_seq_len=encode_max_seq_len,\n decode_max_seq_len=decode_max_seq_len,\n select_persona=select_persona,\n shuffle_persona=shuffle_persona,\n persona_append_strategy=persona_append_strategy,\n history_append_strategy=history_append_strategy,\n encoder_bidirectional=False,\n encoder_hidden_size=encoder_hidden_size,\n decoder_hidden_size=decoder_hidden_size,\n smoothing=smoothing,\n lr=lr,\n dropout=dropout,\n encoder_dropout_in=dropout,\n encoder_dropout_out=0,\n decoder_dropout_in=dropout,\n decoder_dropout_out=0,\n share_decoder_input_output_embed=share_decoder_input_output_embed,\n gradient_clip=gradient_clip,\n lookuptable='enc_dec',\n optimizer=optimizer,\n embedding_type=embedding_type,\n momentum=momentum,\n # rough enough\n validation_max_exs=-1,\n validation_every_n_secs=3600,\n 
validation_metric='ppl',\n validation_metric_mode='min',\n validation_patience=10,\n log_every_n_secs=30,\n gpu=0,\n # logging configuration\n exp=exp_name,\n tensorboard_log=True,\n tensorboard_tag='exp',\n train_report_metrics='ppl,f1,hits@1',\n tensorboard_metrics='ppl,f1,hits@1',\n )\n return parser\n\n\nif __name__ == '__main__':\n opt = setup_args()\n setup_seed()\n TrainLoop(opt).train()\n" ]
[ [ "torch.random.manual_seed", "torch.cuda.manual_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
wjbKimberly/tf-cpn-need
[ "4eec0bf14bd6ecfe19a8d3709f8b99a90c93a9cd" ]
[ "models/COCO.res50.384x288.CPN/COCOAllJoints_minus.py" ]
[ "#!/usr/bin/python3\n# coding=utf-8\n\nimport os\nimport os.path as osp\nimport numpy as np\nimport cv2\n\nimport sys\ncur_dir = os.path.dirname(__file__)\nsys.path.insert(0, os.path.join(cur_dir, 'MSCOCO', 'PythonAPI'))\nfrom pycocotools.coco import COCO\n\nclass COCOJoints(object):\n def __init__(self):\n root_data_dir=\"/home/data/COCO/MSCOCO/\"\n self.kp_names = ['nose', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'l_shoulder',\n 'r_shoulder', 'l_elbow', 'r_elbow', 'l_wrist', 'r_wrist',\n 'l_hip', 'r_hip', 'l_knee', 'r_knee', 'l_ankle', 'r_ankle']\n self.max_num_joints = 17\n self.color = np.random.randint(0, 256, (self.max_num_joints, 3))\n\n self.mpi = []\n self.test_mpi = []\n for mpi, stage in zip([self.mpi, self.test_mpi], ['train', 'val']):\n if stage == 'train':\n self._train_gt_path=os.path.join(root_data_dir, 'annotations', 'person_keypoints_trainvalminusminival2014.json')\n coco = COCO(self._train_gt_path)\n else:\n self._val_gt_path=os.path.join(root_data_dir, 'annotations', 'person_keypoints_minival2014.json')\n coco = COCO(self._val_gt_path)\n if stage == 'train':\n for aid in coco.anns.keys():\n ann = coco.anns[aid]\n if ann['image_id'] not in coco.imgs or ann['image_id'] == '366379':\n continue\n imgname = coco.imgs[ann['image_id']]['file_name']\n prefix = 'val' if 'val' in imgname else 'train'\n rect = np.array([0, 0, 1, 1], np.int32)\n if ann['iscrowd']:\n continue\n joints = ann['keypoints']\n bbox = ann['bbox']\n if np.sum(joints[2::3]) == 0 or ann['num_keypoints'] == 0:\n continue\n imgname = root_data_dir + stage + '2017/' + str(ann['image_id']).zfill(12) + '.jpg'\n humanData = dict(aid = aid,joints=joints, imgpath=imgname, headRect=rect, bbox=bbox, imgid = ann['image_id'], segmentation = ann['segmentation'])\n mpi.append(humanData)\n elif stage == 'val':\n files = [(img_id,coco.imgs[img_id]) for img_id in coco.imgs]\n for img_id,img_info in files:\n imgname = root_data_dir + stage + '2017/' + str(img_info['file_name']).zfill(12) + '.jpg'\n humanData = dict(imgid = img_id,imgpath = imgname)\n mpi.append(humanData)\n else:\n print('COCO data error, please check')\n from IPython import embed; embed()\n\n def load_data(self, min_kps=1):\n mpi = [i for i in self.mpi if np.sum(np.array(i['joints'], copy=False)[2::3] > 0) >= min_kps]\n return mpi, self.test_mpi\n\nif __name__ == '__main__':\n coco_joints = COCOJoints()\n train, test = coco_joints.load_data(min_kps=1)\n from IPython import embed; embed()" ]
[ [ "numpy.array", "numpy.sum", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yhzhang35/transformer-xl
[ "0afa266400e34b9a71c8fd799eb6cf47704987a9" ]
[ "pytorch/predict.py" ]
[ "'''This script uses a trained model to run prediction (i.e. to generate data).'''\nimport torch\nimport os\nif __name__=='__main__':\n '''Load the model'''\n model=None\n model_dir_path=\"\"\n with open(os.path.join(model_dir_path, 'model.pt'), 'rb') as f:\n model = torch.load(f)\n\n '''Define the inputs'''\n input_list=['3']\n predict_times_each=10\n # target_len\n\n '''Run model prediction'''\n # for\n\n '''Display the results'''" ]
[ [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
obver-se/fairMLHealth
[ "cf7db71e741e7093a84970b7bed397b01def540f" ]
[ "fairmlhealth/tutorial_helpers.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nAdd-ons for loading data, formatting, and generating tables as part of\nKDD 2020 Tutorial on Measuring Fairness for Healthcare.\nTo be called by Tutorial Notebook.\n\nContributors:\n camagallen <[email protected]>\n\"\"\"\n# Copyright (c) KenSci and contributors.\n# Licensed under the MIT License.\n\nfrom IPython.display import display\nimport numpy as np\nimport os\nimport pandas as pd\n\n# Metric libraries\nfrom aif360.sklearn.metrics import *\nfrom sklearn.metrics import (\n balanced_accuracy_score, roc_auc_score, accuracy_score, precision_score)\n\n# Tutorial Libraries\nfrom . import format_mimic_data\n\n\n'''\nGlobal variable for backward compatibility with KDD2020 tutorial. Used to\n reduce verbosity of comparison tables.\n'''\nTUTORIAL_ON = False\n\n\ndef start_tutorial():\n global TUTORIAL_ON\n TUTORIAL_ON = True\n\n\ndef stop_tutorial():\n global TUTORIAL_ON\n TUTORIAL_ON = False\n\n\ndef is_tutorial_running():\n return TUTORIAL_ON\n\n\n'''\nFormatting Helpers\n'''\n\n\ndef highlight_col(df, color='magenta'):\n return f'background-color: {color}'\n\n\ndef highlight_vals(df, values, colname=None, criteria=None, color='magenta',\n h_type='field'):\n \"\"\" Returns a list of strings setting the background color at each index of\n df where df[colname] is in the list of values\n\n Args:\n df (pandas df): any dataframe\n values (list-like): values in colname to be highlighted\n colname (str): name of column against which to match values. Defaults\n to None.\n criteria (str): query criteria. Defaults to None.\n color (str): css color name. Defaults to 'magenta'.\n h_type (str, optional): either 'text' or 'field'. Defaults to 'field'.\n\n Raises:\n ValueError: if h_type is not 'text' or 'field'\n\n Returns:\n list: CSS style strings, one per row of df\n \"\"\"\n if (criteria is not None and values is not None):\n print(\"Cannot process both criteria and values.\",\n \"Defaulting to criteria entry\")\n if h_type not in ['text', 'field']:\n raise ValueError(\"Wrong h_type sent\")\n if not isinstance(colname, (list, tuple)):\n colname = [colname]\n if values is None:\n values = []\n if not isinstance(values, (list, tuple)):\n values = [values]\n #\n if criteria is None:\n criteria = f\"in {values}\"\n highlight = pd.Series(data=False, index=df.index)\n for col in colname:\n test_vals = values\n if criteria is not None:\n test_vals += df.query(\" \".join([col, criteria]))\n highlight[col] = bool(df[col] in values)\n if h_type == 'text':\n return [f'color: {color}'\n if highlight.any() else '' for v in highlight]\n elif h_type == 'field':\n return [f'background-color: {color}'\n if highlight.any() else '' for v in highlight]\n\n\n\n\n'''\nLoaders and Printers\n'''\n\n\ndef load_mimic3_example(mimic_dirpath):\n \"\"\" Returns a formatted MIMIC-III data subset for use in KDD Tutorial\n\n If formatted data file exists, loads that file. Else, generates\n formatted data and saves in mimic_dirpath.\n\n Args:\n mimic_dirpath (str): valid path to downloaded MIMIC data\n\n Returns:\n pandas dataframe of formatted MIMIC-III data\n \"\"\"\n data_file = os.path.join(os.path.expanduser(mimic_dirpath),\n \"kdd_tutorial_data.csv\")\n if not os.path.exists(data_file):\n formatter = format_mimic_data.mimic_loader(data_file)\n success = formatter.generate_tutorial_data()\n if not success:\n raise RuntimeError(\"Error generating tutorial data.\")\n else:\n pass\n # Load data and restrict to only age 65+\n df = pd.read_csv(data_file)\n df['HADM_ID'] = df['HADM_ID'] + np.random.randint(10**6)\n df.rename(columns={'HADM_ID': 'ADMIT_ID'}, inplace=True)\n # Ensure that length_of_stay is at the end of the dataframe to reduce\n # confusion for first-time tutorial users\n df = df.loc[:, [c for c in df.columns\n if c != 'length_of_stay']+['length_of_stay']]\n return(df)\n\n\ndef print_feature_table(df):\n ''' Displays a table containing statistics on the features available in the\n passed df\n\n Args:\n df (pandas df): dataframe containing MIMIC data for the tutorial\n '''\n print(f\"\\n This data subset has {df.shape[0]} total observations\" +\n f\" and {df.shape[1]-2} input features \\n\")\n feat_df = pd.DataFrame({'feature': df.columns.tolist()}\n ).query('feature not in [\"ADMIT_ID\", \"length_of_stay\"]')\n feat_df['Raw Feature'] = feat_df['feature'].str.split(\"_\").str[0]\n count_df = feat_df.groupby('Raw Feature', as_index=False\n )['feature'].count(\n ).rename(columns={\n 'feature': 'Category Count (Encoded Features)'})\n display(count_df)\n\n\n'''\nTutorial-Specific Helpers\n'''\n\n\ndef simplify_tutorial_report(comparison_report_df):\n \"\"\"Updates a fairness comparison report to exclude FairLearn measures. For\n use in the KDD Tutorial, which first introduces AIF360 measures before\n introducing FairLearn\n\n Args:\n comparison_report_df (pandas df): a fairMLHealth model_comparison\n report\n\n Returns:\n an updated version of the comparison_report_df\n \"\"\"\n print(\"Note: this report has been simplified for this tutorial.\",\n \"For a more extensive report, omit the simplify_tutorial_report function\")\n fl_measures = [\"demographic_parity_difference\", \"demographic_parity_ratio\",\n \"equalized_odds_difference\", \"equalized_odds_ratio\"]\n ix_vals = comparison_report_df.index\n ix_vals = [v.replace(\" \", \"_\").lower() for v in ix_vals]\n drop_meas = [ix_vals.index(v) for v in ix_vals if v in fl_measures]\n df = comparison_report_df.drop(drop_meas, axis=0)\n return(df)\n\n" ]
[ [ "pandas.read_csv", "pandas.Series", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
FateScript/clearml
[ "f6651d39e062075d0480baf000a0711f2fd9dc5d" ]
[ "examples/pipeline/pipeline_from_decorator.py" ]
[ "from clearml.automation.controller import PipelineDecorator\n\n\n# Make the following function an independent pipeline component step\n# notice all package imports inside the function will be automatically logged as\n# required packages for the pipeline execution step\n@PipelineDecorator.component(return_values=['data_frame'], cache=True)\ndef step_one(pickle_data_url: str, extra: int = 43):\n print('step_one')\n # make sure we have scikit-learn for this step, we need it to unpickle the object\n import sklearn # noqa\n import pickle\n import pandas as pd\n from clearml import StorageManager\n local_iris_pkl = StorageManager.get_local_copy(remote_url=pickle_data_url)\n with open(local_iris_pkl, 'rb') as f:\n iris = pickle.load(f)\n data_frame = pd.DataFrame(iris['data'], columns=iris['feature_names'])\n data_frame.columns += ['target']\n data_frame['target'] = iris['target']\n return data_frame\n\n\n# Make the following function an independent pipeline component step\n# notice all package imports inside the function will be automatically logged as\n# required packages for the pipeline execution step.\n# Specifying `return_values` makes sure the function step can return an object to the pipeline logic\n# In this case, the returned tuple will be stored as an artifact named \"processed_data\"\n@PipelineDecorator.component(return_values=['processed_data'], cache=True,)\ndef step_two(data_frame, test_size=0.2, random_state=42):\n print('step_two')\n # make sure we have pandas for this step, we need it to use the data_frame\n import pandas as pd # noqa\n from sklearn.model_selection import train_test_split\n y = data_frame['target']\n X = data_frame[(c for c in data_frame.columns if c != 'target')]\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=test_size, random_state=random_state)\n\n return X_train, X_test, y_train, y_test\n\n\n# Make the following function an independent pipeline component step\n# notice all package imports inside the function will be automatically logged as\n# required packages for the pipeline execution step\n# Specifying `return_values` makes sure the function step can return an object to the pipeline logic\n# In this case, the returned object will be stored as an artifact named \"model\"\n@PipelineDecorator.component(return_values=['model'], cache=True,)\ndef step_three(data):\n print('step_three')\n # make sure we have pandas for this step, we need it to use the data_frame\n import pandas as pd # noqa\n from sklearn.linear_model import LogisticRegression\n X_train, X_test, y_train, y_test = data\n model = LogisticRegression(solver='liblinear', multi_class='auto')\n model.fit(X_train, y_train)\n return model\n\n\n# The actual pipeline execution context\n# notice that all pipeline component function calls are actually executed remotely\n# Only when a return value is used, the pipeline logic will wait for the component execution to complete\n@PipelineDecorator.pipeline(name='custom pipeline logic', project='examples', version='0.0.5')\ndef executing_pipeline(pickle_url, mock_parameter='mock'):\n print('pipeline args:', pickle_url, mock_parameter)\n\n # Use the pipeline argument to start the pipeline and pass it to the first step\n print('launch step one')\n data_frame = step_one(pickle_url)\n\n # Use the returned data from the first step (`step_one`), and pass it to the next step (`step_two`)\n # Notice! 
unless we actually access the `data_frame` object,\n # the pipeline logic does not actually load the artifact itself.\n # When actually passing the `data_frame` object into a new step,\n # It waits for the creating step/function (`step_one`) to complete the execution\n print('launch step two')\n processed_data = step_two(data_frame)\n\n # Notice we can actually process/modify the returned values inside the pipeline logic context.\n # This means the modified object will be stored on the pipeline Task.\n processed_data = [processed_data[0], processed_data[1]*2, processed_data[2], processed_data[3]]\n print('launch step three')\n model = step_three(processed_data)\n\n # Notice since we are \"printing\" the `model` object,\n # we actually deserialize the object from the third step, and thus wait for the third step to complete.\n print('pipeline completed with model: {}'.format(model))\n\n\nif __name__ == '__main__':\n # set the pipeline steps default execution queue (per specific step we can override it with the decorator)\n PipelineDecorator.set_default_execution_queue('default')\n # run the pipeline steps as subprocess on the current machine, for debugging purposes\n # PipelineDecorator.debug_pipeline()\n\n # Start the pipeline execution logic.\n executing_pipeline(\n pickle_url='https://github.com/allegroai/events/raw/master/odsc20-east/generic/iris_dataset.pkl',\n )\n\n print('process completed')\n" ]
[ [ "sklearn.model_selection.train_test_split", "pandas.DataFrame", "sklearn.linear_model.LogisticRegression" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
BioJoe/AHC-assay-by-DNA-content
[ "22c251a33a57faddef9aaf2bab3884a1f7cf909f" ]
[ "1_merge DNA content results.py" ]
[ "#load python included modules\r\nimport os\r\nimport ntpath\r\nimport tkinter as tk\r\nfrom tkinter import filedialog, simpledialog, messagebox\r\n#load additional python modules\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\n#create function to get a list of all files in a directory and its subdirectories\r\ndef getListOfFiles(dirName):\r\n #list files in directory\r\n listOfFile = os.listdir(dirName)\r\n allFiles = list()\r\n for entry in listOfFile:\r\n fullPath = os.path.join(dirName, entry)\r\n # If entry is a directory then get the list of files in this directory \r\n if os.path.isdir(fullPath):\r\n allFiles = allFiles + getListOfFiles(fullPath)\r\n else:\r\n allFiles.append(fullPath) \r\n return allFiles\r\n\r\n# define a function to search for DNA content results files and merge them\r\ndef findAndMerge(filelist):\r\n # Prepare a dataframe\r\n df_out = pd.DataFrame()\r\n CystNumber = 1\r\n for file in filelist:\r\n file_name = ntpath.basename(file)\r\n dir_name = ntpath.dirname(file)\r\n #when finding a DNA content results file, add the data to the dataframe and adjust the cyst number\r\n if \" DNA content.xls\" in file_name:\r\n df_cells = pd.read_csv(file, sep='\\t')\r\n df_cells[\"CystNumber\"] = CystNumber\r\n df_out = df_out.append(df_cells, sort=False, ignore_index=True)\r\n CystNumber = CystNumber + 1\r\n print(\"I found results table \" + file_name)\r\n return df_out\r\n\r\n#required for the dialog boxes\r\nroot = tk.Tk()\r\nroot.withdraw()\r\n\r\n#create empty dataframe\r\ndf_merged = pd.DataFrame()\r\n\r\n#loop until all genotypes are merged\r\ngo_on = True\r\nwhile go_on:\r\n #ask for a directory\r\n dirName = filedialog.askdirectory(title = \"Choose a folder containing results from 1 genotype\")\r\n\r\n #get filelist and merge the results files\r\n filelist = getListOfFiles(dirName)\r\n df_out = findAndMerge(filelist)\r\n\r\n #ask user to specify the genotype\r\n genotype = simpledialog.askstring(title = None, prompt = \"Enter genotype\")\r\n df_out[\"genotype\"] = genotype\r\n df_merged = df_merged.append(df_out, sort = False, ignore_index = True)\r\n go_on = messagebox.askyesnocancel(title = None, message=\"Add another genotype?\")\r\n\r\n#set the label of the first column to ObjectNumber\r\ndf_merged.columns.values[[0]] = [\"ObjectNumber\"]\r\n\r\n#save\r\nsave_path = filedialog.asksaveasfilename(title='Save compiled results as ...',defaultextension = '.xlsx',initialdir = dirName, initialfile = \"compiled DNA content results\")\r\ndf_merged.to_excel(save_path, index=False)\r\nprint('done')\r\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
FeU-aKlos/gdal
[ "bba6781133815248c9329842d365f8812b74c33f" ]
[ "swig/python/gdal-utils/osgeo_utils/gdal_merge.py" ]
[ "#!/usr/bin/env python3\n###############################################################################\n# $Id$\n#\n# Project: InSAR Peppers\n# Purpose: Module to extract data from many rasters into one output.\n# Author: Frank Warmerdam, [email protected]\n#\n###############################################################################\n# Copyright (c) 2000, Atlantis Scientific Inc. (www.atlsci.com)\n# Copyright (c) 2009-2011, Even Rouault <even dot rouault at spatialys.com>\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Library General Public\n# License as published by the Free Software Foundation; either\n# version 2 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Library General Public License for more details.\n#\n# You should have received a copy of the GNU Library General Public\n# License along with this library; if not, write to the\n# Free Software Foundation, Inc., 59 Temple Place - Suite 330,\n# Boston, MA 02111-1307, USA.\n###############################################################################\n# changes 29Apr2011\n# If the input image is a multi-band one, use all the channels in\n# building the stack.\n# [email protected]\n\nimport math\nimport sys\nimport time\n\nfrom osgeo import gdal\nfrom osgeo_utils.auxiliary.util import GetOutputDriverFor\n\nprogress = gdal.TermProgress_nocb\n\n__version__ = '$id$'[5:-1]\n\n\n# =============================================================================\ndef raster_copy(s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,\n t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n,\n nodata=None, verbose=0):\n\n if verbose != 0:\n print('Copy %d,%d,%d,%d to %d,%d,%d,%d.'\n % (s_xoff, s_yoff, s_xsize, s_ysize,\n t_xoff, t_yoff, t_xsize, t_ysize))\n\n if nodata is not None:\n return raster_copy_with_nodata(\n s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,\n t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n,\n nodata)\n\n s_band = s_fh.GetRasterBand(s_band_n)\n m_band = None\n # Works only in binary mode and doesn't take into account\n # intermediate transparency values for compositing.\n if s_band.GetMaskFlags() != gdal.GMF_ALL_VALID:\n m_band = s_band.GetMaskBand()\n elif s_band.GetColorInterpretation() == gdal.GCI_AlphaBand:\n m_band = s_band\n if m_band is not None:\n return raster_copy_with_mask(\n s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,\n t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n,\n m_band)\n\n s_band = s_fh.GetRasterBand(s_band_n)\n t_band = t_fh.GetRasterBand(t_band_n)\n\n data = s_band.ReadRaster(s_xoff, s_yoff, s_xsize, s_ysize,\n t_xsize, t_ysize, t_band.DataType)\n t_band.WriteRaster(t_xoff, t_yoff, t_xsize, t_ysize,\n data, t_xsize, t_ysize, t_band.DataType)\n\n return 0\n\n# =============================================================================\n\n\ndef raster_copy_with_nodata(s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,\n t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n,\n nodata):\n import numpy as np\n\n s_band = s_fh.GetRasterBand(s_band_n)\n t_band = t_fh.GetRasterBand(t_band_n)\n\n data_src = s_band.ReadAsArray(s_xoff, s_yoff, s_xsize, s_ysize,\n t_xsize, t_ysize)\n data_dst = t_band.ReadAsArray(t_xoff, t_yoff, t_xsize, t_ysize)\n\n if not np.isnan(nodata):\n nodata_test = np.equal(data_src, nodata)\n else:\n 
nodata_test = np.isnan(data_src)\n\n to_write = np.choose(nodata_test, (data_src, data_dst))\n\n t_band.WriteArray(to_write, t_xoff, t_yoff)\n\n return 0\n\n# =============================================================================\n\n\ndef raster_copy_with_mask(s_fh, s_xoff, s_yoff, s_xsize, s_ysize, s_band_n,\n t_fh, t_xoff, t_yoff, t_xsize, t_ysize, t_band_n,\n m_band):\n import numpy as np\n\n s_band = s_fh.GetRasterBand(s_band_n)\n t_band = t_fh.GetRasterBand(t_band_n)\n\n data_src = s_band.ReadAsArray(s_xoff, s_yoff, s_xsize, s_ysize,\n t_xsize, t_ysize)\n data_mask = m_band.ReadAsArray(s_xoff, s_yoff, s_xsize, s_ysize,\n t_xsize, t_ysize)\n data_dst = t_band.ReadAsArray(t_xoff, t_yoff, t_xsize, t_ysize)\n\n mask_test = np.equal(data_mask, 0)\n to_write = np.choose(mask_test, (data_src, data_dst))\n\n t_band.WriteArray(to_write, t_xoff, t_yoff)\n\n return 0\n\n# =============================================================================\n\n\ndef names_to_fileinfos(names):\n \"\"\"\n Translate a list of GDAL filenames, into file_info objects.\n\n names -- list of valid GDAL dataset names.\n\n Returns a list of file_info objects. There may be less file_info objects\n than names if some of the names could not be opened as GDAL files.\n \"\"\"\n\n file_infos = []\n for name in names:\n fi = file_info()\n if fi.init_from_name(name) == 1:\n file_infos.append(fi)\n\n return file_infos\n\n# *****************************************************************************\n\n\nclass file_info(object):\n \"\"\"A class holding information about a GDAL file.\"\"\"\n\n def __init__(self):\n self.band_type = None\n self.bands = None\n self.ct = None\n self.filename = None\n self.geotransform = None\n self.lrx = None\n self.lry = None\n self.projection = None\n self.ulx = None\n self.uly = None\n self.xsize = None\n self.ysize = None\n\n def init_from_name(self, filename):\n \"\"\"\n Initialize file_info from filename\n\n filename -- Name of file to read.\n\n Returns 1 on success or 0 if the file can't be opened.\n \"\"\"\n fh = gdal.Open(filename)\n if fh is None:\n return 0\n\n self.filename = filename\n self.bands = fh.RasterCount\n self.xsize = fh.RasterXSize\n self.ysize = fh.RasterYSize\n self.band_type = fh.GetRasterBand(1).DataType\n self.projection = fh.GetProjection()\n self.geotransform = fh.GetGeoTransform()\n self.ulx = self.geotransform[0]\n self.uly = self.geotransform[3]\n self.lrx = self.ulx + self.geotransform[1] * self.xsize\n self.lry = self.uly + self.geotransform[5] * self.ysize\n\n ct = fh.GetRasterBand(1).GetRasterColorTable()\n if ct is not None:\n self.ct = ct.Clone()\n else:\n self.ct = None\n\n return 1\n\n def report(self):\n print('Filename: ' + self.filename)\n print('File Size: %dx%dx%d'\n % (self.xsize, self.ysize, self.bands))\n print('Pixel Size: %f x %f'\n % (self.geotransform[1], self.geotransform[5]))\n print('UL:(%f,%f) LR:(%f,%f)'\n % (self.ulx, self.uly, self.lrx, self.lry))\n\n def copy_into(self, t_fh, s_band=1, t_band=1, nodata_arg=None, verbose=0):\n \"\"\"\n Copy this files image into target file.\n\n This method will compute the overlap area of the file_info objects\n file, and the target gdal.Dataset object, and copy the image data\n for the common window area. It is assumed that the files are in\n a compatible projection ... no checking or warping is done. 
However,\n if the destination file is a different resolution, or different\n image pixel type, the appropriate resampling and conversions will\n be done (using normal GDAL promotion/demotion rules).\n\n t_fh -- gdal.Dataset object for the file into which some or all\n of this file may be copied.\n\n Returns 1 on success (or if nothing needs to be copied), and zero on\n failure.\n \"\"\"\n t_geotransform = t_fh.GetGeoTransform()\n t_ulx = t_geotransform[0]\n t_uly = t_geotransform[3]\n t_lrx = t_geotransform[0] + t_fh.RasterXSize * t_geotransform[1]\n t_lry = t_geotransform[3] + t_fh.RasterYSize * t_geotransform[5]\n\n # figure out intersection region\n tgw_ulx = max(t_ulx, self.ulx)\n tgw_lrx = min(t_lrx, self.lrx)\n if t_geotransform[5] < 0:\n tgw_uly = min(t_uly, self.uly)\n tgw_lry = max(t_lry, self.lry)\n else:\n tgw_uly = max(t_uly, self.uly)\n tgw_lry = min(t_lry, self.lry)\n\n # do they even intersect?\n if tgw_ulx >= tgw_lrx:\n return 1\n if t_geotransform[5] < 0 and tgw_uly <= tgw_lry:\n return 1\n if t_geotransform[5] > 0 and tgw_uly >= tgw_lry:\n return 1\n\n # compute target window in pixel coordinates.\n tw_xoff = int((tgw_ulx - t_geotransform[0]) / t_geotransform[1] + 0.1)\n tw_yoff = int((tgw_uly - t_geotransform[3]) / t_geotransform[5] + 0.1)\n tw_xsize = int((tgw_lrx - t_geotransform[0]) / t_geotransform[1] + 0.5) \\\n - tw_xoff\n tw_ysize = int((tgw_lry - t_geotransform[3]) / t_geotransform[5] + 0.5) \\\n - tw_yoff\n\n if tw_xsize < 1 or tw_ysize < 1:\n return 1\n\n # Compute source window in pixel coordinates.\n sw_xoff = int((tgw_ulx - self.geotransform[0]) / self.geotransform[1] + 0.1)\n sw_yoff = int((tgw_uly - self.geotransform[3]) / self.geotransform[5] + 0.1)\n sw_xsize = int((tgw_lrx - self.geotransform[0]) /\n self.geotransform[1] + 0.5) - sw_xoff\n sw_ysize = int((tgw_lry - self.geotransform[3]) /\n self.geotransform[5] + 0.5) - sw_yoff\n\n if sw_xsize < 1 or sw_ysize < 1:\n return 1\n\n # Open the source file, and copy the selected region.\n s_fh = gdal.Open(self.filename)\n\n return raster_copy(s_fh, sw_xoff, sw_yoff, sw_xsize, sw_ysize, s_band,\n t_fh, tw_xoff, tw_yoff, tw_xsize, tw_ysize, t_band,\n nodata_arg, verbose)\n\n\n# =============================================================================\ndef Usage():\n print('Usage: gdal_merge.py [-o out_filename] [-of out_format] [-co NAME=VALUE]*')\n print(' [-ps pixelsize_x pixelsize_y] [-tap] [-separate] [-q] [-v] [-pct]')\n print(' [-ul_lr ulx uly lrx lry] [-init \"value [value...]\"]')\n print(' [-n nodata_value] [-a_nodata output_nodata_value]')\n print(' [-ot datatype] [-createonly] input_files')\n print(' [--help-general]')\n print('')\n return 1\n\n\ndef gdal_merge(argv=None):\n verbose = 0\n quiet = 0\n names = []\n driver_name = None\n out_file = 'out.tif'\n\n ulx = None\n psize_x = None\n separate = 0\n copy_pct = 0\n nodata = None\n a_nodata = None\n create_options = []\n pre_init = []\n band_type = None\n createonly = 0\n bTargetAlignedPixels = False\n start_time = time.time()\n\n if argv is None:\n argv = sys.argv\n argv = gdal.GeneralCmdLineProcessor(argv)\n if argv is None:\n return 0\n\n # Parse command line arguments.\n i = 1\n while i < len(argv):\n arg = argv[i]\n\n if arg == '-o':\n i = i + 1\n out_file = argv[i]\n\n elif arg == '-v':\n verbose = 1\n\n elif arg == '-q' or arg == '-quiet':\n quiet = 1\n\n elif arg == '-createonly':\n createonly = 1\n\n elif arg == '-separate':\n separate = 1\n\n elif arg == '-seperate':\n separate = 1\n\n elif arg == '-pct':\n copy_pct = 1\n\n elif arg == 
'-ot':\n i = i + 1\n band_type = gdal.GetDataTypeByName(argv[i])\n if band_type == gdal.GDT_Unknown:\n print('Unknown GDAL data type: %s' % argv[i])\n return 1\n\n elif arg == '-init':\n i = i + 1\n str_pre_init = argv[i].split()\n for x in str_pre_init:\n pre_init.append(float(x))\n\n elif arg == '-n':\n i = i + 1\n nodata = float(argv[i])\n\n elif arg == '-a_nodata':\n i = i + 1\n a_nodata = float(argv[i])\n\n elif arg == '-f' or arg == '-of':\n i = i + 1\n driver_name = argv[i]\n\n elif arg == '-co':\n i = i + 1\n create_options.append(argv[i])\n\n elif arg == '-ps':\n psize_x = float(argv[i + 1])\n psize_y = -1 * abs(float(argv[i + 2]))\n i = i + 2\n\n elif arg == '-tap':\n bTargetAlignedPixels = True\n\n elif arg == '-ul_lr':\n ulx = float(argv[i + 1])\n uly = float(argv[i + 2])\n lrx = float(argv[i + 3])\n lry = float(argv[i + 4])\n i = i + 4\n\n elif arg[:1] == '-':\n print('Unrecognized command option: %s' % arg)\n return Usage()\n\n else:\n names.append(arg)\n\n i = i + 1\n\n if not names:\n print('No input files selected.')\n return Usage()\n\n if driver_name is None:\n driver_name = GetOutputDriverFor(out_file)\n\n driver = gdal.GetDriverByName(driver_name)\n if driver is None:\n print('Format driver %s not found, pick a supported driver.' % driver_name)\n return 1\n\n DriverMD = driver.GetMetadata()\n if 'DCAP_CREATE' not in DriverMD:\n print('Format driver %s does not support creation and piecewise writing.\\nPlease select a format that does, such as GTiff (the default) or HFA (Erdas Imagine).' % driver_name)\n return 1\n\n # Collect information on all the source files.\n file_infos = names_to_fileinfos(names)\n\n if ulx is None:\n ulx = file_infos[0].ulx\n uly = file_infos[0].uly\n lrx = file_infos[0].lrx\n lry = file_infos[0].lry\n\n for fi in file_infos:\n ulx = min(ulx, fi.ulx)\n uly = max(uly, fi.uly)\n lrx = max(lrx, fi.lrx)\n lry = min(lry, fi.lry)\n\n if psize_x is None:\n psize_x = file_infos[0].geotransform[1]\n psize_y = file_infos[0].geotransform[5]\n\n if band_type is None:\n band_type = file_infos[0].band_type\n\n # Try opening as an existing file.\n gdal.PushErrorHandler('CPLQuietErrorHandler')\n t_fh = gdal.Open(out_file, gdal.GA_Update)\n gdal.PopErrorHandler()\n\n # Create output file if it does not already exist.\n if t_fh is None:\n\n if bTargetAlignedPixels:\n ulx = math.floor(ulx / psize_x) * psize_x\n lrx = math.ceil(lrx / psize_x) * psize_x\n lry = math.floor(lry / -psize_y) * -psize_y\n uly = math.ceil(uly / -psize_y) * -psize_y\n\n geotransform = [ulx, psize_x, 0, uly, 0, psize_y]\n\n xsize = int((lrx - ulx) / geotransform[1] + 0.5)\n ysize = int((lry - uly) / geotransform[5] + 0.5)\n\n if separate != 0:\n bands = 0\n\n for fi in file_infos:\n bands = bands + fi.bands\n else:\n bands = file_infos[0].bands\n\n t_fh = driver.Create(out_file, xsize, ysize, bands,\n band_type, create_options)\n if t_fh is None:\n print('Creation failed, terminating gdal_merge.')\n return 1\n\n t_fh.SetGeoTransform(geotransform)\n t_fh.SetProjection(file_infos[0].projection)\n\n if copy_pct:\n t_fh.GetRasterBand(1).SetRasterColorTable(file_infos[0].ct)\n else:\n if separate != 0:\n bands = 0\n for fi in file_infos:\n bands = bands + fi.bands\n if t_fh.RasterCount < bands:\n print('Existing output file has less bands than the input files. You should delete it before. 
Terminating gdal_merge.')\n return 1\n else:\n bands = min(file_infos[0].bands, t_fh.RasterCount)\n\n # Do we need to set nodata value ?\n if a_nodata is not None:\n for i in range(t_fh.RasterCount):\n t_fh.GetRasterBand(i + 1).SetNoDataValue(a_nodata)\n\n # Do we need to pre-initialize the whole mosaic file to some value?\n if pre_init is not None:\n if t_fh.RasterCount <= len(pre_init):\n for i in range(t_fh.RasterCount):\n t_fh.GetRasterBand(i + 1).Fill(pre_init[i])\n elif len(pre_init) == 1:\n for i in range(t_fh.RasterCount):\n t_fh.GetRasterBand(i + 1).Fill(pre_init[0])\n\n # Copy data from source files into output file.\n t_band = 1\n\n if quiet == 0 and verbose == 0:\n progress(0.0)\n fi_processed = 0\n\n for fi in file_infos:\n if createonly != 0:\n continue\n\n if verbose != 0:\n print(\"\")\n print(\"Processing file %5d of %5d, %6.3f%% completed in %d minutes.\"\n % (fi_processed + 1, len(file_infos),\n fi_processed * 100.0 / len(file_infos),\n int(round((time.time() - start_time) / 60.0))))\n fi.report()\n\n if separate == 0:\n for band in range(1, bands + 1):\n fi.copy_into(t_fh, band, band, nodata, verbose)\n else:\n for band in range(1, fi.bands + 1):\n fi.copy_into(t_fh, band, t_band, nodata, verbose)\n t_band = t_band + 1\n\n fi_processed = fi_processed + 1\n if quiet == 0 and verbose == 0:\n progress(fi_processed / float(len(file_infos)))\n\n # Force file to be closed.\n t_fh = None\n\ndef main(argv=sys.argv):\n return gdal_merge(argv)\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n" ]
[ [ "numpy.isnan", "numpy.choose", "numpy.equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shashankaryan/sudoku-generator
[ "3d71c6f1e75bc924c41a5e1e8a15f03584267a02" ]
[ "sudoku-generator.py" ]
[ "import numpy as np\nimport math\nimport random\nimport os\nimport multiprocessing\nfrom colorama import init, Fore, Back, Style\n\n\nclass SudokuGenerator(object):\n \"\"\" Generate unique sudoku solutions every time for n*n grids. \"\"\"\n\n def __init__(self, num):\n\n self.num = num\n\n def generate_grid(self):\n \"\"\" Generate a grid of n*n numbers. \"\"\"\n\n grid = np.zeros((self.num,self.num), dtype=np.int)\n return grid\n\n def generate_check_lists(self):\n \"\"\" Return a dict of lists for each row, column and sub-matrix.\n Each list will contain numbers from 1 to n.\n\n These lists will be used by sudoku_generator function for\n tracking available possibilities of number to fill a particular cell\n following the basic sudoku rules.\n \"\"\"\n\n checker= {}\n for i in range(1,self.num+1):\n checker['row'+str(i)]=list(range(1,self.num+1))\n checker['col'+str(i)]=list(range(1,self.num+1))\n checker['box'+str(i)]=list(range(1,self.num+1))\n return checker\n\n def get_submatrix_num(self, row_n, col_n, root_n):\n \"\"\" Get the number of the sub-matrix using the row and column number. \"\"\"\n\n if row_n % root_n == 0: # root_n is square root of n\n row_t = int(row_n/root_n)\n else:\n row_t = int(row_n/root_n) + 1\n if col_n % root_n == 0:\n col_t = int(col_n/root_n)\n else:\n col_t = int(col_n/root_n) + 1\n box_n = col_t + (row_t-1)*root_n # formula for calculating which submatrix box a (row,column) belongs to\n return box_n\n\n\n def sudoku_gen(self, state=None):\n \"\"\" Push a number into each cell of the generated grid, following sudoku rules.\n Each number is picked randomly from the list of elements obtained by the\n intersection of checker lists for that particular row, col and submatrix\n \"\"\"\n\n count = 0\n while True:\n\n if state is not None and state.value == 1:\n # print ('Solver',os.getpid(),'quitting')\n break\n\n else:\n\n m = self.generate_check_lists()\n sudoku = self.generate_grid()\n count+=1 #count the attempts made to find a solution.\n\n try:\n\n for row_n in range(1, self.num+1):\n for col_n in range(1, self.num+1):\n\n box_n = self.get_submatrix_num(row_n, col_n, int(math.sqrt(self.num)))\n row = 'row' + str(row_n)\n col = 'col' + str(col_n)\n box = 'box' + str(box_n)\n # print('target row, column, box => ' + row, col, box)\n\n common_list = list(set(m[row]).intersection(m[col],m[box])) # creating common list.\n # print(common_list)\n\n rand_num = random.choice(common_list) # picking a number from common list.\n sudoku[row_n-1][col_n-1] = rand_num\n m[row].remove(rand_num)\n m[col].remove(rand_num)\n m[box].remove(rand_num)\n\n if sudoku[self.num-1][self.num-1]>0: # checking if solution is ready, then break out.\n print('Total Number of attempts: ' + str(count))\n self.display(sudoku)\n\n if state is not None:\n state.value = 1 # signalling other processes to quit solving\n # print ('Solver '+ str(os.getpid()), + ' solved the problem!')\n break\n\n except IndexError: # random.choice raises IndexError when common_list is empty, so retry\n continue\n\n\n def cprint(self, msg, foreground = \"black\", background = \"white\"):\n \"\"\"This function is used to provide color to the sudoku cells.\"\"\"\n fground = foreground.upper()\n bground = background.upper()\n style = getattr(Fore, fground) + getattr(Back, bground)\n print(style + \" \" + msg + \" \" + Style.RESET_ALL, end=\"\", flush=True)\n\n def display(self, solution):\n # Printing the Sudoku.\n for i in range(len(solution)):\n for j in range(len(solution)):\n self.cprint(str(solution[i][j]), \"black\", \"green\")\n
print('')\n\n\n\nclass SudokuConcurrentSolver(object):\n\n def __init__(self, dimension):\n\n self.dimension = dimension\n\n def solve(self, nprocs):\n \"\"\" Solve Sudokus concurrently. \"\"\"\n\n state = multiprocessing.Value('i', 0)\n\n solver = SudokuGenerator(self.dimension)\n proc = [multiprocessing.Process(target=solver.sudoku_gen, args=(state,)) for x in range(nprocs)]\n\n # Run processes\n for p in proc:\n p.start()\n\n # Wait for the processes to complete\n for p in proc:\n p.join()\n\n\nif __name__ == \"__main__\":\n\n import sys\n import argparse\n\n parser = argparse.ArgumentParser(description='It takes a number as an optional argument.')\n parser.add_argument('-c',dest='concurrent', action=\"store_true\", help='For implementing concurrency')\n parser.add_argument('-n', dest='gridnum', required=True, help='Grid number for generating sudoku')\n parser.add_argument('-p', dest='procs',default = multiprocessing.cpu_count(),type = int, help='No. of processes to use for concurrent solution.')\n\n args = parser.parse_args()\n\n grid_number = int(args.gridnum)\n proc = int(args.procs)\n init() #initialising the colorama\n\n if args.concurrent:\n\n instance = SudokuConcurrentSolver(grid_number)\n solution = instance.solve(proc)\n\n else:\n\n instance = SudokuGenerator(grid_number)\n solution = instance.sudoku_gen()\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jayjung121/CSE163-Python
[ "70f86826b81c17851c91e6593506f10e2b8e30b0" ]
[ "hw2/hw2_pandas.py" ]
[ "\nimport pandas as pd\n\n\n# Write your functions here!\ndef parse(file_name):\n '''\n This function takes a .csv file and returns a pandas dataframe.\n '''\n return pd.read_csv(file_name, header=0)\n\n\ndef species_count(df):\n '''\n This function returns the number of unique Pokemon\n species (determined by the name attribute) found in the dataset.\n '''\n return len(pd.unique(df.loc[:, 'name']))\n\n\ndef max_level(df):\n '''\n This function returns the name and level of the max-level Pokemon.\n '''\n result = df[df['level'] == max(df['level'])].iloc[0, :]\n return (result['name'], result['level'])\n\n\ndef filter_range(df, min, max):\n '''\n This function takes the smallest (inclusive) and largest (exclusive)\n level values and returns a list of Pokemon names having a level within that range\n '''\n return df[(df['level'] >= min) & (df['level'] < max)]['name'].tolist()\n\n\ndef mean_attack_for_type(df, type):\n '''\n This function takes a Pokemon type (string) as an argument\n and returns the average attack stat for all the Pokemon\n in the dataset with that type.\n '''\n atk = df[df['type'] == type]['atk']\n return atk.mean()\n\n\ndef count_types(df):\n '''\n This function returns a dictionary with keys that are Pokemon\n types and values that are the number of times that type appears\n in the dataset.\n '''\n type_df = df.groupby(by='type').apply(len)\n result = {}\n for type, num in type_df.items():\n result[type] = num\n return result\n\n\ndef highest_stage_per_type(df):\n '''\n This function returns a dictionary that has keys that are the Pokemon types\n and values that are the highest value of the stage column for that type\n of Pokemon.\n '''\n stage_df = df.groupby('type')['stage'].apply(max)\n result = {}\n for type, stage in stage_df.items():\n result[type] = stage\n return result\n\n\ndef mean(values):\n '''\n Return the mean of the given values.\n '''\n return sum(values) / len(values)\n\n\ndef mean_attack_per_type(df):\n '''\n This function returns a dictionary that has keys that are\n the Pokemon types and values that are the average attack\n for that Pokemon type.\n '''\n df = df.groupby('type')['atk'].apply(mean)\n result = {}\n for key, value in df.items():\n result[key] = round(value, 2)\n return result\n" ]
[ [ "pandas.read_csv", "pandas.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
cxclark/hive_ml
[ "9e00b8028bfc5a6a913ac9319f4de5ea407977ec" ]
[ "hive_ml/layers/activation.py" ]
[ "import numpy as np\n\nclass ReluLayer:\n \"\"\"\n Implements ReLU nonlinearity elementwise.\n f(x) = max(0, x)\n \"\"\"\n\n def __init__(self):\n self.cache = {}\n self.type = 'relu'\n\n def forward(self, Z):\n \"\"\"\n Applies ReLU activation function to input Z.\n \"\"\"\n # Save the input value for backpropagation.\n self.cache['Z'] = Z\n \n # Apply the relu activation to the input.\n return np.where(Z < 0, 0, Z)\n \n def backward(self, dA, lr):\n \"\"\"\n Flows gradient dA back where values in forward propagation were non-negative.\n \"\"\"\n # Extract the input value.\n Z = self.cache['Z']\n \n # Flow the gradient backward according to ReLU's derivative.\n return dA * np.where(Z < 0, 0, 1)" ]
[ [ "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MonikaFJ/CarND-Behavioral-Cloning-P3
[ "94a5550f6e5acddc86e48b482d2fe6e3c861d713" ]
[ "model_inception.py" ]
[ "import csv\nimport cv2\nimport numpy as np\nimport os\nfrom keras.applications.inception_v3 import InceptionV3\nimport tensorflow as tf\nfrom keras.models import Model\n\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom keras.layers import Cropping2D, GlobalAveragePooling2D, Input\nfrom keras.applications.inception_v3 import preprocess_input\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.applications.inception_v3 import preprocess_input\n\n\nimport sklearn\nfrom math import ceil\nimport matplotlib.pyplot as plt\n\n\nif os.path.exists(\"./model.h5\"):\n os.remove(\"./model.h5\")\n\nsamples = []\nbatch_size = 32\nwith open('./data/driving_log.csv') as logs:\n reader = csv.reader(logs)\n next(reader)\n for line in reader:\n samples.append(line)\n\nsamples = samples[0:1000]\n\ndef my_preprocess(path):\n img = cv2.imread(path)\n #img = img[50:130, :]\n return img\n\n#def generator(samples, batch_size=32):\n# num_samples = len(samples)\n# while 1: # Loop forever so the generator never terminates\n# sklearn.utils.shuffle(samples)\n# for offset in range(0, num_samples, batch_size):\ndef generate_batch(samples_part):\n angles = []\n images = []\n for sample in samples_part:\n #batch_samples = samples[offset:offset+batch_size]\n\n measurement = float(sample[3])\n center_image = my_preprocess(os.path.join('data', sample[0]))\n image_left = my_preprocess(os.path.join('data', sample[1].strip()))\n image_right = my_preprocess(os.path.join('data', sample[2].strip()))\n\n offset = 0.2\n #print(measurement)\n angles.extend([measurement, measurement - offset, measurement + offset])\n images.extend([center_image, image_left, image_right])\n #X_train.append(center_image)\n #y_train.append(measurement)\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n return sklearn.utils.shuffle(X_train, y_train)\n\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2)\nX_train, y_train = generate_batch(train_samples)\nX_val, y_val = generate_batch(validation_samples)\n\n#train_generator = generator(train_samples, batch_size=batch_size)\n#validation_generator = generator(validation_samples, batch_size=batch_size)\n\ninput_shape=(160, 320, 3)\n\ninput_shape_resized=(90, 320, 3)\n\nfreeze_flag = True # `True` to freeze layers, `False` for full training\nweights_flag = 'imagenet' # 'imagenet' or None\npreprocess_flag = True # Should be true for ImageNet pre-trained typically\n\ninception = InceptionV3(weights=weights_flag, include_top=False,\n input_shape=input_shape_resized)\n\n\nif freeze_flag == True:\n for layer in inception.layers:\n layer.trainable = False\n\n\nif preprocess_flag == True:\n datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\n val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)\nelse:\n datagen = ImageDataGenerator()\n val_datagen = ImageDataGenerator()\n\ninput = Input(shape=input_shape)\n\n# Crop the input with Keras' Cropping2D layer & attach it to the model input\nresized_input = Cropping2D(cropping=((50,20), (0,0)), input_shape=(160,320,3))(input)\n#Lambda(lambda image: image[50:130, :])(resized_input)\n\ninp = inception(resized_input)\n\n\nx = GlobalAveragePooling2D()(inp)\ndense1 = Dense(512, activation = 'relu')(x)\ndense2 = Dense(82, activation = 
'relu')(dense1)\nprediction = Dense(1)(dense2)\n\nmodel = Model(inputs=input, outputs=prediction)\n\n# Compile the model\nmodel.compile(optimizer='Adam', loss='mse')\n\n# Check the summary of this new model to confirm the architecture\nmodel.summary()\n\n# history_object = model.fit_generator(train_generator,\n# steps_per_epoch=ceil(len(train_samples)/batch_size),\n# validation_data=validation_generator,\n# validation_steps=ceil(len(validation_samples)/batch_size),\n# epochs=5, verbose=1)\n\nhistory_object = model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),\n steps_per_epoch=len(X_train)/batch_size, epochs=5, verbose=1,\n validation_data=val_datagen.flow(X_val, y_val, batch_size=batch_size),\n validation_steps=len(X_val)/batch_size)\n\n\n### save the trained model\nmodel.save('model.h5')\n\n### plot the training and validation loss for each epoch\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "sklearn.utils.shuffle", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
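Editor's note: the record above builds its labels by pairing the centre-camera steering angle with offset-corrected angles for the two side cameras. A minimal numpy sketch of that correction arithmetic follows; the 0.2 correction value comes from the snippet itself, while the sign convention used here (add for the left camera, subtract for the right) is the conventional one and is the opposite of the snippet's, so treat this as a hedged illustration rather than the repo's exact behaviour.

```python
import numpy as np

def augment_with_side_cameras(center_angles, correction=0.2):
    """Return (center, left, right) steering labels as one flat array."""
    center = np.asarray(center_angles, dtype=float)
    left = center + correction   # left camera: steer further right
    right = center - correction  # right camera: steer further left
    return np.stack([center, left, right], axis=1).reshape(-1)

print(augment_with_side_cameras([0.0, 0.1]))
# -> [ 0.   0.2 -0.2  0.1  0.3 -0.1]
```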
seanrmcneil/CS7641-ML-public
[ "cb8a56c0422a8d80d6d833f34094fb5b6b5694fd" ]
[ "assignment1/run_experiment.py" ]
[ "import argparse\nfrom datetime import datetime\nimport logging\nimport numpy as np\n\nimport experiments\nfrom data import loader\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\n\ndef run_experiment(experiment_details, experiment, timing_key, verbose, timings):\n t = datetime.now()\n for details in experiment_details:\n exp = experiment(details, verbose=verbose)\n\n logger.info(\"Running {} experiment: {}\".format(timing_key, details.ds_readable_name))\n exp.perform()\n t_d = datetime.now() - t\n timings[timing_key] = t_d.seconds\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Perform some SL experiments')\n parser.add_argument('--threads', type=int, default=1, help='Number of threads (defaults to 1, -1 for auto)')\n parser.add_argument('--seed', type=int, help='A random seed to set, if desired')\n parser.add_argument('--ann', action='store_true', help='Run the ANN experiment')\n parser.add_argument('--boosting', action='store_true', help='Run the Boosting experiment')\n parser.add_argument('--dt', action='store_true', help='Run the Decision Tree experiment')\n parser.add_argument('--knn', action='store_true', help='Run the KNN experiment')\n parser.add_argument('--svm', action='store_true', help='Run the SVM experiment')\n parser.add_argument('--all', action='store_true', help='Run all experiments')\n parser.add_argument('--verbose', action='store_true', help='If true, provide verbose output')\n args = parser.parse_args()\n verbose = args.verbose\n threads = args.threads\n\n seed = args.seed\n if seed is None:\n seed = np.random.randint(0, (2 ** 32) - 1)\n print(\"Using seed {}\".format(seed))\n\n print(\"Loading data\")\n print(\"----------\")\n\n # ds1_details = {\n # 'data': loader.CreditDefaultData(verbose=verbose, seed=seed),\n # 'name': 'credit_default',\n # 'readable_name': 'Credit Default',\n # }\n\n ds1_details = {\n 'data': loader.AdultData(verbose=verbose, seed=seed),\n 'name': 'adult',\n 'readable_name': 'Adult',\n }\n ds2_details = {\n 'data': loader.PhishingData(verbose=verbose, seed=seed),\n 'name': 'phishing',\n 'readable_name': 'Phishing',\n }\n ds3_details = {\n 'data': loader.ContraceptiveMethodChoiceData(verbose=verbose, seed=seed),\n 'name': 'cmc',\n 'readable_name': 'Contraceptive Method Choice',\n }\n ds4_details = {\n 'data': loader.SpamData(verbose=verbose, seed=seed),\n 'name': 'spam_data',\n 'readable_name': 'Spam Data',\n }\n\n\n if verbose:\n print(\"----------\")\n print(\"Running experiments\")\n\n timings = {}\n\n datasets = [\n # ds1_details,\n #ds2_details,\n ds3_details,\n ds4_details\n ]\n\n experiment_details = []\n for ds in datasets:\n data = ds['data']\n data.load_and_process()\n data.build_train_test_split()\n data.scale_standard()\n experiment_details.append(experiments.ExperimentDetails(\n data, ds['name'], ds['readable_name'],\n threads=threads,\n seed=seed\n ))\n\n if args.ann or args.all:\n run_experiment(experiment_details, experiments.ANNExperiment, 'ANN', verbose, timings)\n\n if args.boosting or args.all:\n run_experiment(experiment_details, experiments.BoostingExperiment, 'Boosting', verbose, timings)\n\n if args.dt or args.all:\n run_experiment(experiment_details, experiments.DTExperiment, 'DT', verbose, timings)\n\n if args.knn or args.all:\n run_experiment(experiment_details, experiments.KNNExperiment, 'KNN', verbose, timings)\n\n if args.svm or args.all:\n 
run_experiment(experiment_details, experiments.SVMExperiment, 'SVM', verbose, timings)\n\n print(timings)\n" ]
[ [ "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
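Editor's note: `run_experiment` above wraps each experiment in a `datetime.now()` timing bracket and records `t_d.seconds`. The self-contained sketch below shows the same pattern with placeholder callables (the 'DT'/'KNN' keys are illustrative only); it uses `total_seconds()` because `.seconds` silently drops whole days on very long runs.

```python
from datetime import datetime

def time_experiments(experiments, timings):
    """Run each callable and record its wall-clock duration in seconds."""
    for key, run in experiments.items():
        t0 = datetime.now()
        run()
        timings[key] = (datetime.now() - t0).total_seconds()

timings = {}
time_experiments({'DT': lambda: sum(range(10**6)),
                  'KNN': lambda: sorted(range(10**5), reverse=True)},
                 timings)
print(timings)
```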
ReturnCloud/hindsight-experience-replay
[ "a7a461cc10b49815c29a12277a39b9615153a117" ]
[ "her_modules/her.py" ]
[ "import numpy as np\n\nclass her_sampler:\n def __init__(self, replay_strategy, replay_k, reward_func=None, is_her=False):\n self.replay_strategy = replay_strategy\n self.replay_k = replay_k\n if self.replay_strategy == 'future':\n self.future_p = 1 - (1. / (1 + replay_k))\n else:\n self.future_p = 0\n self.reward_func = reward_func\n self.is_her = is_her\n\n def sample_her_transitions(self, episode_batch, batch_size_in_transitions):\n T = episode_batch['actions'].shape[1]\n rollout_batch_size = episode_batch['actions'].shape[0]\n batch_size = batch_size_in_transitions\n # select which rollouts and which timesteps to use\n episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)\n t_samples = np.random.randint(T, size=batch_size)\n transitions = {key: episode_batch[key][episode_idxs, t_samples].copy() for key in episode_batch.keys()}\n # her idx\n her_indexes = np.where(np.random.uniform(size=batch_size) < self.future_p)\n future_offset = np.random.uniform(size=batch_size) * (T - t_samples)\n future_offset = future_offset.astype(int)\n future_t = (t_samples + 1 + future_offset)[her_indexes]\n # replace the goal with a future achieved goal\n if self.is_her:\n future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]\n transitions['g'][her_indexes] = future_ag\n\n # re-compute the reward with the (possibly relabelled) goals\n transitions['r'] = np.expand_dims(self.reward_func(transitions['ag_next'], transitions['g'], None), 1)\n transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:]) for k in transitions.keys()}\n # for k in transitions.keys():\n # print (transitions[k].shape)\n\n return transitions\n" ]
[ [ "numpy.random.uniform", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
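Editor's note: the sampler above implements the 'future' relabelling strategy of Hindsight Experience Replay. The toy fragment below reproduces only the index arithmetic, with made-up sizes, to show that `replay_k = 4` gives `future_p = 0.8` and that relabelled goals are always drawn from a timestep strictly after the sampled one.

```python
import numpy as np

rng = np.random.default_rng(0)
T, batch_size, replay_k = 50, 8, 4
future_p = 1 - 1.0 / (1 + replay_k)        # = 0.8

t_samples = rng.integers(T, size=batch_size)             # sampled timesteps
her_indexes = np.where(rng.uniform(size=batch_size) < future_p)
future_offset = (rng.uniform(size=batch_size) * (T - t_samples)).astype(int)
future_t = (t_samples + 1 + future_offset)[her_indexes]  # strictly later steps

print(future_p)
print(t_samples[her_indexes])
print(future_t)   # every entry exceeds its t_samples counterpart
```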
alexliberzonlab/mixed_age_algae_population_modeling
[ "56735952515dd918b1e9abcabf914d7a1caa7f7d" ]
[ "algae_population.py" ]
[ "# collect all the functions to remove duplication\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport math\n\n# Create a new variable and store all\n# built-in functions within it using dir( ).\nnot_my_data = set(dir())\n\n\n# some definitions that we keep through the simulations\nt0 = 0\ntend = 360\n\n# we solve for daily population discretization:\ndays = np.arange(t0, tend)\nn_days = len(days)\n\nK = 10 # kg/m^3 total density kind of thing\nlamda = 1 # day by day aging\n\ntheta = 0.1*np.exp(-days/(120/math.log(2)))\nmu = 0.05*np.exp(-days/(120/math.log(2)))\n\n# default values were constant dilution ratio of 10%\ndilution = 5.0 # percents\ntau = np.inf\n\n# external supply of inhibitor by nutrients, units of I,\n# like direct supply to water\ngammai = 0.0\n\n\n# initial mass\nm0 = 0.2\n\nscenarios = {'100/0': [(0, m0)],\n '90/10': [(0, m0*0.90), (120, m0*0.10)],\n '80/20': [(0, m0*0.80), (120, m0*0.20)],\n '70/30': [(0, m0*0.70), (120, m0*0.30)],\n '60/40': [(0, m0*0.60), (120, m0*0.40)],\n '50/50': [(0, m0/2), (120, m0/2)],\n '40/60': [(0, m0*0.40), (120, m0*0.60)],\n '30/70': [(0, m0*0.30), (120, m0*0.70)], \n '20/80': [(0, m0*0.2), (120, m0*0.8)],\n '10/90': [(0, m0*0.10), (120, m0*0.90)], \n '0/100': [(120, m0)],\n\n}\n\n\nmethods = ['RK45', 'RK23', 'DOP853', 'Radau', 'BDF', 'LSODA']\nmethod = methods[0]\n\n\ndef logistic(x,L=1,k=1,x0=0):\n \"\"\" General logistic function \"\"\"\n return L / (1 + np.exp(-k*(x-x0)))\n\ndef f(x):\n \"\"\" inverted logistic function to counteract the effect of inhibitor \n values decrease from 1 to 0 in the range of [0,1]\n todo: we need to find the range of inhibitor, so far considered as a \n proportion of the total mass of algae\n \"\"\"\n return 1-logistic(x,L=1,k=10,x0=.5)\n\ndef r(t):\n \"\"\" growth function \n measured in percents of growth per day, e.g. \n 0.5 means 50% per day\n Alex G. suggested to use from 50% for the young ones to 5% for the\n 120 days old ones and then keep it at 5% roughly\n\n \"\"\"\n return 0.45*(0.1+np.exp(-t/(30/math.log(2))))\n\ndef sigma(t):\n \"\"\" destruction function \n \n Alex G. suggested to set it to the 1/2 lambda, i.e. if \n the growth is day by day, then the destruction is two days,\n empirically seems to be very strong, or the f(I) needs to be\n much weaker.\n \"\"\"\n return 0.3 \n\ndef xi(t, dilution=10, tau=np.inf):\n \"\"\" time varying leakage or water replacement\n measured in percents of inhibitor concentration or amount \n 0.5 means 50% per day\n max = 100 # in per-cents - removes everything\n tau = 1 means replacement every day\n tau = np.inf means constant value in the output\n\n \"\"\"\n tau = tau/np.log(2) # half-time\n dilution = dilution/100 # units not percents\n out = dilution * np.exp(-t/tau)\n return out\n\ndef evolution(t, y, K, lamda, xi, gammai, theta, mu, dilution, tau, sigma):\n \"\"\" t : time\n y : vector of state variables containing: \n a : vector of age masses, let's start with a0, a1 \n I : inhibitor's content\n r : growth rate vector (per age)\n K : saturation (logistic growth model)\n lambda : vector of 1/time resolution, e.g. 
1/7 (for day by day age)\n sigma : rate of algae degradation /destruction when there are no inhibitors\n theta : rate of creation of inhibitor\n mu : rate of uptake of inhibitor from the surrounding\n xi : rate of leakage, destruction of inhibitor, losses.\n gammai : is the nutrient supply flux in the units of inhibitor concentration \n \n\n \"\"\"\n a = y[:-1]\n I = y[-1]\n \n dydt = np.zeros_like(y) \n # age 0, birth\n dydt[0] = ((r(0)*a[0]*(1-np.sum(a)/K)) - lamda * a[0] - sigma(0)*a[0]*f(I))\n # from age 1 and on:\n for i in range(1,len(a)):\n dydt[i] = ((r(i)*a[i]*(1-np.sum(a)/K)) + lamda*a[i-1] - lamda*a[i] - sigma(i)*a[i]*f(I))\n\n # evaluate the leakage at the current time t (not at the last age index)\n dydt[-1] = np.sum(a@theta) - I * np.sum(a@mu) - xi(t, dilution, tau)*I + gammai\n\n # prevent negative population (write through the index; a bare loop\n # variable would be a throwaway copy and the clipping would have no effect)\n for idx in range(len(y)):\n if y[idx] < 0.005 and dydt[idx] < 0.: # less than 5 gram or .5% and negative slope\n dydt[idx] = 0.\n if y[idx] < 0:\n dydt[idx] = 0\n\n return dydt\n\n\ndef sporulation(t, y, K, lamda, xi, gammai, theta, mu, dilution, tau, sigma):\n \"\"\" this event tracking, with events = sporulation and sporulation.terminal = True, will\n stop the simulation on the catastrophe of the death of the population\n when all the mass has disappeared\n \"\"\"\n return np.sum(y[:-1])\n\n\ndef scenario_to_age_distribution(s):\n \"\"\" scenarios see above, every dict provides the name of the\n scenario and age mass distribution. here we convert it to the\n initial values for the ODE solver\n \"\"\"\n\n ages = s[1]\n a = np.zeros((n_days))\n for ag in ages:\n a[ag[0]] = ag[1]\n\n return a\n\n\ndef plot_all_results(solutions, tend=None, K=10):\n fig, ax = plt.subplots(1, 2, figsize=(14, 6))\n\n for sol in solutions:\n\n t0 = sol.t[0]\n if tend is None:\n tend = sol.t[-1]\n\n if sol.t_events[0].size > 0 and sol.t_events[0] < tend:\n print(f'sporulation event at {sol.t_events[0]}')\n tend = sol.t_events[0]\n\n t = np.arange(t0, tend)\n z = sol.sol(t)\n\n\n \n # mass and inhibitor\n biomass = z[:-1, :] \n I = z[-1,:]\n\n # what we gain is:\n revenue = np.sum( biomass.T - biomass[:,0], axis=1)\n \n\n ax[0].plot(t, revenue,'-o',label = sol['s'][0])\n ax[0].set_xlabel('days')\n ax[0].set_ylabel(r'Revenue')\n ax[0].set_ylim([-1, K])\n ax[0].set_yscale('symlog')\n ax[0].legend()\n\n ax[1].plot(t,I,'-o',label= sol['s'][0])\n ax[1].set_xlabel('days')\n ax[1].set_ylabel(r'$I$')\n\n fmt = mpl.ticker.StrMethodFormatter(\"{x:g}\")\n ax[0].yaxis.set_major_formatter(fmt)\n ax[0].yaxis.set_minor_formatter(fmt)\n\n ax[1].yaxis.set_major_formatter(fmt)\n ax[1].yaxis.set_minor_formatter(fmt)\n\n plt.show()\n return fig, ax\n\ndef interp2d_with_nan(x, y, z):\n \"\"\" Interpolates 2D matrix z removing NaNs \"\"\"\n from scipy.interpolate import interp2d\n\n # Build the coordinate grids:\n xx, yy = np.meshgrid(x, y)\n\n # Interpolation functions:\n nan_map = np.zeros_like( z )\n nan_map[ np.isnan(z) ] = 1\n\n filled_z = z.copy()\n filled_z[ np.isnan(z) ] = 0\n\n f = interp2d(x, y, filled_z, kind='linear')\n f_nan = interp2d(x, y, nan_map, kind='linear') \n\n # Interpolation on new points:\n xnew = np.linspace(x.min(), x.max(), 30)\n ynew = np.linspace(y.min(), y.max(), 30)\n\n z_new = f(xnew, ynew)\n nan_new = f_nan( xnew, ynew )\n z_new[ nan_new > 0.5 ] = np.nan\n\n return xnew, ynew, z_new" ]
[ [ "numpy.log", "matplotlib.ticker.StrMethodFormatter", "numpy.meshgrid", "numpy.isnan", "numpy.arange", "matplotlib.pyplot.subplots", "numpy.zeros_like", "numpy.exp", "scipy.interpolate.interp2d", "matplotlib.pyplot.show", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
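Editor's note: `sporulation` above is shaped as a `solve_ivp` event function. The sketch below shows the usual way such an event is attached, on a toy exponential-decay problem rather than the full age-structured model; the rate 0.5 and the 1e-3 threshold are arbitrary assumptions.

```python
import numpy as np
from scipy.integrate import solve_ivp

def rhs(t, y):
    return -0.5 * y                 # toy exponential decay

def extinction(t, y):
    return y[0] - 1e-3              # zero-crossing triggers the event

extinction.terminal = True          # stop integration at the event
extinction.direction = -1           # only on downward crossings

sol = solve_ivp(rhs, (0, 360), [0.2], events=extinction, dense_output=True)
print(sol.t_events[0])              # time at which the run was cut short
```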
carlosb1/kornia
[ "a2b34d497314e7ed65f114401efdd3cc9ba2077c" ]
[ "kornia/augmentation/functional.py" ]
[ "from typing import Tuple, List, Union, Dict, cast, Optional\n\nimport torch\nimport torch.nn as nn\n\nfrom kornia.constants import Resample, BorderType, pi\nfrom kornia.geometry import (\n get_perspective_transform,\n get_rotation_matrix2d,\n get_affine_matrix2d,\n warp_perspective,\n rotate,\n crop_by_boxes,\n warp_affine,\n hflip,\n vflip,\n deg2rad\n)\nfrom kornia.color import (\n adjust_brightness,\n adjust_contrast,\n adjust_saturation,\n adjust_hue,\n adjust_gamma,\n rgb_to_grayscale\n)\nfrom kornia.filters import motion_blur\nfrom kornia.geometry.transform.affwarp import _compute_rotation_matrix, _compute_tensor_center\n\nfrom . import random_generator as rg\nfrom .utils import _transform_input, _validate_input_shape, _validate_input_dtype\nfrom .types import (\n TupleFloat,\n UnionFloat,\n UnionType,\n FloatUnionType\n)\n\n\ndef random_hflip(input: torch.Tensor, p: float = 0.5, return_transform: bool = False) -> UnionType:\n r\"\"\"Generate params and apply operation on input tensor.\n\n See :func:`~kornia.augmentation.random_generator.random_prob_generator` for details.\n See :func:`~kornia.augmentation.functional.apply_hflip` for details.\n \"\"\"\n input = _transform_input(input)\n batch_size, _, h, w = input.size()\n params = rg.random_prob_generator(batch_size, p=p)\n output = apply_hflip(input, params)\n if return_transform:\n return output, compute_hflip_transformation(input, params)\n return output\n\n\ndef random_vflip(input: torch.Tensor, p: float = 0.5, return_transform: bool = False) -> UnionType:\n r\"\"\"Generate params and apply operation on input tensor.\n\n See :func:`~kornia.augmentation.random_generator.random_prob_generator` for details.\n See :func:`~kornia.augmentation.functional.apply_vflip` for details.\n \"\"\"\n input = _transform_input(input)\n batch_size, _, h, w = input.size()\n params = rg.random_prob_generator(batch_size, p=p)\n output = apply_vflip(input, params)\n if return_transform:\n return output, compute_vflip_transformation(input, params)\n return output\n\n\ndef color_jitter(input: torch.Tensor, brightness: FloatUnionType = 0.,\n contrast: FloatUnionType = 0., saturation: FloatUnionType = 0.,\n hue: FloatUnionType = 0., return_transform: bool = False) -> UnionType:\n r\"\"\"Generate params and apply operation on input tensor.\n\n See :func:`~kornia.augmentation.random_generator.random_color_jitter_generator` for details.\n See :func:`~kornia.augmentation.functional.apply_color_jitter` for details.\n \"\"\"\n input = _transform_input(input)\n batch_size, _, h, w = input.size()\n params = rg.random_color_jitter_generator(batch_size, brightness, contrast, saturation, hue)\n output = apply_color_jitter(input, params)\n if return_transform:\n return output, compute_intensity_transformation(input, params)\n return output\n\n\ndef random_grayscale(input: torch.Tensor, p: float = 0.5, return_transform: bool = False):\n r\"\"\"Generate params and apply operation on input tensor.\n\n See :func:`~kornia.augmentation.random_generator.random_prob_generator` for details.\n See :func:`~kornia.augmentation.functional.apply_grayscale` for details.\n \"\"\"\n input = _transform_input(input)\n batch_size, _, h, w = input.size()\n params = rg.random_prob_generator(batch_size, p=p)\n\n output = apply_grayscale(input, params)\n if return_transform:\n return output, compute_intensity_transformation(input, params)\n return output\n\n\ndef random_perspective(input: torch.Tensor,\n distortion_scale: float = 0.5,\n p: float = 0.5,\n return_transform: bool = False) 
-> UnionType:\n r\"\"\"Generate params and apply operation on input tensor.\n\n See :func:`~kornia.augmentation.random_generator.random_perspective_generator` for details.\n See :func:`~kornia.augmentation.functional.apply_perspective` for details.\n \"\"\"\n\n input = _transform_input(input)\n batch_size, _, height, width = input.size()\n params: Dict[str, torch.Tensor] = rg.random_perspective_generator(\n batch_size, height, width, p, distortion_scale)\n output = apply_perspective(input, params)\n if return_transform:\n transform = compute_perspective_transformation(input, params)\n return output, transform\n return output\n\n\ndef random_affine(input: torch.Tensor,\n degrees: UnionFloat,\n translate: Optional[TupleFloat] = None,\n scale: Optional[TupleFloat] = None,\n shear: Optional[UnionFloat] = None,\n resample: Union[str, int, Resample] = Resample.BILINEAR.name,\n return_transform: bool = False) -> UnionType:\n r\"\"\"Generate params and apply operation on input tensor.\n\n See :func:`~kornia.augmentation.random_generator.random_affine_generator` for details.\n See :func:`~kornia.augmentation.functional.apply_affine` for details.\n \"\"\"\n\n input = _transform_input(input)\n batch_size, _, height, width = input.size()\n params: Dict[str, torch.Tensor] = rg.random_affine_generator(\n batch_size, height, width, degrees, translate, scale, shear, resample)\n output = apply_affine(input, params)\n if return_transform:\n transform = compute_affine_transformation(input, params)\n return output, transform\n return output\n\n\ndef random_rectangle_erase(\n input: torch.Tensor,\n p: float = 0.5,\n scale: Tuple[float, float] = (0.02, 0.33),\n ratio: Tuple[float, float] = (0.3, 3.3),\n return_transform: bool = False\n) -> UnionType:\n r\"\"\"\n Function that erases a randomly selected rectangle for each image in the batch, setting\n its values to zero.\n The rectangle will have an area equal to the original image area multiplied by a value uniformly\n sampled between the range [scale[0], scale[1]) and an aspect ratio sampled\n between [aspect_ratio_range[0], aspect_ratio_range[1])\n\n Args:\n input (torch.Tensor): input images.\n scale (Tuple[float, float]): range of proportion of erased area against input image.\n ratio (Tuple[float, float]): range of aspect ratio of erased area.\n\n See :func:`~kornia.augmentation.random_generator.random_rectangles_params_generator` for details.\n See :func:`~kornia.augmentation.functional.apply_erase_rectangles` for details.\n \"\"\"\n input = _transform_input(input)\n b, _, h, w = input.size()\n params = rg.random_rectangles_params_generator(\n b, h, w, p, scale, ratio\n )\n output = apply_erase_rectangles(input, params)\n if return_transform:\n return output, compute_intensity_transformation(input, params)\n return output\n\n\ndef random_rotation(input: torch.Tensor, degrees: FloatUnionType, return_transform: bool = False) -> UnionType:\n r\"\"\"Generate params and apply operation on input tensor.\n\n See :func:`~kornia.augmentation.random_generator.random_rotation_generator` for details.\n See :func:`~kornia.augmentation.functional.apply_rotation` for details.\n \"\"\"\n input = _transform_input(input)\n batch_size, _, _, _ = input.size()\n params = rg.random_rotation_generator(batch_size, degrees=degrees)\n output = apply_rotation(input, params)\n if return_transform:\n return output, compute_rotate_tranformation(input, params)\n return output\n\n\ndef apply_hflip(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Apply 
a horizontal flip to a tensor image or a batch of tensor images with given random parameters.\n Input should be a tensor of shape (H, W), (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['batch_prob']: A boolean tensor indicating whether to transform each image in the batch.\n\n Returns:\n torch.Tensor: The horizontally flipped input\n \"\"\"\n\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n flipped: torch.Tensor = input.clone()\n\n to_flip = params['batch_prob'].to(input.device)\n flipped[to_flip] = hflip(input[to_flip])\n\n return flipped\n\n\ndef compute_hflip_transformation(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Compute the applied transformation matrix :math: `(*, 3, 3)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['batch_prob']: A boolean tensor indicating whether to transform each image in the batch.\n\n Returns:\n torch.Tensor: The applied transformation matrix :math: `(*, 3, 3)`\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n to_flip = params['batch_prob'].to(input.device)\n trans_mat: torch.Tensor = torch.eye(3, device=input.device, dtype=input.dtype).repeat(input.shape[0], 1, 1)\n w: int = input.shape[-1]\n flip_mat: torch.Tensor = torch.tensor([[-1, 0, w],\n [0, 1, 0],\n [0, 0, 1]])\n trans_mat[to_flip] = flip_mat.type_as(input)\n\n return trans_mat\n\n\ndef apply_vflip(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Apply a vertical flip to a tensor image or a batch of tensor images with given random parameters.\n Input should be a tensor of shape (H, W), (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['batch_prob']: A boolean tensor indicating whether to transform each image in the batch.\n\n Returns:\n torch.Tensor: The vertically flipped input\n \"\"\"\n # TODO: params validation\n\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n flipped: torch.Tensor = input.clone()\n to_flip = params['batch_prob'].to(input.device)\n flipped[to_flip] = vflip(input[to_flip])\n\n return flipped\n\n\ndef compute_vflip_transformation(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Compute the applied transformation matrix :math: `(*, 3, 3)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['batch_prob']: A boolean tensor indicating whether to transform each image in the batch.\n\n Returns:\n torch.Tensor: The applied transformation matrix :math: `(*, 3, 3)`\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n to_flip = params['batch_prob'].to(input.device)\n trans_mat: torch.Tensor = torch.eye(3, device=input.device, dtype=input.dtype).repeat(input.shape[0], 1, 1)\n\n h: int = input.shape[-2]\n flip_mat: torch.Tensor = 
torch.tensor([[1, 0, 0],\n [0, -1, h],\n [0, 0, 1]])\n\n trans_mat[to_flip] = flip_mat.type_as(input)\n\n return trans_mat\n\n\ndef apply_color_jitter(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Apply Color Jitter on a tensor image or a batch of tensor images with given random parameters.\n Input should be a tensor of shape (H, W), (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['brightness_factor']: The brightness factor.\n - params['contrast_factor']: The contrast factor.\n - params['hue_factor']: The hue factor.\n - params['saturation_factor']: The saturation factor.\n - params['order']: The order of applying color transforms.\n 0 is brightness, 1 is contrast, 2 is saturation, 3 is hue.\n\n Returns:\n torch.Tensor: The color-jittered input\n \"\"\"\n # TODO: params validation\n\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n transforms = [\n lambda img: apply_adjust_brightness(img, params),\n lambda img: apply_adjust_contrast(img, params),\n lambda img: apply_adjust_saturation(img, params),\n lambda img: apply_adjust_hue(img, params)\n ]\n\n jittered = input\n for idx in params['order'].tolist():\n t = transforms[idx]\n jittered = t(jittered)\n\n return jittered\n\n\ndef compute_intensity_transformation(input: torch.Tensor, params: Dict[str, torch.Tensor]):\n r\"\"\"Compute the applied transformation matrix :math: `(*, 3, 3)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['batch_prob']: A boolean tensor indicating whether to transform each image in the batch.\n\n Returns:\n torch.Tensor: The applied transformation matrix :math: `(*, 3, 3)`. Returns identity transformations.\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n identity: torch.Tensor = torch.eye(3, device=input.device, dtype=input.dtype).repeat(input.shape[0], 1, 1)\n return identity\n\n\ndef apply_grayscale(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Apply Gray Scale on a tensor image or a batch of tensor images with given random parameters.\n Input should be a tensor of shape (3, H, W) or a batch of tensors :math:`(*, 3, H, W)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['batch_prob']: A boolean tensor indicating whether to transform each image in the batch.\n\n Returns:\n torch.Tensor: The grayscaled input\n \"\"\"\n # TODO: params validation\n\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n if not _validate_input_shape(input, 1, 3):\n raise ValueError(f\"Input size must have a shape of (*, 3, H, W). 
Got {input.shape}\")\n\n grayscale: torch.Tensor = input.clone()\n\n to_gray = params['batch_prob'].to(input.device)\n\n grayscale[to_gray] = rgb_to_grayscale(input[to_gray])\n\n return grayscale\n\n\ndef apply_perspective(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Perform perspective transform of the given torch.Tensor or batch of tensors.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['batch_prob']: A boolean tensor indicating whether to transform each image in the batch.\n - params['start_points']: Tensor containing [top-left, top-right, bottom-right,\n bottom-left] of the original image with shape Bx4x2.\n - params['end_points']: Tensor containing [top-left, top-right, bottom-right,\n bottom-left] of the transformed image with shape Bx4x2.\n - params['interpolation']: Integer tensor. NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: Perspectively transformed tensor.\n \"\"\"\n\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n # arrange input data\n x_data: torch.Tensor = input.view(-1, *input.shape[-3:])\n\n _, _, height, width = x_data.shape\n\n # compute the homography between the input points\n transform: torch.Tensor = compute_perspective_transformation(input, params)\n\n out_data: torch.Tensor = x_data.clone()\n\n # process valid samples\n mask: torch.Tensor = params['batch_prob'].to(input.device)\n\n # TODO: look for a workaround for this hack. In CUDA it fails when no elements found.\n # TODO: this if statement is super weird and sum here is not the proper way to check\n # it's valid. 
In addition, 'interpolation' shouldn't be a reason to get into the branch.\n\n if bool(mask.sum() > 0) and ('interpolation' in params):\n # apply the computed transform\n height, width = x_data.shape[-2:]\n resample_name: str = Resample(params['interpolation'].item()).name.lower()\n align_corners: bool = cast(bool, params['align_corners'].item())\n\n out_data[mask] = warp_perspective(\n x_data[mask], transform[mask], (height, width),\n flags=resample_name, align_corners=align_corners)\n\n return out_data.view_as(input)\n\n\ndef compute_perspective_transformation(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Compute the applied transformation matrix :math: `(*, 3, 3)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['batch_prob']: A boolean tensor indicating whether to transform each image in the batch.\n - params['start_points']: Tensor containing [top-left, top-right, bottom-right,\n bottom-left] of the original image with shape Bx4x2.\n - params['end_points']: Tensor containing [top-left, top-right, bottom-right,\n bottom-left] of the transformed image with shape Bx4x2.\n\n Returns:\n torch.Tensor: The applied transformation matrix :math: `(*, 3, 3)`\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n transform: torch.Tensor = get_perspective_transform(\n params['start_points'], params['end_points']).type_as(input)\n return transform\n\n\ndef apply_affine(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Random affine transformation of the image keeping center invariant.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['angle']: Degrees of rotation.\n - params['translations']: Horizontal and vertical translations.\n - params['center']: Rotation center.\n - params['scale']: Scaling params.\n - params['sx']: Shear param toward x-axis.\n - params['sy']: Shear param toward y-axis.\n - params['resample']: Integer tensor. NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: The transformed input\n \"\"\"\n\n if not torch.is_tensor(input):\n raise TypeError(f\"Input type is not a torch.Tensor. 
Got {type(input)}\")\n\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n # arrange input data\n x_data: torch.Tensor = input.view(-1, *input.shape[-3:])\n\n height, width = x_data.shape[-2:]\n\n # concatenate transforms\n transform: torch.Tensor = compute_affine_transformation(input, params)\n\n resample_name: str = Resample(params['resample'].item()).name.lower()\n align_corners: bool = cast(bool, params['align_corners'].item())\n\n out_data: torch.Tensor = warp_affine(x_data, transform[:, :2, :],\n (height, width), resample_name,\n align_corners=align_corners)\n return out_data.view_as(input)\n\n\ndef compute_affine_transformation(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Compute the applied transformation matrix :math: `(*, 3, 3)`.\n\n Args:\n input (torch.Tensor): Tensor to be transformed with shape (H, W), (C, H, W), (*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['angle']: Degrees of rotation.\n - params['translations']: Horizontal and vertical translations.\n - params['center']: Rotation center.\n - params['scale']: Scaling params.\n - params['sx']: Shear param toward x-axis.\n - params['sy']: Shear param toward y-axis.\n - params['resample']: Integer tensor. NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: The applied transformation matrix :math: `(*, 3, 3)`\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n transform = get_affine_matrix2d(\n params['translations'], params['center'], params['scale'], params['angle'],\n deg2rad(params['sx']), deg2rad(params['sy'])\n ).type_as(input)\n return transform\n\n\ndef apply_rotation(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Rotate a tensor image or a batch of tensor images a random amount of degrees.\n Input should be a tensor of shape (C, H, W) or a batch of tensors :math:`(*, C, H, W)`.\n\n Args:\n input (torch.Tensor): input image.\n params (Dict[str, torch.Tensor]):\n - params['degrees']: degree to be applied.\n - params['interpolation']: Integer tensor. NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: The rotated input\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n angles: torch.Tensor = params[\"degrees\"].type_as(input)\n\n resample_mode: str = Resample(params['interpolation'].item()).name.lower()\n align_corners: bool = cast(bool, params['align_corners'].item())\n\n transformed: torch.Tensor = rotate(input, angles, mode=resample_mode, align_corners=align_corners)\n\n return transformed\n\n\ndef compute_rotate_tranformation(input: torch.Tensor, params: Dict[str, torch.Tensor]):\n r\"\"\"Compute the applied transformation matrix :math: `(*, 3, 3)`.\n\n Args:\n input (torch.Tensor): input image.\n params (Dict[str, torch.Tensor]):\n - params['degrees']: degree to be applied.\n\n Returns:\n torch.Tensor: The applied transformation matrix :math: `(*, 3, 3)`\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n angles: torch.Tensor = params[\"degrees\"].type_as(input)\n\n # TODO: This part should be inferred from rotate directly\n center: torch.Tensor = _compute_tensor_center(input)\n rotation_mat: torch.Tensor = _compute_rotation_matrix(angles, center.expand(angles.shape[0], -1))\n\n # rotation_mat is B x 2 x 3 and we need 
a B x 3 x 3 matrix\n trans_mat: torch.Tensor = torch.eye(3, device=input.device, dtype=input.dtype).repeat(input.shape[0], 1, 1)\n trans_mat[:, 0] = rotation_mat[:, 0]\n trans_mat[:, 1] = rotation_mat[:, 1]\n\n return trans_mat\n\n\ndef apply_crop(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Apply cropping by src bounding box and dst bounding box.\n Order: top-left, top-right, bottom-right and bottom-left. The coordinates must be in the x, y order.\n\n Args:\n input (torch.Tensor): input image.\n params (Dict[str, torch.Tensor]):\n - params['src']: The applied cropping src matrix :math: `(*, 4, 2)`.\n - params['dst']: The applied cropping dst matrix :math: `(*, 4, 2)`.\n - params['interpolation']: Integer tensor. NEAREST = 0, BILINEAR = 1.\n - params['align_corners']: Boolean tensor.\n\n Returns:\n torch.Tensor: The cropped input.\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n resample_mode: str = Resample.get(params['interpolation'].item()).name.lower() # type: ignore\n align_corners: bool = cast(bool, params['align_corners'].item())\n\n return crop_by_boxes(\n input, params['src'], params['dst'], resample_mode, align_corners=align_corners)\n\n\ndef compute_crop_transformation(input: torch.Tensor, params: Dict[str, torch.Tensor]):\n r\"\"\"Compute the applied transformation matrix :math: `(*, 3, 3)`.\n\n Args:\n input (torch.Tensor): input image.\n params (Dict[str, torch.Tensor]):\n - params['src']: The applied cropping src matrix :math: `(*, 4, 2)`.\n - params['dst']: The applied cropping dst matrix :math: `(*, 4, 2)`.\n\n Returns:\n torch.Tensor: The applied transformation matrix :math: `(*, 3, 3)`\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n transform: torch.Tensor = get_perspective_transform(params['src'].to(input.dtype), params['dst'].to(input.dtype))\n transform = transform.expand(input.shape[0], -1, -1).type_as(input)\n return transform\n\n\ndef apply_erase_rectangles(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"\n Generate a {0, 1} mask with drawn rectangles whose parameters are defined by params\n and whose size is given by input.size()\n\n Args:\n input (torch.Tensor): input image.\n params (Dict[str, torch.Tensor]):\n - params['widths']: widths tensor\n - params['heights']: heights tensor\n - params['xs']: x positions tensor\n - params['ys']: y positions tensor\n - params['values']: the value to fill in\n\n Returns:\n torch.Tensor: Erased image.\n \"\"\"\n if not (params['widths'].size() == params['heights'].size() == params['xs'].size() == params['ys'].size()):\n raise TypeError(\n \"rectangle params components must have the same shape\"\n )\n\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n mask = torch.zeros(input.size()).type_as(input)\n values = torch.zeros(input.size()).type_as(input)\n\n widths = params['widths']\n heights = params['heights']\n xs = params['xs']\n ys = params['ys']\n vs = params['values']\n for i_elem in range(input.size()[0]):\n w = widths[i_elem].item()\n h = heights[i_elem].item()\n y = ys[i_elem].item()\n x = xs[i_elem].item()\n v = vs[i_elem].item()\n mask[i_elem, :, int(y):int(y + h), int(x):int(x + w)] = 1.\n values[i_elem, :, int(y):int(y + h), int(x):int(x + w)] = v\n transformed = torch.where(mask == 1., values, input)\n return 
transformed\n\n\ndef apply_adjust_brightness(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n \"\"\" Wrapper for adjust_brightness for Torchvision-like param settings.\n\n Args:\n input (torch.Tensor): Image/Input to be adjusted in the shape of (*, N).\n params (Dict[str, torch.Tensor]):\n - params['brightness_factor']: Brightness adjust factor per element\n in the batch. 0 gives a black image, 1 does not modify the input image and 2 gives a\n white image, while any other number modifies the brightness.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n transformed = adjust_brightness(input, params['brightness_factor'].to(input.dtype) - 1)\n\n return transformed\n\n\ndef apply_adjust_contrast(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n \"\"\"Wrapper for adjust_contrast for Torchvision-like param settings.\n\n Args:\n input (torch.Tensor): Image to be adjusted in the shape of (*, N).\n params (Dict[str, torch.Tensor]):\n - params['contrast_factor']: Contrast adjust factor per element in the batch.\n 0 generates a completely black image, 1 does not modify the input image, while any other\n non-negative number modifies the contrast by this factor.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n transformed = adjust_contrast(input, params['contrast_factor'].to(input.dtype))\n\n return transformed\n\n\ndef apply_adjust_saturation(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n \"\"\"Wrapper for adjust_saturation for Torchvision-like param settings.\n\n Args:\n input (torch.Tensor): Image/Tensor to be adjusted in the shape of (*, N).\n params (Dict[str, torch.Tensor]):\n - params['saturation_factor']: How much to adjust the saturation. 0 will give a black\n and white image, 1 will give the original image while 2 will enhance the saturation\n by a factor of 2.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n transformed = adjust_saturation(input, params['saturation_factor'].to(input.dtype))\n\n return transformed\n\n\ndef apply_adjust_hue(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n \"\"\"Wrapper for adjust_hue for Torchvision-like param settings.\n\n Args:\n input (torch.Tensor): Image/Tensor to be adjusted in the shape of (*, N).\n params (Dict[str, torch.Tensor]):\n - params['hue_factor']: How much to shift the hue channel. Should be in [-0.5, 0.5].\n 0.5 and -0.5 give complete reversal of hue channel in HSV space in positive and negative\n direction respectively. 0 means no shift. 
Therefore, both -0.5 and 0.5 will give an\n image with complementary colors while 0 gives the original image.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n transformed = adjust_hue(input, params['hue_factor'].to(input.dtype) * 2 * pi)\n\n return transformed\n\n\ndef apply_adjust_gamma(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Perform gamma correction on an image.\n\n Args:\n input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\\*, N).\n params (Dict[str, torch.Tensor]):\n - params['gamma_factor']: Non negative real number, same as γ in the equation.\n gamma larger than 1 makes the shadows darker, while gamma smaller than 1 makes\n dark regions lighter.\n\n Returns:\n torch.Tensor: Adjusted image.\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n transformed = adjust_gamma(input, params['gamma_factor'].to(input.dtype))\n\n return transformed\n\n\ndef apply_motion_blur(input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:\n r\"\"\"Perform motion blur on an image\n\n The input image is expected to be in the range of [0, 1].\n\n Args:\n input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\\*, C, H, W).\n params (Dict[str, torch.Tensor]):\n - params['ksize_factor']: motion kernel width and height (odd and positive).\n - params['angle_factor']: angle of the motion blur in degrees (anti-clockwise rotation).\n - params['direction_factor']: forward/backward direction of the motion blur.\n Lower values towards -1.0 will point the motion blur towards the back (with\n angle provided via angle), while higher values towards 1.0 will point the motion\n blur forward. A value of 0.0 leads to a uniformly (but still angled) motion blur.\n - params['border_type']: the padding mode to be applied before convolving.\n CONSTANT = 0, REFLECT = 1, REPLICATE = 2, CIRCULAR = 3. Default: BorderType.CONSTANT.\n\n Returns:\n torch.Tensor: Adjusted image with the same shape as the input (\\*, C, H, W).\n\n \"\"\"\n input = _transform_input(input)\n _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])\n\n kernel_size: int = cast(int, params['ksize_factor'].item())\n # TODO: these params should become learnable tensors at some point\n angle: float = cast(float, params['angle_factor'].item())\n direction: float = cast(float, params['direction_factor'].item())\n border_type: str = cast(str, BorderType(params['border_type'].item()).name.lower())\n\n return motion_blur(input, kernel_size, angle, direction, border_type)\n" ]
[ [ "torch.eye", "torch.is_tensor", "torch.where", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
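Editor's note: `compute_hflip_transformation` in the record above encodes a horizontal flip as the 3x3 matrix [[-1, 0, w], [0, 1, 0], [0, 0, 1]]. The stand-alone torch check below confirms that this matrix maps homogeneous pixel coordinates (x, y, 1) to (w - x, y, 1); whether a library wants w or w - 1 here depends on its pixel-centre convention, so this is a sanity check on the arithmetic, not a statement about kornia's API.

```python
import torch

w = 4
flip = torch.tensor([[-1., 0., w],
                     [0., 1., 0.],
                     [0., 0., 1.]])
pts = torch.tensor([[0., 0., 1.],   # left edge
                    [3., 2., 1.]])  # arbitrary pixel
print((flip @ pts.T).T)             # x becomes w - x; y is unchanged
```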
michellab/Siamese-RNN-Self-Attention
[ "a730e02153709b9c0e7f83deb0042ae9f9c1ce15" ]
[ "model/snn.py" ]
[ "\"\"\"\nImplements the Recurrent Siamese Neural Network (RSNN) model.\n\n\"\"\"\n\nimport numpy as np # Linear algebra\nimport pandas as pd # Data wrangling\n\n# Deep learning\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.nn.init as tnni\nimport torch.nn.utils.rnn as tnnur\nimport sys\n\nfrom torch import optim # Deep learning optimiser\n\nfrom tqdm import tqdm_notebook as tqdm # progress bar\n\n# Custom dependencies\nsys.path.append('/projects/../../PythonNotebooks/model/')\nfrom helper import *\nfrom metrics_calculator import *\n\nclass SNN(nn.Module):\n def __init__(self, **kwargs):\n \n \"\"\"\n Initialiser.\n \n \n : hidden_size (int): size of the hidden dimension of the RNN\n \n : n_layers (int): number of RNN layers\n : expansion_size (int): intermediate dimension used by the attention mechanism\n : dist_fn (str): energy function to calculate distance between the two SNN arms\n : cell_type (str): establish the RNN type (LSTM or GRU)\n : embedding_dropout (float): dropout for the embedding layer\n : dropout (float): dropout for the RNN layers\n : learning_rate (float): learning rate for the Adam optimiser\n : weight_decay (float): L2 regularisation penalty (Ridge)\n : bidirectional (bool): toggle to regulate uni- or bidirectional RNN\n : init_weights (bool): toggle to regulate weight initialisation\n : embedding_dimensions (int, optional): size of the embedding layer dimension\n : embedding (bool): toggle to regulate the presence of an embedding\n \n \"\"\"\n \n super(SNN, self).__init__()\n \n # Ensuring reproducibility\n set_seed()\n \n # Defining the layers depending on SNN MLP or RSNN\n self._is_mlp = kwargs.get('is_mlp', False)\n \n if self._is_mlp:\n self._hidden_size = kwargs.get('hidden_size', 512) \n self._output_size = kwargs.get('output_size', 256)\n self._dist_fn = kwargs.get('dist_fn', 'cos')\n self._loss = kwargs.get('loss','mse')\n self._similarity_fn = kwargs.get('similarity_fn',None)\n self._initialisation_process = kwargs.get('initialisation_process',None)\n self._input_size = kwargs.get('input_size', 2048)\n self._learning_rate = kwargs.get('learning_rate', 0.005)\n \n if 'hidden_states_processor' in list(kwargs.keys()):\n raise NotImplementedError('No hidden states processor is used for SNN MLP.')\n \n else:\n self._hidden_size = kwargs.get('hidden_size',128)\n self._n_layers = kwargs.get('n_layers',3)\n self._dist_fn = kwargs.get('dist_fn','cos')\n self._similarity_fn = kwargs.get('similarity_fn','clamp')\n self._cell_type = kwargs.get('cell_type','LSTM')\n self._loss = kwargs.get('loss','logcosh')\n self._initialisation_process = kwargs.get('initialisation_process','kaiming_normal')\n self._input_size = kwargs.get('input_size',150)\n self._bidirectional = kwargs.get('bidirectional',True)\n self._normalisation = kwargs.get('normalisation', None)\n self._embedding_dimensions = kwargs.get('embedding_dimensions', 128)\n self._embedding_dropout_p = kwargs.get('embedding_dropout',0.05)\n self._learning_rate = kwargs.get('learning_rate', 0.0001)\n \n # Hidden states processing mechanism is defined in another class\n self._hidden_states_processor = kwargs.get('hidden_states_processor',None)\n 
\n # Avoiding dropout if the model only has one layer\n if self._n_layers > 1:\n self._dropout = kwargs.get('dropout',0.05)\n else:\n self._dropout = 0\n \n # Shared hyperparameters across RSNN and SNN MLP \n self._weight_decay = kwargs.get('weight_decay',0.00)\n self._is_regression = kwargs.get('is_regression', False)\n \n # Additional 'this' entities \n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self._eps = 10e-8 # to avoid division by 0\n \n def build(self):\n \"\"\"\n Instantiates layers.\n \n \"\"\"\n \n if not self._is_mlp:\n \n # Embedding\n if self._embedding_dimensions is not None:\n self.embedding = nn.Embedding(self._input_size, self._embedding_dimensions, padding_idx = 0).to(self.device)\n self.input_rnn = self._embedding_dimensions\n self.embedding_dropout = nn.Dropout(p = self._embedding_dropout_p)\n else:\n self.input_rnn = self._input_size\n\n # Defining the RNN layers\n if self._cell_type.lower() == 'gru':\n self.rnn = nn.GRU(self.input_rnn, self._hidden_size, self._n_layers, \n dropout = self._dropout, batch_first = True, bidirectional = self._bidirectional).to(self.device)\n elif self._cell_type.lower() == 'lstm': \n self.rnn = nn.LSTM(input_size = self.input_rnn, hidden_size = self._hidden_size, \n num_layers = self._n_layers, dropout = self._dropout, batch_first = True, \n bidirectional = self._bidirectional).to(self.device)\n else:\n raise ValueError(\"Value of the parameter should be 'GRU' or 'LSTM'\")\n\n # Tuning batch normalisation and fully connected layer depending on directionality of RNN\n if self._bidirectional:\n self.directions = 2\n else:\n self.directions = 1\n\n # note: the sizes live in the underscored attributes set in __init__\n if self._normalisation == 'batch':\n self.norm0 = nn.BatchNorm1d(self._input_size).to(self.device)\n self.norm1 = nn.BatchNorm1d(self._hidden_size*self.directions).to(self.device)\n elif self._normalisation == 'layer':\n self.norm0 = nn.LayerNorm(self._input_size).to(self.device)\n self.norm1 = nn.LayerNorm(self._hidden_size*self.directions).to(self.device)\n elif self._normalisation is None:\n pass\n else:\n raise NotImplementedError('Normalisation should be batch, layer or None.')\n \n # Builds hidden state processing mechanism\n if self._hidden_states_processor is not None:\n if self._hidden_states_processor == 'attention':\n self._hidden_states_processor = SelfAttention()\n elif self._hidden_states_processor == 'internal_processing':\n self._hidden_states_processor = InternalProcessing()\n self._hidden_states_processor.build()\n print('Built RSNN model!')\n \n else:\n # Layers, dropout and non-linear activation functions are instantiated\n self.mlp = nn.Sequential(nn.Linear(self._input_size, self._hidden_size),\n nn.Dropout(p = 0.05),\n nn.LeakyReLU(),\n nn.Linear(self._hidden_size,self._output_size),\n nn.Dropout(p = 0.05),\n nn.LeakyReLU()).to(self.device)\n \n print('Built SNN MLP model!')\n \n if self._dist_fn != 'cos':\n # Linear layer after distance estimation\n self.dist_fc = nn.Linear(self._hidden_size*self.directions,1).to(self.device)\n\n # Weight initialisation\n if self._initialisation_process is not None:\n self.initialise_weights()\n \n # Get params and register optimiser\n info, params = self.get_model_params()\n \n self.optimiser = optim.Adam(params, lr = self._learning_rate, weight_decay = self._weight_decay)\n\n def forward_once(self, inputs, inputs_lens = None):\n \"\"\"\n Performs the forward pass for the arms of the Siamese Neural Network.\n \n : inputs (torch.Tensor): padded batch of tokenised inputs\n : inputs_lens (torch.Tensor, optional): unpadded sequence lengths, used to pack the batch\n \n \"\"\"\n # 
Determination of batch size\n batch_size = inputs.size(0)\n \n # Initialisation of hidden states\n h0 = self.initialise_hidden(batch_size)\n \n if self._embedding_dimensions is not None:\n embedded_inputs = self.embedding(inputs)\n inputs = self.embedding_dropout(embedded_inputs)\n \n if self._normalisation is not None: \n inputs = self.norm0(inputs)\n \n inputs_packed = tnnur.pack_padded_sequence(inputs, inputs_lens, batch_first = True, enforce_sorted = False)\n \n # RNN cell type\n if self._cell_type.lower() == 'lstm':\n output_packed, hidden = self.rnn(inputs_packed, h0)\n elif self._cell_type.lower() == 'gru':\n output_packed, hidden = self.rnn(inputs_packed)\n \n output, output_lens = tnnur.pad_packed_sequence(output_packed, batch_first = True, total_length = self._input_size)\n \n if self._hidden_states_processor is not None:\n # Applies relevant processing to last hidden states\n ht = self._hidden_states_processor(output)\n else:\n # Extract only last hidden state from last BiLSTM layer\n output_fw = output[:,-1,0:self._hidden_size]\n output_bw = output[:,0,self._hidden_size:]\n\n ht = torch.cat((output_fw, output_bw),-1)\n \n if self._normalisation is not None:\n ht = self.norm1(ht)\n \n return ht\n\n def forward(self, inputs1, inputs2, inputs1_lens = None, inputs2_lens = None, labels = None, predict = False): \n \"\"\"\n Performs the computation of the last hidden states of the two inputs in parallel.\n \n : inputs1 (torch.Tensor): tokenised input from the left-hand arm of the Siamese Neural Network\n : inputs2 (torch.Tensor): tokenised input form the right-hand arm of the Siamese Neural Network\n : inputs1_lens (np.array, optional): \n : inputs2_lens (np.array, optional): \n : labels (torch.Tensor, optional): class labels\n : predict (bool): enables display of performance metrics\n \n \"\"\"\n \n if self._is_mlp:\n output1 = self.mlp(inputs1.float())\n output2 = self.mlp(inputs2.float())\n else: \n output1 = self.forward_once(inputs1, inputs1_lens)\n output2 = self.forward_once(inputs2, inputs2_lens)\n \n # Energy function\n similarity = self.distance_layer(output1, output2, self._dist_fn)\n\n # Evaluation metrics\n if predict:\n metrics_calculator = CalculateMetrics(similarity, labels, is_regression = self._is_regression)\n metrics = metrics_calculator()\n \n return metrics, similarity\n else:\n return similarity\n \n def distance_layer(self, output1, output2, distance):\n \"\"\"\n Energy function. 
Estimates the distance between the two outputs of the Siamese Neural Network according to a given distance metric.\n \n : output1 (torch.Tensor): output of the left-hand arm\n : output2 (torch.Tensor): output of the right-hand arm\n : distance (str): metric to calculate the distance between outputs.\n \n \"\"\"\n \n # Check definition in http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\n # Redefined with L1 as per http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf\n if self._loss == 'contrastive':\n distance = 'l1'\n \n try:\n assert distance in ['cos','l1','l2']\n except:\n print('Similarity metric must be cosine, L1 or L2 distances')\n \n if distance == 'cos':\n distance = F.cosine_similarity(output1, output2, dim = -1, eps = self._eps)\n elif distance == 'l1':\n distance = self.dist_fc(torch.abs(output1 - output2)).squeeze(1)\n elif distance == 'l2':\n distance = self.dist_fc(torch.abs(output1 - output2) ** 2).squeeze(1)\n \n if self._loss != 'contrastive':\n # Passing the distance vector through a similarity function to squish it between 0 and 1\n if self._similarity_fn == 'sigmoid':\n distances = torch.sigmoid(distance)\n elif self._similarity_fn == 'exp':\n distances = torch.exp(-torch.abs(distance))\n elif self._similarity_fn == 'clamp':\n distances = torch.clamp(distance, min = 0.0)\n elif self._similarity_fn is None:\n distances = distance\n \n return distances\n \n def get_loss(self, outputs, labels):\n \"\"\"\n Computes the specified loss function.\n \n : outputs (torch.Tensor): predicted similarities\n : labels (torch.Tensor): target similarities or class labels\n \n \"\"\"\n try:\n assert self._loss in ['mse','mae','l1','l2','huber','logcosh','bce','contrastive'], 'Specify correct loss function'\n except AssertionError as msg:\n sys.exit(msg)\n \n if self._loss == 'mse' or self._loss == 'l2':\n # L2 loss function\n self.criterion = lambda x,y: torch.pow(x - y,2)\n loss = self.criterion(outputs, labels)\n \n # Averaging the losses (MSE loss) or adding them up (L2 loss)\n # over all batch instances\n if self._loss == 'mse':\n loss = torch.mean(loss)\n elif self._loss == 'l2':\n loss = torch.sum(loss)\n \n elif self._loss == 'mae' or self._loss == 'l1':\n # L1 loss function\n self.criterion = lambda x,y: torch.abs(x - y)\n loss = self.criterion(outputs, labels)\n \n # Averaging the losses (MAE loss) or adding them up (L1 loss)\n # over all batch instances\n if self._loss == 'mae':\n loss = torch.mean(loss)\n elif self._loss == 'l1':\n loss = torch.sum(loss)\n \n elif self._loss == 'huber':\n # Huber loss function\n self.criterion = torch.nn.SmoothL1Loss()\n loss = self.criterion(outputs.float(), labels.float())\n \n # Averaging the losses of all batch instances\n loss = torch.mean(loss)\n \n elif self._loss == 'logcosh':\n # Log-cosh loss function\n loss = torch.log(torch.cosh(outputs.float() - labels.float()))\n \n # Adding up the losses of all batch instances\n loss = torch.sum(loss) \n \n elif self._loss == 'bce':\n if self._dist_fn == 'cos':\n self.criterion = nn.BCEWithLogitsLoss()\n else:\n self.criterion = nn.BCELoss()\n loss = self.criterion(outputs.float(), labels.float())\n \n elif self._loss == 'contrastive':\n margin = 1\n loss = torch.sum((1-labels) * torch.pow(outputs,2)+ labels * torch.pow(torch.clamp(margin - outputs, min = 0.0),2))\n\n return loss\n\n def get_model_params(self):\n \"\"\"\n Collects all trainable parameters and reports the total parameter count.\n \"\"\"\n \n params = []\n total_size = 0\n \n def multiply_iter(parameter_list):\n \"\"\"\n Multiplies together the entries of parameter_list (e.g. a tensor's shape).\n \n : parameter_list (torch.Size): shape whose elements are multiplied\n \"\"\"\n \n out = 1\n for parameter in parameter_list:\n out *= parameter\n return out\n\n for parameter in self.parameters():\n if 
parameter.requires_grad:\n params.append(parameter)\n total_size += multiply_iter(parameter.size())\n\n return '{}\\nparam size: {:,}\\n'.format(self, total_size), params\n \n def initialise_hidden(self, batch_size):\n \"\"\"\n Initialisation of hidden states and cell states of LSTM to zero.\n Creation of two new tensors with sizes n_layers x batch_size x n_hidden.\n \n : batch_size (int): specified size of the batch\n \n \"\"\"\n \n weight = next(self.parameters()).data\n \n hidden = (weight.new(self._n_layers*self.directions, batch_size, self._hidden_size).zero_().to(self.device),\n weight.new(self._n_layers*self.directions, batch_size, self._hidden_size).zero_().to(self.device))\n \n return hidden \n \n def initialise_weights(self):\n \"\"\"\n Initialisation of weights and biases for the embedding layer and the RNN layers.\n \n \"\"\" \n \n def initialise_process(param):\n \n \"\"\"\n Initialises weights of a given parameter following either Xavier or Kaiming uniform or normal processes.\n \n : param (torch.Tensor):\n \n \"\"\"\n \n if self._initialisation_process == 'xavier_uniform':\n tnni.xavier_uniform_(param.data)\n elif self._initialisation_process == 'xavier_normal':\n tnni.xavier_normal_(param.data)\n elif self._initialisation_process == 'kaiming_uniform':\n tnni.kaiming_uniform_(param.data)\n elif self._initialisation_process == 'kaiming_normal':\n tnni.kaiming_normal_(param.data)\n \n if self._initialisation_process is not None:\n for m in self.modules():\n # Embedding\n if type(m) is nn.Embedding:\n tnni.normal_(self.embedding.weight)\n # RNN\n elif type(m) in [nn.GRU, nn.LSTM, nn.RNN]: \n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n initialise_process(param)\n #torch.nn.init.kaiming_normal_(param.data)\n elif 'weight_hh' in name:\n tnni.orthogonal_(param.data)\n elif 'bias' in name:\n # Bias initialised with zero will get the bias from\n # the forget gate\n param.data.fill_(0.0)\n param.data[self._hidden_size:self.directions*self._hidden_size].fill_(1.0)\n # Attention linear layer\n elif type(m) is nn.Linear:\n for name, param in m.named_parameters():\n if 'weight' in name:\n initialise_process(param.data)\n elif 'bias' in name:\n param.data.normal_()\n\n\"\"\"\nImplements attention as described in 'Indentifying Structure-Property Relationships through SMILES Syntax \nAnalysis with Self-Attention Mechanism'\n\n\"\"\"\n \nclass SelfAttention(nn.Module): \n def __init__(self, **kwargs):\n \"\"\"\n Initialiser.\n \n : expansion_size (int): adjustable hyperparameter. 
Intermediate dimension to yield attention weights\n : hidden_size (int): hidden units after concatenating the hidden units for both directions\n : attention_layers (int):\n : seqlen (int): length of the padded SMILES string\n \n \"\"\"\n \n super(SelfAttention, self).__init__()\n \n set_seed()\n\n self._hidden_size = kwargs.get('hidden_size',128)\n self._expansion_size = kwargs.get('expansion_size',1024)\n self._attention_layers = kwargs.get('attention_layers',512)\n self._activation_fn = kwargs.get('activation_fn','leaky ReLU')\n self._seqlen = kwargs.get('seqlen',150)\n \n # Device agnostic\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n def build(self):\n \"\"\"\n Generates matrixes and layers to implement internal processing.\n \n \"\"\"\n \n # Defining the layers\n self.w1 = nn.Linear(self._hidden_size*2, self._expansion_size, bias = False).to(self.device)\n self.tanh = nn.Tanh()\n self.w2 = nn.Linear(self._expansion_size, self._attention_layers, bias = False).to(self.device)\n self.softmax = nn.Softmax(dim = 2)\n self.fc = nn.Linear(self._attention_layers, 1).to(self.device)\n \n if self._activation_fn == 'tanh' or isinstance(self._activation_fn, torch.nn.modules.activation.Tanh):\n self._activation_fn = nn.Tanh()\n elif self._activation_fn == 'sigmoid' or isinstance(self._activation_fn, torch.nn.modules.activation.Sigmoid):\n self._activation_fn = nn.Sigmoid()\n elif self._activation_fn == 'leaky ReLU' or isinstance(self._activation_fn, torch.nn.modules.activation.LeakyReLU):\n self._activation_fn = nn.LeakyReLU()\n else:\n raise NotImplementedError('Non-linear activation function must be \"tanh\", \"sigmoid\" or \"leaky ReLU\"')\n \n # Passing it onto the relevant device\n self._activation_fn = self._activation_fn.to(self.device)\n \n def forward(self, output):\n \"\"\"\n Forward pass of the bidirectional hidden states.\n \n : output (torch.Tensor): matrix with hidden states and cell states\n \n \"\"\"\n \n hidden_states = self.extract_hidden_states(output)\n \n # Obtaining the attention weights\n weighted_states = self.w1(hidden_states)\n activated_states = self.tanh(weighted_states)\n score_weights = self.w2(activated_states)\n attention_weights = self.softmax(score_weights)\n \n # Applying attention to the matrix with hidden states\n attentional_vector = torch.bmm(torch.transpose(attention_weights,2,1),hidden_states) \n attentional_vector = self.fc(torch.transpose(attentional_vector,2,1)).squeeze(2)\n attentional_vector = self._activation_fn(attentional_vector)\n \n return attentional_vector\n \n def extract_hidden_states(self, output):\n \"\"\"\n Extracts last hidden states from both directions.\n \n : output (torch.Tensor): matrix with hidden states and cell states\n \n \"\"\"\n \n # Extracting the forward and backward hidden states from the last BiLSTM layer\n # output (batch_size, sequence length, 2 * hidden dim)\n output_fw = output[:,:,0:self._hidden_size]\n output_bw = output[:,:,self._hidden_size:]\n \n hidden_states = torch.cat((output_fw, output_bw),-1)\n \n return hidden_states\n \n\"\"\"\nImplements internal processing (matrices with weights initialised N~(0,1))\n\n\"\"\"\n \nclass InternalProcessing(nn.Module):\n def __init__(self, **kwargs):\n \"\"\"\n \n : expansion_size (int):\n : hidden_size (int):\n \n \"\"\"\n \n super(InternalProcessing, self).__init__()\n \n set_seed()\n \n self._hidden_size = kwargs.get('hidden_size',128)\n self._expansion_size = kwargs.get('expansion_size',128)\n self._activation_fn = 
kwargs.get('activation_fn','sigmoid')\n self._seqlen = kwargs.get('seqlen',150)\n \n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n \n def build(self, weight = 0.5):\n \"\"\"\n Generates matrixes and layers to implement internal processing.\n \n : batch_size (int):\n \n \"\"\"\n \n self.weight = weight\n \n # Defining weighting matrixes\n self.processing_fw = torch.randn((self._hidden_size, self._expansion_size), requires_grad = True).to(self.device)\n self.processing_bw = torch.randn((self._hidden_size, self._expansion_size), requires_grad = True).to(self.device)\n self.processing_last_ht = torch.randn((self._hidden_size*2, self._hidden_size*2), requires_grad = True).to(self.device)\n \n # These will only be applied to the intermediate hidden states\n self.linear_fw = nn.Linear(self._seqlen - 1, 1).to(self.device)\n self.linear_bw = nn.Linear(self._seqlen - 1, 1).to(self.device)\n \n self.compression = torch.randn((self._expansion_size*2, self._hidden_size*2), requires_grad = True).to(self.device)\n \n if self._activation_fn == 'tanh' or isinstance(self._activation_fn, torch.nn.modules.activation.Tanh):\n self._activation_fn = nn.Tanh()\n elif self._activation_fn == 'sigmoid' or isinstance(self._activation_fn, torch.nn.modules.activation.Sigmoid):\n self._activation_fn = nn.Sigmoid()\n elif self._activation_fn == 'leaky ReLU' or isinstance(self._activation_fn, torch.nn.modules.activation.LeakyReLU):\n self._activation_fn = nn.LeakyReLU()\n else:\n raise ValueError('Non-linear activation function must be \"tanh\", \"sigmoid\" or \"leaky ReLU\"')\n \n # Passing it onto the relevant device\n self._activation_fn = self._activation_fn.to(self.device)\n \n def forward(self, output):\n \"\"\"\n Forward pass of the bidirectional hidden states.\n \n : output (torch.Tensor): matrix with hidden states and cell states\n \n \"\"\"\n \n last_ht, output_fw_intermediate, output_bw_intermediate = self.extract_hidden_states(output)\n \n # Intermediate hidden state internal processing\n output_reduced_fw = self.implement_processing(output_fw_intermediate, self.processing_fw)\n output_reduced_bw = self.implement_processing(output_bw_intermediate, self.processing_bw, forward = False)\n \n # Concatenation of intermediate hidden state outputs\n output_reduced = torch.cat((output_reduced_fw, output_reduced_bw),-1)\n \n # Reduction of concatenated outputs dimension to match last hidden state\n intermediate_ht = torch.matmul(output_reduced, self.compression)\n \n # Last hidden state internal processing\n last_ht = torch.matmul(last_ht, self.processing_last_ht)\n last_ht = (1-torch.tensor(self.weight))*last_ht\n \n # Weighted sum of hidden states\n ht = last_ht.add(intermediate_ht)\n \n return ht \n \n def extract_hidden_states(self, output):\n \"\"\"\n Extracts intermediate and last hidden states\n \n : output (torch.Tensor):\n \n \"\"\"\n # Intermediate hidden states\n output_fw_intermediate = output[:,:-1,0:self._hidden_size]\n output_bw_intermediate = output[:,1:,self._hidden_size:] \n \n # Last hidden states\n output_fw = output[:,-1,0:self._hidden_size]\n output_bw = output[:,0,self._hidden_size:]\n last_ht = torch.cat((output_fw, output_bw), -1)\n \n return last_ht, output_fw_intermediate, output_bw_intermediate\n \n def implement_processing(self, output_intermediate, processing_matrix, forward = True):\n \"\"\"\n Carries out internal processing for each hidden state output direction.\n \n : output_intermediate (torch.Tensor): unidirectional intermediate hidden states\n : 
processing_matrix (torch.Tensor): matrix with weights ~ N(0,1)\n : forward (bool): toggle to regulate output direction\n \n \"\"\"\n \n # Attention implementation\n output_intermediate = torch.matmul(output_intermediate, processing_matrix)\n output_intermediate = torch.transpose(output_intermediate,2,1)\n \n # Linear layer output reduction \n # from [batch, hidden_size, input_size - 1] to [batch, hidden_size,1]\n # different linear layers required to ensure good performance\n if forward:\n output_reduced = self.linear_fw(output_intermediate).squeeze(2)\n else:\n output_reduced = self.linear_bw(output_intermediate).squeeze(2)\n \n output_reduced = self._activation_fn(output_reduced)\n \n return output_reduced" ]
[ [ "torch.nn.Softmax", "torch.mean", "torch.transpose", "torch.abs", "torch.cat", "torch.nn.GRU", "torch.sum", "torch.nn.Embedding", "torch.nn.utils.rnn.pad_packed_sequence", "torch.nn.BCEWithLogitsLoss", "torch.cuda.is_available", "torch.pow", "torch.nn.Dropout", "torch.randn", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Sigmoid", "torch.tensor", "torch.optim.Adam", "torch.nn.SmoothL1Loss", "torch.sigmoid", "torch.nn.BatchNorm1d", "torch.nn.init.xavier_normal_", "torch.nn.BCELoss", "torch.nn.Linear", "torch.nn.functional.cosine_similarity", "torch.nn.init.normal_", "torch.nn.LeakyReLU", "torch.nn.LSTM", "torch.nn.Tanh", "torch.nn.init.kaiming_uniform_", "torch.matmul", "torch.nn.LayerNorm", "torch.nn.init.orthogonal_", "torch.nn.init.xavier_uniform_", "torch.clamp", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Leonui/BRECQ
[ "e455d62e93c70351961f8991c913b59435bd165f" ]
[ "linklink/dist_helper.py" ]
[ "import os\nimport torch\nimport pickle\nimport numpy as np\nimport linklink as link\n\n\ndef save_file(dict, name):\n if link.get_local_rank() == 0:\n torch.save(dict, name)\n\n\ndef dist_finalize():\n link.finalize()\n\n\nclass AllReduce(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input):\n output = torch.zeros_like(input)\n output.copy_(input)\n link.allreduce(output)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n in_grad = torch.zeros_like(grad_output)\n in_grad.copy_(grad_output)\n link.allreduce(in_grad)\n return in_grad\n\n\ndef allaverage(tensor):\n tensor.data /= link.get_world_size()\n link.allreduce(tensor.data)\n return tensor\n\n\ndef allaverage_autograd(tensor):\n if tensor.is_cuda is True:\n tensor /= link.get_world_size()\n tensor = AllReduce().apply(tensor)\n return tensor\n\n\ndef allreduce(tensor):\n link.allreduce(tensor.data)\n\n\ndef link_dist(func):\n\n def wrapper(*args, **kwargs):\n dist_init()\n func(*args, **kwargs)\n dist_finalize()\n\n return wrapper\n\n\ndef dist_init(method='slurm', device_id=0):\n if method == 'slurm':\n proc_id = int(os.environ['SLURM_PROCID'])\n # ntasks = int(os.environ['SLURM_NTASKS'])\n # node_list = os.environ['SLURM_NODELIST']\n num_gpus = torch.cuda.device_count()\n torch.cuda.set_device(proc_id % num_gpus)\n elif method == 'normal':\n torch.cuda.set_device(device_id)\n link.initialize(backend='nccl', job_envrion=method)\n world_size = link.get_world_size()\n rank = link.get_rank()\n\n return rank, world_size\n\n\ndef dist_finalize():\n link.finalize()\n\n\ndef simple_group_split(world_size, rank, num_groups):\n groups = []\n rank_list = np.split(np.arange(world_size), num_groups)\n rank_list = [list(map(int, x)) for x in rank_list]\n for i in range(num_groups):\n groups.append(link.new_group(rank_list[i]))\n group_size = world_size // num_groups\n return groups[rank//group_size]\n\n\nclass DistModule(torch.nn.Module):\n def __init__(self, module, sync=False):\n super(DistModule, self).__init__()\n self.module = module\n self.broadcast_params()\n\n self.sync = sync\n if not sync:\n self._grad_accs = []\n self._register_hooks()\n\n def forward(self, *inputs, **kwargs):\n return self.module(*inputs, **kwargs)\n\n def _register_hooks(self):\n for i, (name, p) in enumerate(self.named_parameters()):\n if p.requires_grad:\n p_tmp = p.expand_as(p)\n grad_acc = p_tmp.grad_fn.next_functions[0][0]\n grad_acc.register_hook(self._make_hook(name, p, i))\n self._grad_accs.append(grad_acc)\n\n def _make_hook(self, name, p, i):\n def hook(*ignore):\n link.allreduce_async(name, p.grad.data)\n return hook\n\n def sync_gradients(self):\n \"\"\" average gradients \"\"\"\n if self.sync and link.get_world_size() > 1:\n for name, param in self.module.named_parameters():\n if param.requires_grad and param.grad is not None:\n link.allreduce(param.grad.data)\n else:\n link.synchronize()\n\n def broadcast_params(self):\n \"\"\" broadcast model parameters \"\"\"\n for name, param in self.module.state_dict().items():\n link.broadcast(param, 0)\n\n\ndef _serialize_to_tensor(data, group=None):\n # backend = link.get_backend(group)\n # assert backend in [\"gloo\", \"nccl\"]\n # device = torch.device(\"cpu\" if backend == \"gloo\" else \"cuda\")\n device = torch.cuda.current_device()\n\n buffer = pickle.dumps(data)\n if len(buffer) > 1024 ** 3:\n import logging\n logger = logging.getLogger('global')\n logger.warning(\n \"Rank {} trying to all-gather {:.2f} GB of data on device {}\".format(\n link.get_rank(), len(buffer) / 
(1024 ** 3), device\n )\n )\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(device=device)\n return tensor\n\n\ndef broadcast_object(obj, group=None):\n \"\"\"make sure obj is picklable\n \"\"\"\n if link.get_world_size() == 1:\n return obj\n\n serialized_tensor = _serialize_to_tensor(obj).cuda()\n numel = torch.IntTensor([serialized_tensor.numel()]).cuda()\n link.broadcast(numel, 0)\n # serialized_tensor from storage is not resizable\n serialized_tensor = serialized_tensor.clone()\n serialized_tensor.resize_(numel)\n link.broadcast(serialized_tensor, 0)\n serialized_bytes = serialized_tensor.cpu().numpy().tobytes()\n deserialized_obj = pickle.loads(serialized_bytes)\n return deserialized_obj\n" ]
[ [ "torch.ByteTensor", "torch.cuda.set_device", "torch.cuda.current_device", "numpy.arange", "torch.zeros_like", "torch.cuda.device_count", "torch.ByteStorage.from_buffer", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MezerLab/age-pred-r1
[ "5e72fa3ede6306f779a262ded4e2c10d932df038" ]
[ "src/exploratory/qc_ringing.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 10 13:43:27 2019\n\n@author: asier.erramuzpe\n\"\"\"\n### RINGING DETECTION MODULE\n# file1 = OK, file2 = RINGING, file2 = SLIGHT RINGING, file3 = SLIGHT FRONTAL MOVE\n\nfile1 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s008/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'\nfile2 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s007/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'\nfile3 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s046/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'\nfile4 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s064/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'\nfile5 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s078/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'\nfile6 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s106/mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'\n\nfile1 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s008/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'\nfile2 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s007/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'\nfile3 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s046/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'\nfile4 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s064/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'\nfile5 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s078/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'\nfile6 = '/ems/elsc-labs/mezer-a/Mezer-Lab/analysis/reading_stanford/s106/mrQ_ver2/OutPutFiles_1/BrainMaps/TV_map.nii.gz'\n\n\ndef get_axial(file):\n import nibabel as nib\n \n try:\n file_data = nib.load(file).get_data()\n except:\n print('File {} does not exist'.format(file)) \n \n x, y, z = file_data.shape\n file_axial = file_data[:, :, z//2]\n \n return file_axial\n\ndef plot_spectrum(img):\n from matplotlib.colors import LogNorm\n from scipy import fftpack\n \n im_fft = fftpack.fft2(img)\n # A logarithmic colormap\n plt.figure()\n plt.imshow(np.abs(im_fft), norm=LogNorm(vmin=5))\n plt.colorbar()\n plt.title('Fourier transform')\n\n\nplot_spectrum(get_axial(file1))\nplot_spectrum(get_axial(file2))\nplot_spectrum(get_axial(file3))\nplot_spectrum(get_axial(file4))\nplot_spectrum(get_axial(file5))\n\n\nplt.imshow(get_axial(file1))\nplt.imshow(get_axial(file2))\nplt.imshow(get_axial(file3))\nplt.imshow(get_axial(file4))\nplt.imshow(get_axial(file5))\nplt.imshow(get_axial(file6))\n\nplot_fft2_power(file1)\nplot_fft2_power(file2)\nplot_fft2_power(file3)\nplot_fft2_power(file4)\nplot_fft2_power(file5)\nplot_fft2_power(file6)\n\n\npower_sum(file1)\npower_sum(file2)\npower_sum(file3)\npower_sum(file4)\npower_sum(file5)\npower_sum(file6)\n\n\ndef power_sum(file):\n \n from scipy import fftpack\n import numpy as np\n import pylab as py\n \n image = get_axial(file)\n # Take the fourier transform of the image.\n F1 = fftpack.fft2(image)\n # Now shift the quadrants around so that low spatial frequencies are in\n # the center of the 2D fourier transformed image.\n F2 = fftpack.fftshift( F1 )\n # Calculate a 2D power spectrum\n psd2D = np.abs( F2 )**2\n # Calculate the azimuthally averaged 1D power spectrum\n psd1D = azimuthalAverage(psd2D)\n return np.sum(psd1D)\n\n\n\ndef plot_fft2_power(file):\n \n from scipy import fftpack\n import numpy as np\n import pylab as py\n \n image = get_axial(file)\n # Take the 
fourier transform of the image.\n F1 = fftpack.fft2(image)\n # Now shift the quadrants around so that low spatial frequencies are in\n # the center of the 2D fourier transformed image.\n F2 = fftpack.fftshift( F1 )\n # Calculate a 2D power spectrum\n psd2D = np.abs( F2 )**2\n # Calculate the azimuthally averaged 1D power spectrum\n psd1D = azimuthalAverage(psd2D)\n # Now plot up both\n# py.figure(1)\n# py.clf()\n# py.imshow( np.log10( image ), cmap=py.cm.Greys)\n \n py.figure(2)\n py.clf()\n py.imshow( np.log10( psd2D ))\n \n# py.figure(3)\n# py.clf()\n# py.semilogy( psd1D )\n# py.xlabel('Spatial Frequency')\n# py.ylabel('Power Spectrum')\n# py.title(str(np.sum(psd1D)))\n py.show()\n\n\ndef azimuthalAverage(image, center=None):\n \"\"\"\n Calculate the azimuthally averaged radial profile.\n image - The 2D image\n center - The [x,y] pixel coordinates used as the center. The default is \n None, which then uses the center of the image (including \n fracitonal pixels).\n \n \"\"\"\n # Calculate the indices from the image\n y, x = np.indices(image.shape)\n\n if not center:\n center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])\n\n r = np.hypot(x - center[0], y - center[1])\n\n # Get sorted radii\n ind = np.argsort(r.flat)\n r_sorted = r.flat[ind]\n i_sorted = image.flat[ind]\n\n # Get the integer part of the radii (bin size = 1)\n r_int = r_sorted.astype(int)\n\n # Find all pixels that fall within each radial bin.\n deltar = r_int[1:] - r_int[:-1] # Assumes all radii represented\n rind = np.where(deltar)[0] # location of changed radius\n nr = rind[1:] - rind[:-1] # number of radius bin\n \n # Cumulative sum to figure out sums for each radius bin\n csim = np.cumsum(i_sorted, dtype=float)\n tbin = csim[rind[1:]] - csim[rind[:-1]]\n\n radial_prof = tbin / nr\n\n return radial_prof\n\n\n\"\"\"\nQC reports\n\"\"\"\nimport os\nfrom os.path import join as opj\n\nimport numpy as np\nimport scipy.io as sio\nfrom src.visualization.visualize import multipage\nfrom dotenv import find_dotenv, load_dotenv\ndotenv_path = find_dotenv()\nload_dotenv(dotenv_path)\nanalysis_data_path = os.environ.get(\"ANALYSIS_DATA_PATH\")\n\n\ndef create_qc_report_mrq(dataset, file_path):\n \"\"\"\n Creates a visual report with axial middle slices.\n \n dataset = dataset to choose from\n file_paths = dictionary {file: path_to_file inside mrQver2 folder}\n as many files as wanted\n \"\"\"\n \n input_path = opj(analysis_data_path,\n dataset)\n figures = []\n for sub_idx, sub in enumerate(sorted(os.listdir(input_path))):\n print(sub)\n \n for file, file_path in file_paths.items():\n target_file = opj(input_path, sub, file_path)\n if os.path.exists(target_file):\n axial_slice = get_axial(target_file) \n # make figure\n plt.imshow(axial_slice, cmap='gray')\n plt.clim(0, 4)\n plt.colorbar()\n ax = plt.title('Subject {}. 
File {}'.format(sub, file))\n fig = ax.get_figure()\n figures.append(fig)\n plt.close()\n \n output_folder = opj('./reports',\n 'qc')\n \n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n \n multipage(opj(output_folder,\n 'report_' + dataset + '.pdf'),\n figures,\n dpi=250)\n\n\n\n# possible datasets = kalanit_stanford ms_stanford_run1 stanford_2 reading_stanford anorexia_stanford gotlib_stanford amblyopia_stanford\ndatasets = ['kalanit_stanford', 'ms_stanford_run1', 'stanford_2',\n 'reading_stanford', 'anorexia_stanford',\n 'gotlib_stanford', 'amblyopia_stanford']\n\nfile_paths = {'T1': 'mrQ_ver2/OutPutFiles_1/BrainMaps/T1_map_Wlin.nii.gz'} \n\nfor dataset in datasets: \n create_qc_report_mrq(dataset, file_paths)\n" ]
[ [ "matplotlib.colors.LogNorm", "numpy.abs", "numpy.indices", "numpy.cumsum", "scipy.fftpack.fft2", "numpy.log10", "numpy.argsort", "numpy.where", "numpy.sum", "numpy.hypot", "scipy.fftpack.fftshift" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
YongyiTang92/tensorpack
[ "49675590da8a39c649b5f0f5ba522a22b90e2d69" ]
[ "examples/FasterRCNN/train.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: train.py\n\nimport argparse\nimport itertools\nimport numpy as np\nimport os\nimport shutil\nimport cv2\nimport six\nassert six.PY3, \"FasterRCNN requires Python 3!\"\nimport tensorflow as tf\nimport tqdm\n\nimport tensorpack.utils.viz as tpviz\nfrom tensorpack import *\nfrom tensorpack.tfutils import optimizer\nfrom tensorpack.tfutils.common import get_tf_version_tuple\nfrom tensorpack.tfutils.summary import add_moving_summary\n\nimport model_frcnn\nimport model_mrcnn\nfrom basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone\nfrom dataset import DetectionDataset\nfrom config import finalize_configs, config as cfg\nfrom data import get_all_anchors, get_all_anchors_fpn, get_eval_dataflow, get_train_dataflow\nfrom eval import DetectionResult, predict_image, multithread_predict_dataflow, EvalCallback\nfrom model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align\nfrom model_cascade import CascadeRCNNHead\nfrom model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses\nfrom model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets\nfrom model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head\nfrom model_rpn import generate_rpn_proposals, rpn_head, rpn_losses\nfrom viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall\n\ntry:\n import horovod.tensorflow as hvd\nexcept ImportError:\n pass\n\n\nclass DetectionModel(ModelDesc):\n def preprocess(self, image):\n image = tf.expand_dims(image, 0)\n image = image_preprocess(image, bgr=True)\n return tf.transpose(image, [0, 3, 1, 2])\n\n @property\n def training(self):\n return get_current_tower_context().is_training\n\n def optimizer(self):\n lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)\n tf.summary.scalar('learning_rate-summary', lr)\n\n # The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.\n lr = lr / 8.\n opt = tf.train.MomentumOptimizer(lr, 0.9)\n if cfg.TRAIN.NUM_GPUS < 8:\n opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)\n return opt\n\n def get_inference_tensor_names(self):\n \"\"\"\n Returns two lists of tensor names to be used to create an inference callable.\n\n Returns:\n [str]: input names\n [str]: output names\n \"\"\"\n out = ['output/boxes', 'output/scores', 'output/labels']\n if cfg.MODE_MASK:\n out.append('output/masks')\n return ['image'], out\n\n def build_graph(self, *inputs):\n inputs = dict(zip(self.input_names, inputs))\n\n image = self.preprocess(inputs['image']) # 1CHW\n\n features = self.backbone(image)\n anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}\n proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?\n\n targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]\n head_losses = self.roi_heads(image, features, proposals, targets)\n\n if self.training:\n wd_cost = regularize_cost(\n '.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')\n total_cost = tf.add_n(\n rpn_losses + head_losses + [wd_cost], 'total_cost')\n add_moving_summary(total_cost, wd_cost)\n return total_cost\n\n\nclass ResNetC4Model(DetectionModel):\n def inputs(self):\n ret = [\n tf.placeholder(tf.float32, (None, None, 3), 'image'),\n tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR), 'anchor_labels'),\n tf.placeholder(tf.float32, (None, None, 
cfg.RPN.NUM_ANCHOR, 4), 'anchor_boxes'),\n tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),\n tf.placeholder(tf.int64, (None,), 'gt_labels')] # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')\n ) # NR_GT x height x width\n return ret\n\n def backbone(self, image):\n return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]\n\n def rpn(self, image, features, inputs):\n featuremap = features[0]\n rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)\n anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])\n anchors = anchors.narrow_to(featuremap)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox\n proposal_boxes, proposal_scores = generate_rpn_proposals(\n tf.reshape(pred_boxes_decoded, [-1, 4]),\n tf.reshape(rpn_label_logits, [-1]),\n image_shape2d,\n cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,\n cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)\n\n if self.training:\n losses = rpn_losses(\n anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n featuremap = features[0]\n\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n # sample proposal boxes in training\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n # The boxes to be used to crop RoIs.\n # Use all proposal boxes in inference\n\n boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)\n roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)\n\n feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7\n # Keep C5 feature to be shared with mask branch\n feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)\n\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,\n tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n # In training, mask branch shares the same C5 feature.\n fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 14,\n pad_border=False) # nfg x 1x14x14\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n\n if cfg.MODE_MASK:\n roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)\n 
feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])\n mask_logits = maskrcnn_upXconv_head(\n 'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\nclass ResNetFPNModel(DetectionModel):\n\n def inputs(self):\n ret = [\n tf.placeholder(tf.float32, (None, None, 3), 'image')]\n num_anchors = len(cfg.RPN.ANCHOR_RATIOS)\n for k in range(len(cfg.FPN.ANCHOR_STRIDES)):\n ret.extend([\n tf.placeholder(tf.int32, (None, None, num_anchors),\n 'anchor_labels_lvl{}'.format(k + 2)),\n tf.placeholder(tf.float32, (None, None, num_anchors, 4),\n 'anchor_boxes_lvl{}'.format(k + 2))])\n ret.extend([\n tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),\n tf.placeholder(tf.int64, (None,), 'gt_labels')]) # all > 0\n if cfg.MODE_MASK:\n ret.append(\n tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')\n ) # NR_GT x height x width\n return ret\n\n def slice_feature_and_anchors(self, p23456, anchors):\n for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):\n with tf.name_scope('FPN_slice_lvl{}'.format(i)):\n anchors[i] = anchors[i].narrow_to(p23456[i])\n\n def backbone(self, image):\n c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)\n p23456 = fpn_model('fpn', c2345)\n return p23456\n\n def rpn(self, image, features, inputs):\n assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)\n\n image_shape2d = tf.shape(image)[2:] # h,w\n all_anchors_fpn = get_all_anchors_fpn()\n multilevel_anchors = [RPNAnchors(\n all_anchors_fpn[i],\n inputs['anchor_labels_lvl{}'.format(i + 2)],\n inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]\n self.slice_feature_and_anchors(features, multilevel_anchors)\n\n # Multi-Level RPN Proposals\n rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))\n for pi in features]\n multilevel_label_logits = [k[0] for k in rpn_outputs]\n multilevel_box_logits = [k[1] for k in rpn_outputs]\n multilevel_pred_boxes = [anchor.decode_logits(logits)\n for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]\n\n proposal_boxes, proposal_scores = generate_fpn_proposals(\n multilevel_pred_boxes, multilevel_label_logits, image_shape2d)\n\n if self.training:\n losses = multilevel_rpn_losses(\n multilevel_anchors, multilevel_label_logits, multilevel_box_logits)\n else:\n losses = []\n\n return BoxProposals(proposal_boxes), losses\n\n def roi_heads(self, image, features, proposals, targets):\n image_shape2d = tf.shape(image)[2:] # h,w\n assert len(features) == 5, \"Features have to be P23456!\"\n gt_boxes, gt_labels, *_ = targets\n\n if self.training:\n proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)\n\n fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)\n if not cfg.FPN.CASCADE:\n roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)\n\n head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)\n fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(\n 'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)\n fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,\n gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))\n else:\n def roi_func(boxes):\n return multilevel_roi_align(features[:4], 
boxes, 7)\n\n fastrcnn_head = CascadeRCNNHead(\n proposals, roi_func, fastrcnn_head_func,\n (gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)\n\n if self.training:\n all_losses = fastrcnn_head.losses()\n\n if cfg.MODE_MASK:\n gt_masks = targets[2]\n # maskrcnn loss\n roi_feature_maskrcnn = multilevel_roi_align(\n features[:4], proposals.fg_boxes(), 14,\n name_scope='multilevel_roi_align_mask')\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n\n target_masks_for_fg = crop_and_resize(\n tf.expand_dims(gt_masks, 1),\n proposals.fg_boxes(),\n proposals.fg_inds_wrt_gt, 28,\n pad_border=False) # fg x 1x28x28\n target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')\n all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))\n return all_losses\n else:\n decoded_boxes = fastrcnn_head.decoded_output_boxes()\n decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')\n label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')\n final_boxes, final_scores, final_labels = fastrcnn_predictions(\n decoded_boxes, label_scores, name_scope='output')\n if cfg.MODE_MASK:\n # Cascade inference needs roi transform with refined boxes.\n roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)\n maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)\n mask_logits = maskrcnn_head_func(\n 'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28\n indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)\n final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28\n tf.sigmoid(final_mask_logits, name='output/masks')\n return []\n\n\ndef do_visualize(model, model_path, nr_visualize=100, output_dir='output'):\n \"\"\"\n Visualize some intermediate results (proposals, raw predictions) inside the pipeline.\n \"\"\"\n df = get_train_dataflow() # we don't visualize mask stuff\n df.reset_state()\n\n pred = OfflinePredictor(PredictConfig(\n model=model,\n session_init=get_model_loader(model_path),\n input_names=['image', 'gt_boxes', 'gt_labels'],\n output_names=[\n 'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),\n 'fastrcnn_all_scores',\n 'output/boxes',\n 'output/scores',\n 'output/labels',\n ]))\n\n if os.path.isdir(output_dir):\n shutil.rmtree(output_dir)\n utils.fs.mkdir_p(output_dir)\n with tqdm.tqdm(total=nr_visualize) as pbar:\n for idx, dp in itertools.islice(enumerate(df), nr_visualize):\n img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']\n\n rpn_boxes, rpn_scores, all_scores, \\\n final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)\n\n # draw groundtruth boxes\n gt_viz = draw_annotation(img, gt_boxes, gt_labels)\n # draw best proposals for each groundtruth, to show recall\n proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)\n # draw the scores for the above proposals\n score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])\n\n results = [DetectionResult(*args) for args in\n zip(final_boxes, final_scores, final_labels,\n [None] * len(final_labels))]\n final_viz = draw_final_outputs(img, results)\n\n viz = tpviz.stack_patches([\n gt_viz, 
proposal_viz,\n score_viz, final_viz], 2, 2)\n\n if os.environ.get('DISPLAY', None):\n tpviz.interactive_imshow(viz)\n cv2.imwrite(\"{}/{:03d}.png\".format(output_dir, idx), viz)\n pbar.update()\n\n\ndef do_evaluate(pred_config, output_file):\n num_gpu = cfg.TRAIN.NUM_GPUS\n graph_funcs = MultiTowerOfflinePredictor(\n pred_config, list(range(num_gpu))).get_predictors()\n\n for dataset in cfg.DATA.VAL:\n logger.info(\"Evaluating {} ...\".format(dataset))\n dataflows = [\n get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)\n for k in range(num_gpu)]\n all_results = multithread_predict_dataflow(dataflows, graph_funcs)\n output = output_file + '-' + dataset\n DetectionDataset().eval_or_save_inference_results(all_results, dataset, output)\n\n\ndef do_predict(pred_func, input_file):\n img = cv2.imread(input_file, cv2.IMREAD_COLOR)\n results = predict_image(img, pred_func)\n final = draw_final_outputs(img, results)\n viz = np.concatenate((img, final), axis=1)\n cv2.imwrite(\"output.png\", viz)\n logger.info(\"Inference output written to output.png\")\n tpviz.interactive_imshow(viz)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')\n parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')\n parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')\n parser.add_argument('--evaluate', help=\"Run evaluation. \"\n \"This argument is the path to the output json evaluation file\")\n parser.add_argument('--predict', help=\"Run prediction on a given image. \"\n \"This argument is the path to the input image file\")\n parser.add_argument('--config', help=\"A list of KEY=VALUE to overwrite those defined in config.py\",\n nargs='+')\n\n if get_tf_version_tuple() < (1, 6):\n # https://github.com/tensorflow/tensorflow/issues/14657\n logger.warn(\"TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.\")\n\n args = parser.parse_args()\n if args.config:\n cfg.update_args(args.config)\n\n MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()\n DetectionDataset() # initialize the config with information from our dataset\n\n if args.visualize or args.evaluate or args.predict:\n if not tf.test.is_gpu_available():\n from tensorflow.python.framework import test_util\n assert test_util.IsMklEnabled(), \"Inference requires either GPU support or MKL support!\"\n assert args.load\n finalize_configs(is_training=False)\n\n if args.predict or args.visualize:\n cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS\n\n if args.visualize:\n do_visualize(MODEL, args.load)\n else:\n predcfg = PredictConfig(\n model=MODEL,\n session_init=get_model_loader(args.load),\n input_names=MODEL.get_inference_tensor_names()[0],\n output_names=MODEL.get_inference_tensor_names()[1])\n if args.predict:\n do_predict(OfflinePredictor(predcfg), args.predict)\n elif args.evaluate:\n assert args.evaluate.endswith('.json'), args.evaluate\n do_evaluate(predcfg, args.evaluate)\n else:\n is_horovod = cfg.TRAINER == 'horovod'\n if is_horovod:\n hvd.init()\n logger.info(\"Horovod Rank={}, Size={}\".format(hvd.rank(), hvd.size()))\n\n if not is_horovod or hvd.rank() == 0:\n logger.set_logger_dir(args.logdir, 'd')\n\n finalize_configs(is_training=True)\n stepnum = cfg.TRAIN.STEPS_PER_EPOCH\n\n # warmup is step based, lr is epoch based\n init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. 
/ cfg.TRAIN.NUM_GPUS, 1.)\n warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]\n warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum\n lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]\n\n factor = 8. / cfg.TRAIN.NUM_GPUS\n for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):\n mult = 0.1 ** (idx + 1)\n lr_schedule.append(\n (steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))\n logger.info(\"Warm Up Schedule (steps, value): \" + str(warmup_schedule))\n logger.info(\"LR Schedule (epochs, value): \" + str(lr_schedule))\n train_dataflow = get_train_dataflow()\n # This is what's commonly referred to as \"epochs\"\n total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()\n logger.info(\"Total passes of the training set is: {:.5g}\".format(total_passes))\n\n callbacks = [\n PeriodicCallback(\n ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),\n every_k_epochs=20),\n # linear warmup\n ScheduledHyperParamSetter(\n 'learning_rate', warmup_schedule, interp='linear', step_based=True),\n ScheduledHyperParamSetter('learning_rate', lr_schedule),\n PeakMemoryTracker(),\n EstimatedTimeLeft(median=True),\n SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout\n ] + [\n EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)\n for dataset in cfg.DATA.VAL\n ]\n if not is_horovod:\n callbacks.append(GPUUtilizationTracker())\n\n if is_horovod and hvd.rank() > 0:\n session_init = None\n else:\n if args.load:\n session_init = get_model_loader(args.load)\n else:\n session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None\n\n traincfg = TrainConfig(\n model=MODEL,\n data=QueueInput(train_dataflow),\n callbacks=callbacks,\n steps_per_epoch=stepnum,\n max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,\n session_init=session_init,\n starting_epoch=cfg.TRAIN.STARTING_EPOCH\n )\n if is_horovod:\n trainer = HorovodTrainer(average=False)\n else:\n # nccl mode appears faster than cpu mode\n trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')\n launch_train_with_config(traincfg, trainer)\n" ]
[ [ "tensorflow.get_variable", "tensorflow.transpose", "tensorflow.constant", "tensorflow.gather_nd", "tensorflow.shape", "tensorflow.reshape", "tensorflow.sigmoid", "tensorflow.expand_dims", "tensorflow.placeholder", "tensorflow.squeeze", "numpy.concatenate", "tensorflow.cast", "tensorflow.train.MomentumOptimizer", "tensorflow.add_n", "tensorflow.test.is_gpu_available", "tensorflow.python.framework.test_util.IsMklEnabled", "tensorflow.summary.scalar", "tensorflow.size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
RyanStronge/AgileMTurk
[ "7f94173d3e9cf6e14081d9d624d26e1e1231ba4e" ]
[ "processing/processPolyline.py" ]
[ "import requests\nimport json\nfrom PIL import Image, ImageDraw\nimport numpy as np\nimport os\n#import cv2\nimport psycopg2\nimport time\n\nconn = psycopg2.connect(dbname=\"ddcdvtofrshbnj\", user=\"ntvhhmrhgzdmqh\", password=\"70f5719386ca8d7a4464e7ba903ff81ddbe1fe1d444071cc5ce4e1ad28059870\",\n host=\"ec2-54-247-89-181.eu-west-1.compute.amazonaws.com\", port=\"5432\")\ncur = conn.cursor()\n\n\ndef execute_command(command):\n try:\n cur.execute(command)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n return cur.fetchall()\n\n\ndef getcsvdataprocess():\n print(os.getcwd())\n print(os.listdir())\n now = time.strftime(\"%Y%m%d-%H%M%S\")\n timeName = \"img\"+str(now)\n os.mkdir(\"fullimgs/\"+timeName)\n os.mkdir(\"newimgs/\"+timeName)\n rows = execute_command(\"SELECT * FROM data\")\n for row in rows:\n name = \"img\"+str(row[0])\n file_name = \"fullimgs/\"+timeName+\"/\"+ name +\".jpg\"\n # row[0] is id: int\n # row[1] is url: str\n # row[2] is points: json\n\n img_data = requests.get(str(row[1])).content\n print(\"url: \"+str(row[1]))\n with open(file_name, 'wb') as handler:\n handler.write(img_data)\n jsonData = str(row[2])\n jsonObj = json.loads(jsonData)\n print(jsonObj)\n objName = jsonObj[\"name\"]\n yPoints = jsonObj[\"all_points_y\"]\n xPoints = jsonObj[\"all_points_x\"]\n\n yList = eval(str(yPoints))\n xList = eval(str(xPoints))\n\n if objName == \"polyline\":\n im = Image.open(file_name).convert(\"RGBA\")\n imArray = np.asarray(im)\n polygon = [None] * len(yList)\n for z in range(len(yList)):\n polygon[z] = (xList[z], yList[z])\n maskIm = Image.new(\n 'L', (imArray.shape[1], imArray.shape[0]), 0)\n ImageDraw.Draw(maskIm).polygon(polygon, outline=1, fill=1)\n mask = np.array(maskIm)\n newImArray = np.empty(imArray.shape, dtype='uint8')\n newImArray[:, :, :3] = imArray[:, :, :3]\n newImArray[:, :, 3] = mask*255\n newIm = Image.fromarray(newImArray, \"RGBA\")\n newIm.save(\"newimgs/\"+timeName+\"/\"+name + \".png\")\n\"\"\" im = cv2.imread(\"newimgs/\"+name+\"/\" +\n name+\".png\", cv2.IMREAD_UNCHANGED)\n \n y, x = im[:, :, 3].nonzero()\n minx = np.min(x)\n miny = np.min(y)\n maxx = np.max(x)\n maxy = np.max(y)\n\n cropImg = im[miny:maxy, minx:maxx]\n whiteCellsMask = np.logical_and(cropImg[:, :, 0] == 255, np.logical_and(\n cropImg[:, :, 1] == 255, cropImg[:, :, 2] == 255))\n cropImg[whiteCellsMask, :] = [255, 255, 255, 0]\n\n cv2.imwrite(\"newimgs/\"+name+\"/\" +\n name + \".png\", cropImg)\n cv2.waitKey(0) \"\"\"\n\n\ngetcsvdataprocess()\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Vevn/ENVISIoN
[ "d0e48a5ec38ed95375f632eafdc5814415f0f570" ]
[ "inviwo/modules/fermi/tests/unittests/scripts/brillouin_zone.py" ]
[ "#\n# ENVISIoN\n#\n# Copyright (c) 2020 Alexander Vevstad\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n##############################################################################################\nimport numpy as np\n\nfrom HDF5FermiSource import HDF5FermiSource\n\nprocess = HDF5FermiSource('fermi', 'fermi_source')\n\nmat4 = np.zeros((10, 10, 10))\nbasis = 0.2*np.eye(3)\n\nbrill = process.brillouin_zone(mat4, basis)\n\nlenx = int(brill.shape[0]/2)\nleny = int(brill.shape[1]/2)\nlenz = int(brill.shape[2]/2)\n\n# Basically just testing values around the brillouin zone border\n# might be a better way of testing all values inside/outside brillouin zone\nstatus = (\n brill.shape == (20, 20, 20) and\n # values inside brillouin zone are 0.0\n brill[lenx, leny, lenz] == 0.0 and\n # lower end\n brill[int(lenx*1/2) + 1, leny, lenz] == 0.0 and\n brill[lenx, int(leny*1/2) + 1, lenz] == 0.0 and\n brill[lenx, leny, int(lenz*1/2) + 1] == 0.0 and\n # upper end\n brill[int(lenx*3/2) - 1, leny, lenz] == 0.0 and\n brill[lenx, int(leny*3/2) - 1, lenz] == 0.0 and\n brill[lenx, leny, int(lenz*3/2) - 1] == 0.0 and\n\n # values outside brillouin zone are 1.0\n brill[0, 0, 0] == 1.0 and\n # lower end\n brill[int(lenx*1/2) - 1, leny, lenz] == 1.0 and\n brill[lenx, int(leny*1/2) - 1, lenz] == 1.0 and\n brill[lenx, leny, int(lenz*1/2) - 1] == 1.0 and\n # upper end\n brill[int(lenx*3/2) + 1, leny, lenz] == 1.0 and\n brill[lenx, int(leny*3/2) + 1, lenz] == 1.0 and\n brill[lenx, leny, int(lenz*3/2) + 1] == 1.0\n)\n" ]
[ [ "numpy.eye", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maroacc/padel-video-classification
[ "3fcbe360fb84e7840634dadac76676098a8894a4" ]
[ "extractor.py" ]
[ "from keras.preprocessing import image\nfrom keras.applications.inception_v3 import InceptionV3, preprocess_input\nfrom keras.models import Model, load_model\nfrom keras.layers import Input\nimport numpy as np\n\nclass Extractor():\n def __init__(self, image_shape=(299, 299, 3), weights=None):\n \"\"\"Either load pretrained from imagenet, or load our saved\n weights from our own training.\"\"\"\n\n self.weights = weights # so we can check elsewhere which model\n\n input_tensor = Input(image_shape)\n # Get model with pretrained weights.\n base_model = InceptionV3(\n input_tensor=input_tensor,\n weights='imagenet',\n include_top=True\n )\n\n # We'll extract features at the final pool layer.\n self.model = Model(\n inputs=base_model.input,\n outputs=base_model.get_layer('avg_pool').output\n )\n\n def extract(self, image_path):\n img = image.load_img(image_path)\n\n return self.extract_image(img)\n\n def extract_image(self, img):\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n # Get the prediction.\n features = self.model.predict(x)\n\n if self.weights is None:\n # For imagenet/default network:\n features = features[0]\n else:\n # For loaded network:\n features = features[0]\n\n return features\n" ]
[ [ "numpy.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
trandinhson3086/cp-vton
[ "0e86598a95b5306f4fd262086b3fd0c0759a72aa" ]
[ "score_fid_infinity.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import Parameter as P\nfrom torchvision.models.inception import inception_v3\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as transforms\nimport numpy as np\nimport math\nfrom sklearn.linear_model import LinearRegression\nimport math\nimport os\nimport glob\nfrom tqdm import tqdm\nfrom PIL import Image\nfrom scipy import linalg\n\n\nclass randn_sampler():\n \"\"\"\n Generates z~N(0,1) using random sampling or scrambled Sobol sequences.\n Args:\n ndim: (int)\n The dimension of z.\n use_sobol: (bool)\n If True, sample z from scrambled Sobol sequence. Else, sample\n from standard normal distribution.\n Default: False\n use_inv: (bool)\n If True, use inverse CDF to transform z from U[0,1] to N(0,1).\n Else, use Box-Muller transformation.\n Default: True\n cache: (bool)\n If True, we cache some amount of Sobol points and reorder them.\n This is mainly used for training GANs when we use two separate\n Sobol generators which helps stabilize the training.\n Default: False\n\n Examples::\n >>> sampler = randn_sampler(128, True)\n >>> z = sampler.draw(10) # Generates [10, 128] vector\n \"\"\"\n\n def __init__(self, ndim, use_sobol=False, use_inv=True, cache=False):\n self.ndim = ndim\n self.cache = cache\n if use_sobol:\n self.sampler = NormalQMCEngine(d=ndim, inv_transform=use_inv)\n self.cached_points = torch.tensor([])\n else:\n self.sampler = None\n\n def draw(self, batch_size):\n if self.sampler is None:\n return torch.randn([batch_size, self.ndim])\n else:\n if self.cache:\n if len(self.cached_points) < batch_size:\n # sample from sampler and reorder the points\n self.cached_points = self.sampler.draw(int(1e6))[torch.randperm(int(1e6))]\n\n # Sample without replacement from cached points\n samples = self.cached_points[:batch_size]\n self.cached_points = self.cached_points[batch_size:]\n return samples\n else:\n return self.sampler.draw(batch_size)\n\n\ndef calculate_FID_infinity(dataloader, gt_m, gt_s, batch_size, num_points=15):\n \"\"\"\n Calculates effectively unbiased FID_inf using extrapolation\n Args:\n gen_model: (nn.Module)\n The trained generator. 
Generator takes in z~N(0,1) and outputs\n an image of [-1, 1].\n ndim: (int)\n The dimension of z.\n batch_size: (int)\n The batch size of generator\n gt_path: (str)\n Path to saved FID statistics of true data.\n num_im: (int)\n Number of images we are generating to evaluate FID_inf.\n Default: 50000\n num_points: (int)\n Number of FID_N we evaluate to fit a line.\n Default: 15\n \"\"\"\n # load pretrained inception model\n inception_model = load_inception_net()\n \n # get all activations of generated images\n activations = get_activations(dataloader, inception_model).cpu().numpy()\n\n fids = []\n\n # Choose the number of images to evaluate FID_N at regular intervals over N\n fid_batches = np.linspace(5000, len(dataloader), num_points).astype('int32')\n\n # Evaluate FID_N\n for fid_batch_size in fid_batches:\n # sample with replacement\n np.random.shuffle(activations)\n fid_activations = activations[:fid_batch_size]\n fids.append(calculate_FID(fid_activations, gt_m, gt_s))\n fids = np.array(fids).reshape(-1, 1)\n\n # Fit linear regression\n reg = LinearRegression().fit(1 / fid_batches.reshape(-1, 1), fids)\n fid_infinity = reg.predict(np.array([[0]]))[0, 0]\n\n return fid_infinity\n\n\ndef calculate_IS_infinity(gen_model, ndim, batch_size, num_im=50000, num_points=15):\n \"\"\"\n Calculates effectively unbiased IS_inf using extrapolation\n Args:\n gen_model: (nn.Module)\n The trained generator. Generator takes in z~N(0,1) and outputs\n an image of [-1, 1].\n ndim: (int)\n The dimension of z.\n batch_size: (int)\n The batch size of generator\n num_im: (int)\n Number of images we are generating to evaluate IS_inf.\n Default: 50000\n num_points: (int)\n Number of IS_N we evaluate to fit a line.\n Default: 15\n \"\"\"\n # load pretrained inception model\n inception_model = load_inception_net()\n\n # define a sobol_inv sampler\n z_sampler = randn_sampler(ndim, True)\n\n # get all activations of generated images\n _, logits = accumulate_activations(gen_model, inception_model, num_im, z_sampler, batch_size)\n\n IS = []\n\n # Choose the number of images to evaluate IS_N at regular intervals over N\n IS_batches = np.linspace(5000, num_im, num_points).astype('int32')\n\n # Evaluate IS_N\n for IS_batch_size in IS_batches:\n # sample with replacement\n np.random.shuffle(logits)\n IS_logits = logits[:IS_batch_size]\n IS.append(calculate_inception_score(IS_logits)[0])\n IS = np.array(IS).reshape(-1, 1)\n\n # Fit linear regression\n reg = LinearRegression().fit(1 / IS_batches.reshape(-1, 1), IS)\n IS_infinity = reg.predict(np.array([[0]]))[0, 0]\n\n return IS_infinity\n\n\n################# Functions for calculating and saving dataset inception statistics ##################\nclass im_dataset(Dataset):\n def __init__(self, data_dir):\n self.data_dir = data_dir\n self.imgpaths = self.get_imgpaths()\n\n self.transform = transforms.Compose([\n transforms.ToTensor()])\n\n def get_imgpaths(self):\n paths = glob.glob(os.path.join(self.data_dir, \"**/*.jpg\"), recursive=True)\n return paths\n\n def __getitem__(self, idx):\n img_name = self.imgpaths[idx]\n image = self.transform(Image.open(img_name))\n return image\n\n def __len__(self):\n return len(self.imgpaths)\n\n\ndef load_path_statistics(path):\n \"\"\"\n Given path to dataset npz file, load and return mu and sigma\n \"\"\"\n if path.endswith('.npz'):\n f = np.load(path)\n m, s = f['mu'][:], f['sigma'][:]\n f.close()\n return m, s\n else:\n raise RuntimeError('Invalid path: %s' % path)\n\n\ndef compute_path_statistics(path, batch_size):\n \"\"\"\n Given 
path to a dataset, load and compute mu and sigma\n \"\"\"\n if not os.path.exists(path):\n raise RuntimeError('Invalid path: %s' % path)\n\n model = load_inception_net()\n dataloader = torch.utils.data.DataLoader(im_dataset(path), batch_size=batch_size, drop_last=False, **{'num_workers': 8, 'pin_memory': False})\n act = get_activations(dataloader, model).cpu().numpy()\n m, s = np.mean(act, axis=0), np.cov(act, rowvar=False)\n return m, s\n\n\ndef get_activations(dataloader, model):\n \"\"\"\n Get inception activations from dataset\n \"\"\"\n pool = []\n\n for images in tqdm(dataloader):\n images = images.cuda()\n with torch.no_grad():\n pool_val, logits_val = model(images)\n pool += [pool_val]\n\n return torch.cat(pool, 0)\n\n\n####################### Functions to help calculate FID and IS #######################\ndef calculate_FID(act, data_m, data_s):\n \"\"\"\n calculate FID given activations and precomputed dataset statistics\n \"\"\"\n gen_m, gen_s = np.mean(act, axis=0), np.cov(act, rowvar=False)\n FID = numpy_calculate_frechet_distance(gen_m, gen_s, data_m, data_s)\n\n return FID\n\n\ndef calculate_inception_score(pred, num_splits=1):\n scores = []\n for index in range(num_splits):\n pred_chunk = pred[index * (pred.shape[0] // num_splits): (index + 1) * (pred.shape[0] // num_splits), :]\n kl_inception = pred_chunk * (np.log(pred_chunk) - np.log(np.expand_dims(np.mean(pred_chunk, 0), 0)))\n kl_inception = np.mean(np.sum(kl_inception, 1))\n scores.append(np.exp(kl_inception))\n return np.mean(scores), np.std(scores)\n\n\ndef accumulate_activations(gen_model, inception_model, num_im, z_sampler, batch_size):\n \"\"\"\n Generate images and compute their Inception activations.\n \"\"\"\n pool, logits = [], []\n for i in range(math.ceil(num_im / batch_size)):\n with torch.no_grad():\n z = z_sampler.draw(batch_size).cuda()\n fake_img = to_img(gen_model(z))\n\n pool_val, logits_val = inception_model(fake_img)\n pool += [pool_val]\n logits += [F.softmax(logits_val, 1)]\n\n pool = torch.cat(pool, 0)[:num_im]\n logits = torch.cat(logits, 0)[:num_im]\n\n return pool.cpu().numpy(), logits.cpu().numpy()\n\n\ndef to_img(x):\n \"\"\"\n Normalizes an image from [-1, 1] to [0, 1]\n \"\"\"\n x = 0.5 * (x + 1)\n x = x.clamp(0, 1)\n return x\n\n\n# Module that wraps the inception network to enable use with dataparallel and\n# returning pool features and logits.\nclass WrapInception(nn.Module):\n def __init__(self, net):\n super(WrapInception, self).__init__()\n self.net = net\n self.mean = P(torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1),\n requires_grad=False)\n self.std = P(torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1),\n requires_grad=False)\n\n def forward(self, x):\n x = (x - self.mean) / self.std\n # Upsample if necessary\n if x.shape[2] != 299 or x.shape[3] != 299:\n x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)\n # 299 x 299 x 3\n x = self.net.Conv2d_1a_3x3(x)\n # 149 x 149 x 32\n x = self.net.Conv2d_2a_3x3(x)\n # 147 x 147 x 32\n x = self.net.Conv2d_2b_3x3(x)\n # 147 x 147 x 64\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 73 x 73 x 64\n x = self.net.Conv2d_3b_1x1(x)\n # 73 x 73 x 80\n x = self.net.Conv2d_4a_3x3(x)\n # 71 x 71 x 192\n x = F.max_pool2d(x, kernel_size=3, stride=2)\n # 35 x 35 x 192\n x = self.net.Mixed_5b(x)\n # 35 x 35 x 256\n x = self.net.Mixed_5c(x)\n # 35 x 35 x 288\n x = self.net.Mixed_5d(x)\n # 35 x 35 x 288\n x = self.net.Mixed_6a(x)\n # 17 x 17 x 768\n x = self.net.Mixed_6b(x)\n # 17 x 17 x 768\n x = 
self.net.Mixed_6c(x)\n # 17 x 17 x 768\n x = self.net.Mixed_6d(x)\n # 17 x 17 x 768\n x = self.net.Mixed_6e(x)\n # 17 x 17 x 768\n x = self.net.Mixed_7a(x)\n # 8 x 8 x 1280\n x = self.net.Mixed_7b(x)\n # 8 x 8 x 2048\n x = self.net.Mixed_7c(x)\n # 8 x 8 x 2048\n pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)\n # 1 x 1 x 2048\n logits = self.net.fc(F.dropout(pool, training=False).view(pool.size(0), -1))\n # 1000 (num_classes)\n return pool, logits\n\n\ndef numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):\n \"\"\"Numpy implementation of the Frechet Distance.\n The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)\n and X_2 ~ N(mu_2, C_2) is\n d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).\n Stable version by Dougal J. Sutherland.\n Params:\n -- mu1 : Numpy array containing the activations of a layer of the\n inception net (like returned by the function 'get_predictions')\n for generated samples.\n -- mu2 : The sample mean over activations, precalculated on a\n representative data set.\n -- sigma1: The covariance matrix over activations for generated samples.\n -- sigma2: The covariance matrix over activations, precalculated on a\n representative data set.\n Returns:\n -- : The Frechet Distance.\n \"\"\"\n\n mu1 = np.atleast_1d(mu1)\n mu2 = np.atleast_1d(mu2)\n\n sigma1 = np.atleast_2d(sigma1)\n sigma2 = np.atleast_2d(sigma2)\n\n assert mu1.shape == mu2.shape, \\\n 'Training and test mean vectors have different lengths'\n assert sigma1.shape == sigma2.shape, \\\n 'Training and test covariances have different dimensions'\n\n diff = mu1 - mu2\n\n # Product might be almost singular\n covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)\n if not np.isfinite(covmean).all():\n msg = ('fid calculation produces singular product; '\n 'adding %s to diagonal of cov estimates') % eps\n print(msg)\n offset = np.eye(sigma1.shape[0]) * eps\n covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))\n\n # Numerical error might give slight imaginary component\n if np.iscomplexobj(covmean):\n if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):\n m = np.max(np.abs(covmean.imag))\n raise ValueError('Imaginary component {}'.format(m))\n covmean = covmean.real\n\n tr_covmean = np.trace(covmean)\n\n return (diff.dot(diff) + np.trace(sigma1) +\n np.trace(sigma2) - 2 * tr_covmean)\n\n\n# Load and wrap the Inception model\ndef load_inception_net(parallel=False):\n inception_model = inception_v3(pretrained=True, transform_input=False)\n inception_model = WrapInception(inception_model.eval()).cuda()\n if parallel:\n inception_model = nn.DataParallel(inception_model)\n return inception_model\n\n\nif __name__ == '__main__':\n from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n\n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('--batch-size', type=int, default=512,\n help='Batch size to use')\n\n args = parser.parse_args()\n\n root_path = \"test_files_dir/residual_gan_train_new_more_consist_loss_base_model_2_step_070000/\"\n paths = [\"baseline\", \"refined\"]\n \n data_m, data_s = compute_path_statistics(\"test_files_dir/GMM/gt\", args.batch_size)\n\n for path in paths:\n dataloader = torch.utils.data.DataLoader(im_dataset(root_path + path), \n batch_size=args.batch_size, \n drop_last=False, **{'num_workers': 8, 'pin_memory': False})\n\n FID = calculate_FID_infinity(dataloader, data_m, data_s, args.batch_size)\n print(\"FID inf\", FID)\n" ]
[ [ "torch.nn.functional.softmax", "numpy.linspace", "torch.cat", "torch.nn.functional.dropout", "numpy.mean", "torch.no_grad", "torch.nn.functional.interpolate", "numpy.iscomplexobj", "numpy.exp", "numpy.trace", "torch.randn", "numpy.eye", "torch.tensor", "numpy.atleast_1d", "numpy.std", "numpy.load", "torch.nn.functional.max_pool2d", "numpy.log", "numpy.atleast_2d", "numpy.cov", "numpy.array", "numpy.sum", "numpy.diagonal", "numpy.abs", "numpy.isfinite", "numpy.random.shuffle", "sklearn.linear_model.LinearRegression", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akash-chowdhary/akash-data-x-plaksha
[ "81a264ce8990c4d93e95c57ddb8f09d5d2a4a4d9" ]
[ "18-dataviz_elias/DASH_Plotly/dash_intro.py" ]
[ "#!/usr/bin/env python3\n\nimport pandas as pd\nimport plotly.express as px # (version 4.7.0)\nimport plotly.graph_objects as go\n\nimport dash # (version 1.12.0) pip install dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\napp = dash.Dash(__name__)\n\n# ------------------------------------------------------------------------------\n# Import and clean data (importing csv into pandas)\ndf = pd.read_csv(\"./data/bii_data_w_categories.csv\")\n\ndf = df.groupby(['State', 'ANSI', 'Affected by', 'Year', 'state_code'])[['Pct of Colonies Impacted']].mean()\ndf.reset_index(inplace=True)\nprint(df[:5])\n\n# ------------------------------------------------------------------------------\n# App layout\napp.layout = html.Div([\n\n html.H1(\"Web Application Dashboards with Dash\", style={'text-align': 'center'}),\n\n dcc.Dropdown(id=\"slct_year\",\n options=[\n {\"label\": \"2015\", \"value\": 2015},\n {\"label\": \"2016\", \"value\": 2016},\n {\"label\": \"2017\", \"value\": 2017},\n {\"label\": \"2018\", \"value\": 2018}],\n multi=False,\n value=2015,\n style={'width': \"40%\"}\n ),\n\n html.Div(id='output_container', children=[]),\n html.Br(),\n\n dcc.Graph(id='my_bee_map', figure={})\n\n])\n\n\n# ------------------------------------------------------------------------------\n# Connect the Plotly graphs with Dash Components\[email protected](\n [Output(component_id='output_container', component_property='children'),\n Output(component_id='my_bee_map', component_property='figure')],\n [Input(component_id='slct_year', component_property='value')]\n)\ndef update_graph(option_slctd):\n print(option_slctd)\n print(type(option_slctd))\n\n container = \"The year chosen by user was: {}\".format(option_slctd)\n\n dff = df.copy()\n dff = dff[dff[\"Year\"] == option_slctd]\n dff = dff[dff[\"Affected by\"] == \"Varroa_mites\"]\n\n # Plotly Express\n fig = px.choropleth(\n data_frame=dff,\n locationmode='USA-states',\n locations='state_code',\n scope=\"usa\",\n color='Pct of Colonies Impacted',\n hover_data=['State', 'Pct of Colonies Impacted'],\n color_continuous_scale=px.colors.sequential.YlOrRd,\n labels={'Pct of Colonies Impacted': '% of Bee Colonies'},\n template='plotly_dark'\n )\n\n # Plotly Graph Objects (GO)\n # fig = go.Figure(\n # data=[go.Choropleth(\n # locationmode='USA-states',\n # locations=dff['state_code'],\n # z=dff[\"Pct of Colonies Impacted\"].astype(float),\n # colorscale='Reds',\n # )]\n # )\n #\n # fig.update_layout(\n # title_text=\"Bees Affected by Mites in the USA\",\n # title_xanchor=\"center\",\n # title_font=dict(size=24),\n # title_x=0.5,\n # geo=dict(scope='usa'),\n # )\n\n return container, fig\n\n\n# ------------------------------------------------------------------------------\nif __name__ == '__main__':\n app.run_server(debug=True)" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
PINE4PPLE/transformer-lm
[ "da76a4afd29d1fd023ba866ccc21a49901ad46f2" ]
[ "utils/beam_search.py" ]
[ "import torch\nEOS_ID = 1\nINF = 1. * 1e7\n\ndef _merge_beam_dim(tensor):\n \"\"\"Reshapes first two dimensions in to single dimension.\n\n Args:\n tensor: Tensor to reshape of shape [A, B, ...]\n\n Returns:\n Reshaped tensor of shape [A*B, ...]\n \"\"\"\n shape = list(tensor.shape)\n shape[0] *= shape[1] # batch -> batch * beam_size\n shape.pop(1) # Remove beam dim\n return torch.reshape(tensor, shape)\n\ndef _unmerge_beam_dim(tensor, batch_size, beam_size):\n \"\"\"Reshapes first dimension back to [batch_size, beam_size].\n\n Args:\n tensor: Tensor to reshape of shape [batch_size*beam_size, ...]\n batch_size: Tensor, original batch size.\n beam_size: int, original beam size.\n\n Returns:\n Reshaped tensor of shape [batch_size, beam_size, ...]\n \"\"\"\n shape = list(tensor.shape)\n new_shape = [batch_size] + [beam_size] + shape[1:]\n return torch.reshape(tensor, new_shape)\n\ndef _expand_to_beam_size(tensor, beam_size):\n \"\"\"Tiles a given tensor by beam_size.\n\n Args:\n tensor: tensor to tile [batch_size, ...]\n beam_size: How much to tile the tensor by.\n\n Returns:\n Tiled tensor [batch_size, beam_size, ...]\n \"\"\"\n tensor = torch.unsqueeze(tensor, 1)\n tile_dims = [1] * len(tensor.shape)\n tile_dims[1] = beam_size\n\n return tensor.repeat(tile_dims)\n\ndef compute_batch_indices(batch_size, beam_size):\n \"\"\"Computes the i'th coordinate that contains the batch index for gathers.\n\n Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which\n batch the beam item is in. This will create the i of the i,j coordinate\n needed for the gather.\n\n Args:\n batch_size: Batch size\n beam_size: Size of the beam.\n Returns:\n batch_pos: [batch_size, beam_size] tensor of ids\n \"\"\"\n batch_pos = torch.arange(batch_size * beam_size)// beam_size\n batch_pos = torch.reshape(batch_pos, [batch_size, beam_size])\n return batch_pos\ndef compute_seq_length(topk,seq_len):\n tensor = torch.unsqueeze(topk, -1)\n tile_dims = [1] * len(tensor.shape)\n tile_dims[-1] = seq_len\n\n res = tensor.repeat(tile_dims)\n return res\n\ndef compute_topk_scores_and_seq(sequences,\n scores,\n scores_to_gather,\n flags,\n beam_size,\n batch_size\n ):\n \"\"\"Given sequences and scores, will gather the top k=beam size sequences.\n\n This function is used to grow alive, and finished. It takes sequences,\n scores, and flags, and returns the top k from sequences, scores_to_gather,\n and flags based on the values in scores.\n\n This method permits easy introspection using tfdbg. It adds three named ops\n that are prefixed by `prefix`:\n - _topk_seq: the tensor for topk_seq returned by this method.\n - _topk_flags: the tensor for topk_finished_flags returned by this method.\n - _topk_scores: the tensor for tokp_gathered_scores returned by this method.\n\n Args:\n sequences: Tensor of sequences that we need to gather from.\n [batch_size, beam_size, seq_length]\n scores: Tensor of scores for each sequence in sequences.\n [batch_size, beam_size]. We will use these to compute the topk.\n scores_to_gather: Tensor of scores for each sequence in sequences.\n [batch_size, beam_size]. 
We will return the gathered scores from here.\n Scores to gather is different from scores because for grow_alive, we will\n need to return log_probs, while for grow_finished, we will need to return\n the length penalized scores.\n flags: Tensor of bools for sequences that say whether a sequence has reached\n EOS or not\n beam_size: int\n batch_size: int\n\n Returns:\n Tuple of\n (topk_seq [batch_size, beam_size, decode_length],\n topk_gathered_scores [batch_size, beam_size],\n topk_finished_flags[batch_size, beam_size])\n \"\"\"\n\n _, topk_indexes = torch.topk(scores, k=beam_size) #[batch_size, beam_size ]\n seq_indexes = compute_seq_length(topk_indexes, sequences.shape[-1])\n # The next three steps are to create coordinates for tf.gather_nd to pull\n # out the topk sequences from sequences based on scores.\n # batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which\n # batch the beam item is in. This will create the i of the i,j coordinate\n # needed for the gather\n #batch_pos = compute_batch_indices(batch_size, beam_size)\n\n # top coordinates will give us the actual coordinates to do the gather.\n # stacking will create a tensor of dimension batch * beam * 2, where the\n # last dimension contains the i,j gathering coordinates.\n #top_coordinates = torch.stack((batch_pos, topk_indexes), axis=2)\n\n # Gather up the highest scoring sequences. For each operation added, give\n # it a concrete name to simplify observing these operations with tfdbg.\n # Clients can capture these tensors by watching these node names.\n #topk_seq = torch.gather(sequences, top_coordinates)\n #topk_flags = torch.gather(flags, top_coordinates)\n #topk_gathered_scores = torch.gather(scores_to_gather, top_coordinates)\n topk_seq = torch.gather(sequences, 1, seq_indexes)\n topk_flags = torch.gather(flags, 1, topk_indexes)\n topk_gathered_scores = torch.gather(scores_to_gather, 1, topk_indexes)\n\n return topk_seq, topk_gathered_scores, topk_flags\n\ndef beam_search(symbols_to_logits_fn,\n initial_ids,\n beam_size,\n decode_length,\n vocab_size,\n alpha,\n states=None,\n eos_id=EOS_ID,\n stop_early=True):\n \"\"\"Beam search with length penalties.\n\n Requires a function that can take the currently decoded symbols and return\n the logits for the next symbol. The implementation is inspired by\n https://arxiv.org/abs/1609.08144.\n\n When running, the beam search steps can be visualized by using tfdbg to watch\n the operations generating the output ids for each beam step. These operations\n have the pattern:\n (alive|finished)_topk_(seq,scores)\n\n Operations marked `alive` represent the new beam sequences that will be\n processed in the next step. Operations marked `finished` represent the\n completed beam sequences, which may be padded with 0s if no beams finished.\n\n Operations marked `seq` store the full beam sequence for the time step.\n Operations marked `scores` store the sequence's final log scores.\n\n The beam search steps will be processed sequentially in order, so when\n capturing tensors observed from these operations, clients can make\n assumptions about which step is being recorded.\n\n WARNING: Assumes the 2nd dimension of tensors in `states` is not invariant, this\n means that the shape of the 2nd dimension of these tensors will not be\n available (i.e. 
set to None) inside symbols_to_logits_fn.\n\n Args:\n symbols_to_logits_fn: Interface to the model, to provide logits.\n Should take [batch_size, decoded_ids] and return [batch_size, vocab_size]\n initial_ids: Ids to start off the decoding, this will be the first thing\n handed to symbols_to_logits_fn (after expanding to beam size)\n [batch_size]\n beam_size: Size of the beam.\n decode_length: Number of steps to decode for.\n vocab_size: Size of the vocab, must equal the size of the logits returned by\n symbols_to_logits_fn\n alpha: alpha for length penalty.\n states: dict (possibly nested) of decoding states.\n eos_id: ID for end of sentence.\n stop_early: a boolean - stop once best sequence is provably determined.\n\n Returns:\n Tuple of\n (decoded beams [batch_size, beam_size, decode_length]\n decoding probabilities [batch_size, beam_size])\n \"\"\"\n batch_size = initial_ids.shape[0]\n initial_log_probs = torch.tensor([[0.] + [-INF] * (beam_size - 1)])\n # Expand to beam_size (batch_size, beam_size)\n alive_log_probs = initial_log_probs.repeat([batch_size, 1])\n alive_seq = _expand_to_beam_size(initial_ids, beam_size)\n alive_seq = torch.unsqueeze(alive_seq, 2) # (batch_size, beam_size, 1)\n # Finished will keep track of all the sequences that have finished so far\n # Finished log probs will be negative infinity in the beginning\n # finished_flags will keep track of booleans\n finished_seq = torch.zeros(list(alive_seq.shape), dtype=torch.long) # long, so it can be concatenated with token ids\n # Setting the scores of the initial to negative infinity.\n finished_scores = torch.ones([batch_size, beam_size]) * -INF\n finished_flags = torch.zeros([batch_size, beam_size], dtype=torch.bool)\n\n def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq,\n curr_scores, curr_finished):\n \"\"\"Given sequences and scores, will gather the top k=beam size sequences.\n\n Args:\n finished_seq: Current finished sequences.\n [batch_size, beam_size, current_decoded_length]\n finished_scores: scores for each of these sequences.\n [batch_size, beam_size]\n finished_flags: finished bools for each of these sequences.\n [batch_size, beam_size]\n curr_seq: current topk sequence that has been grown by one position.\n [batch_size, beam_size, current_decoded_length]\n curr_scores: scores for each of these sequences. [batch_size, beam_size]\n curr_finished: Finished flags for each of these sequences.\n [batch_size, beam_size]\n Returns:\n Tuple of\n (Topk sequences based on scores,\n log probs of these sequences,\n Finished flags of these sequences)\n \"\"\"\n \n finished_seq = torch.cat(\n [finished_seq,\n torch.zeros([batch_size, beam_size, 1], dtype=torch.long)], axis=2)\n\n # Set the scores of the unfinished seq in curr_seq to large negative\n # values (not in-place, to avoid mutating the caller's tensor)\n curr_scores = curr_scores + (1. 
- curr_finished.to(torch.float)) * -INF\n # concatenating the sequences and scores along beam axis\n curr_finished_seq = torch.cat([finished_seq, curr_seq], 1)\n curr_finished_scores = torch.cat([finished_scores, curr_scores], 1)\n curr_finished_flags = torch.cat([finished_flags, curr_finished], 1)\n return compute_topk_scores_and_seq(\n curr_finished_seq,\n curr_finished_scores,\n curr_finished_scores,\n curr_finished_flags,\n beam_size,\n batch_size\n )\n \n def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished):\n \"\"\"Given sequences and scores, will gather the top k=beam size sequences.\n\n Args:\n curr_seq: current topk sequence that has been grown by one position.\n [batch_size, beam_size, i+1]\n curr_scores: scores for each of these sequences. [batch_size, beam_size]\n curr_log_probs: log probs for each of these sequences.\n [batch_size, beam_size]\n curr_finished: Finished flags for each of these sequences.\n [batch_size, beam_size]\n Returns:\n Tuple of\n (Topk sequences based on scores,\n log probs of these sequences,\n Finished flags of these sequences)\n \"\"\"\n # Set the scores of the finished seq in curr_seq to large negative\n # values (not in-place: inner_loop passes the same scores tensor to\n # grow_finished afterwards)\n curr_scores = curr_scores + curr_finished.to(torch.float) * -INF\n return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs,\n curr_finished, beam_size, batch_size) \n \n def grow_topk(i, alive_seq, alive_log_probs):\n \"\"\"Inner beam search loop.\n This function takes the current alive sequences, and grows them to topk\n sequences where k = 2*beam. We use 2*beam because, we could have beam_size\n number of sequences that might hit <EOS> and there will be no alive\n sequences to continue. With 2*beam_size, this will not happen. This relies\n on the assumption the vocab size is > beam size. If this is true, we'll\n have at least beam_size non <EOS> extensions if we extract the next top\n 2*beam words.\n Length penalty is given by ((5+len(decode))/6) ^ \\alpha, and scores are\n divided by it. Please refer to\n https://arxiv.org/abs/1609.08144.\n\n Args:\n i: loop index\n alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]\n alive_log_probs: probabilities of these sequences. [batch_size, beam_size]\n Returns:\n Tuple of\n (Topk sequences extended by the next word,\n The log probs of these sequences,\n The scores with length penalty of these sequences,\n Flags indicating which of these sequences have finished decoding,\n dict of transformed decoding states)\n \"\"\"\n # Get the logits for all the possible next symbols \n flat_ids = torch.reshape(alive_seq, [batch_size * beam_size, -1])\n # (batch_size * beam_size, decoded_length) \n flat_logits = symbols_to_logits_fn(flat_ids)\n logits = torch.reshape(flat_logits, [batch_size, beam_size, -1])\n # Take the log (assumes symbols_to_logits_fn returns normalized probabilities)\n candidate_log_probs = torch.log(logits)\n # Multiply the probabilities by the current probabilities of the beam.\n # (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1)\n log_probs = candidate_log_probs + torch.unsqueeze(alive_log_probs, 2)\n length_penalty = torch.pow(((5. 
+ torch.tensor(i+1, dtype=torch.float)) / 6.), alpha)\n curr_scores = log_probs / length_penalty\n # Flatten out (beam_size, vocab_size) probs into a list of possibilities\n flat_curr_scores = torch.reshape(curr_scores, [-1, beam_size * vocab_size]) \n topk_scores, topk_ids = torch.topk(flat_curr_scores, k=beam_size * 2) #[batch_size, beam_size*2]\n # Recovering the log probs because we will need to send them back\n topk_log_probs = topk_scores * length_penalty\n # Work out what beam the top probs are in.\n topk_beam_index = topk_ids // vocab_size\n topk_ids %= vocab_size # Unflatten the ids \n # The next three steps are to create coordinates for tf.gather_nd to pull\n # out the correct sequences from id's that we need to grow.\n # We will also use the coordinates to gather the booleans of the beam\n # items that survived.\n batch_pos = compute_batch_indices(batch_size, beam_size * 2)\n # top beams will give us the actual coordinates to do the gather.\n # stacking will create a tensor of dimension batch * beam * 2, where the\n # last dimension contains the i,j gathering coordinates.\n topk_coordinates = torch.stack([batch_pos, topk_beam_index], axis=2)\n seq_indexes = compute_seq_length(topk_beam_index, alive_seq.shape[-1])\n # Gather up the most probable 2*beams both for the ids and\n # finished_in_alive bools\n topk_seq = torch.gather(alive_seq, 1, seq_indexes) \n # Append the most probable alive\n topk_seq = torch.cat([topk_seq, torch.unsqueeze(topk_ids, 2)], axis=2) \n topk_finished = (topk_ids == eos_id)\n return topk_seq, topk_log_probs, topk_scores, topk_finished, states \n \n def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores,\n finished_flags):\n \"\"\"Inner beam search loop.\n\n There are three groups of tensors, alive, finished, and topk.\n The alive group contains information about the current alive sequences\n The topk group contains information about alive + topk current decoded words\n the finished group contains information about finished sentences, that is,\n the ones that have decoded to <EOS>. These are what we return.\n The general beam search algorithm is as follows:\n While we haven't terminated (please look at the termination condition)\n 1. Grow the current alive to get beam*2 topk sequences\n 2. Among the topk, keep the top beam_size ones that haven't reached EOS\n into alive\n 3. Among the topk, keep the top beam_size ones that have reached EOS into\n finished\n Repeat\n To make things simple by using fixed size tensors, we will end\n up inserting unfinished sequences into finished in the beginning. To stop\n that we add -ve INF to the score of the unfinished sequence so that when a\n true finished sequence does appear, it will have a higher score than all the\n unfinished ones.\n\n Args:\n i: loop index\n alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1]\n alive_log_probs: probabilities of the beams. 
[batch_size, beam_size]\n finished_seq: Current finished sequences.\n [batch_size, beam_size, i+1]\n finished_scores: scores for each of these sequences.\n [batch_size, beam_size]\n finished_flags: finished bools for each of these sequences.\n [batch_size, beam_size]\n\n Returns:\n Tuple of\n (Incremented loop index,\n New alive sequences,\n Log probs of the alive sequences,\n New finished sequences,\n Scores of the new finished sequences,\n Flags indicating which sequence in finished has reached EOS)\n \"\"\"\n\n # Each inner loop, we carry out three steps:\n # 1. Get the current topk items.\n # 2. Extract the ones that have finished and haven't finished\n # 3. Recompute the contents of finished based on scores.\n topk_seq, topk_log_probs, topk_scores, topk_finished, _ = grow_topk(\n i, alive_seq, alive_log_probs)\n alive_seq, alive_log_probs, _ = grow_alive(\n topk_seq, topk_scores, topk_log_probs, topk_finished)\n finished_seq, finished_scores, finished_flags = grow_finished(\n finished_seq, finished_scores, finished_flags, topk_seq, topk_scores,\n topk_finished)\n\n return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores,\n finished_flags) \n \n def _is_not_finished(i, alive_log_probs, finished_scores):\n \"\"\"Checking termination condition.\n\n We terminate when we decoded up to decode_length or the lowest scoring item\n in finished has a greater score than the highest prob item in alive divided\n by the max length penalty\n\n Args:\n i: loop index\n alive_log_probs: probabilities of the beams. [batch_size, beam_size]\n finished_scores: scores for each of these sequences.\n [batch_size, beam_size]\n\n Returns:\n Bool.\n \"\"\"\n max_length_penalty = torch.pow(((5. + torch.tensor(decode_length, dtype=torch.float)) / 6.), alpha)\n # The best possible score of the most likely alive sequence.\n lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty\n\n if not stop_early:\n # by considering the min score (in the top N beams) we ensure that\n # the decoder will keep decoding until there is at least one beam\n # (in the top N) that can be improved (w.r.t. the alive beams).\n # any unfinished beam will have score -INF - thus the min\n # will always be -INF if there is at least one unfinished beam -\n # which means the bound_is_met condition cannot be true in this case.\n lowest_score_of_finished_in_finished, _ = torch.min(finished_scores, axis=1)\n else:\n # by taking the max score we only care about the first beam;\n # as soon as this first beam cannot be beaten from the alive beams\n # the beam decoder can stop.\n # similarly to the above, if the top beam is not completed, its\n # finished_score is -INF, thus it will not activate the\n # bound_is_met condition. 
(i.e., decoder will keep going on).\n # note we need to find the max for every sequence separately - so, we need\n # to keep the batch dimension (see axis=1)\n lowest_score_of_finished_in_finished, _ = torch.max(finished_scores, axis=1)\n bound_is_met = torch.all(torch.gt(lowest_score_of_finished_in_finished,\n lower_bound_alive_scores))\n\n return torch.logical_and(\n torch.lt(i, decode_length), torch.logical_not(bound_is_met))\n \n cur_len = torch.tensor(0, dtype=torch.int)\n while _is_not_finished(cur_len, alive_log_probs, finished_scores):\n cur_len, alive_seq, alive_log_probs, finished_seq, finished_scores, finished_flags = \\\n inner_loop(cur_len, alive_seq, alive_log_probs, finished_seq, finished_scores, finished_flags)\n # Accounting for corner case: It's possible that no sequence in alive for a\n # particular batch item ever reached EOS. In that case, we should just copy\n # the contents of alive for that batch item. torch.any(finished_flags, 1)\n # if 0, means that no sequence for that batch index had reached EOS. We need\n # to do the same for the scores as well (with a 2-D mask, since the scores\n # are [batch_size, beam_size]).\n alive_seq = alive_seq.reshape((batch_size, beam_size, -1))\n finished_seq = finished_seq.reshape((batch_size, beam_size, -1))\n has_finished = torch.any(finished_flags, 1)\n seq_mask = has_finished.reshape(batch_size, 1, 1).repeat(1, beam_size, alive_seq.shape[-1])\n score_mask = has_finished.reshape(batch_size, 1).repeat(1, beam_size)\n finished_seq = torch.where(seq_mask, finished_seq, alive_seq)\n finished_scores = torch.where(score_mask, finished_scores, alive_log_probs)\n return finished_seq, finished_scores, states\n" ]
[ [ "torch.ones", "torch.max", "torch.zeros", "torch.cat", "torch.reshape", "torch.min", "torch.lt", "torch.logical_not", "torch.unsqueeze", "torch.tensor", "torch.top_k", "torch.any", "torch.log", "torch.where", "torch.arange", "torch.gather", "torch.gt", "torch.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jamfly/segmented-phone-ST
[ "4df11fd14e79f8116243a06e6fcbeceb60cd0e6f" ]
[ "espnet/nets/pytorch_backend/conformer/decoder_layer.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2019 Shigeki Karita\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"Decoder self-attention layer definition.\"\"\"\n\nimport torch\nfrom torch import nn\n\nfrom espnet.nets.pytorch_backend.transformer.layer_norm import LayerNorm\n\n\nclass DecoderLayer(nn.Module):\n \"\"\"Single decoder layer module.\n\n Args:\n size (int): Input dimension.\n self_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` instance can be used as the argument.\n src_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` instance can be used as the argument.\n feed_forward (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\n can be used as the argument.\n dropout_rate (float): Dropout rate.\n normalize_before (bool): Whether to use layer_norm before the first block.\n concat_after (bool): Whether to concat attention layer's input and output.\n if True, additional linear will be applied.\n i.e. x -> x + linear(concat(x, att(x)))\n if False, no additional linear will be applied. i.e. x -> x + att(x)\n\n\n \"\"\"\n\n def __init__(\n self,\n fusion_type,\n size,\n self_attn,\n src_attn,\n auxiliary_src_attn,\n feed_forward,\n dropout_rate,\n normalize_before=True,\n concat_after=False,\n ):\n \"\"\"Construct an DecoderLayer object.\"\"\"\n super(DecoderLayer, self).__init__()\n self.size = size\n self.self_attn = self_attn\n self.src_attn = src_attn\n self.auxiliary_src_attn = auxiliary_src_attn\n\n self.feed_forward = feed_forward\n self.norm1 = LayerNorm(size)\n self.norm2 = LayerNorm(size)\n self.norm3 = LayerNorm(size)\n self.norm4 = LayerNorm(size)\n \n self.fusion_type = fusion_type\n \n if fusion_type == \"stacked\":\n self.concat_linear3 = nn.Linear(size + size, size)\n elif fusion_type == \"gate\":\n self.gate = nn.Sequential(\n nn.Linear(size + size, size),\n nn.Sigmoid(),\n )\n else:\n AssertionError(f'not support {fusion_type} yet')\n \n self.dropout = nn.Dropout(dropout_rate)\n self.normalize_before = normalize_before\n self.concat_after = concat_after\n \n if self.concat_after:\n self.concat_linear1 = nn.Linear(size + size, size)\n self.concat_linear2 = nn.Linear(size + size, size)\n\n def forward(self, tgt, tgt_mask, memory, memory_mask, phone, phone_mask, cache=None):\n \"\"\"Compute decoded features.\n\n Args:\n tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size).\n tgt_mask (torch.Tensor): Mask for input tensor (#batch, maxlen_out).\n memory (torch.Tensor): Encoded memory, float32 (#batch, maxlen_in, size).\n memory_mask (torch.Tensor): Encoded memory mask (#batch, maxlen_in).\n cache (List[torch.Tensor]): List of cached tensors.\n Each tensor shape should be (#batch, maxlen_out - 1, size).\n\n Returns:\n torch.Tensor: Output tensor(#batch, maxlen_out, size).\n torch.Tensor: Mask for output tensor (#batch, maxlen_out).\n torch.Tensor: Encoded memory (#batch, maxlen_in, size).\n torch.Tensor: Encoded memory mask (#batch, maxlen_in).\n\n \"\"\"\n residual = tgt\n if self.normalize_before:\n tgt = self.norm1(tgt)\n\n if cache is None:\n tgt_q = tgt\n tgt_q_mask = tgt_mask\n else:\n # compute only the last frame query keeping dim: max_time_out -> 1\n assert cache.shape == (\n tgt.shape[0],\n tgt.shape[1] - 1,\n self.size,\n ), f\"{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}\"\n tgt_q = tgt[:, -1:, :]\n residual = residual[:, -1:, :]\n tgt_q_mask = None\n if tgt_mask 
is not None:\n tgt_q_mask = tgt_mask[:, -1:, :]\n\n if self.concat_after:\n tgt_concat = torch.cat(\n (tgt_q, self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)), dim=-1\n )\n x = residual + self.concat_linear1(tgt_concat)\n else:\n x = residual + self.dropout(self.self_attn(tgt_q, tgt, tgt, tgt_q_mask))\n if not self.normalize_before:\n x = self.norm1(x)\n\n residual = x\n if self.normalize_before:\n x = self.norm2(x)\n if self.concat_after:\n x_concat = torch.cat(\n (x, self.src_attn(x, memory, memory, memory_mask)), dim=-1\n )\n x = residual + self.concat_linear2(x_concat)\n else:\n src_attn = self.src_attn(x, memory, memory, memory_mask)\n if self.fusion_type == \"gate\":\n p_attn = self.auxiliary_src_attn(x, phone, phone, phone_mask)\n g = self.gate(torch.cat(\n (src_attn, p_attn), dim=-1)\n )\n x = g * src_attn + (1 - g) * p_attn\n x = residual + self.dropout(x)\n else:\n x = residual + self.dropout(src_attn)\n if not self.normalize_before:\n x = self.norm2(x)\n \n # fusion attention\n if self.fusion_type == \"stacked\":\n residual = x\n if self.normalize_before:\n x = self.norm3(x)\n if self.concat_after:\n attn = self.auxiliary_src_attn(x, phone, phone, phone_mask)\n x_concat = torch.cat(\n (x, attn), dim=-1\n )\n x = residual + self.concat_linear3(x_concat)\n else:\n attn = self.auxiliary_src_attn(x, phone, phone, phone_mask)\n x = residual + self.dropout(attn)\n if not self.normalize_before:\n x = self.norm3(x)\n\n residual = x\n\n if self.normalize_before:\n x = self.norm4(x)\n x = residual + self.dropout(self.feed_forward(x))\n if not self.normalize_before:\n x = self.norm4(x)\n\n if cache is not None:\n x = torch.cat([cache, x], dim=1)\n\n return x, tgt_mask, memory, memory_mask, phone, phone_mask\n" ]
[ [ "torch.nn.Linear", "torch.nn.Dropout", "torch.nn.Sigmoid", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Di-Is/stylegan2-ada
[ "c1228c08a27fda80e512cfecf3b10c3c93c8b6d3" ]
[ "training/dataset.py" ]
[ "# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\n\"\"\"Streaming images and labels from dataset created with dataset_tool.py.\"\"\"\n\nimport os\nimport glob\nimport numpy as np\nimport tensorflow.compat.v1 as tensorflow\ntf = tensorflow\ntf.disable_v2_behavior()\nimport dnnlib.tflib as tflib\n\n#----------------------------------------------------------------------------\n# Dataset class that loads images from tfrecords files.\n\nclass TFRecordDataset:\n def __init__(self,\n tfrecord_dir, # Directory containing a collection of tfrecords files.\n resolution = None, # Dataset resolution, None = autodetect.\n label_file = None, # Relative path of the labels file, None = autodetect.\n max_label_size = 0, # 0 = no labels, 'full' = full labels, <int> = N first label components.\n max_images = None, # Maximum number of images to use, None = use all images.\n max_validation = 10000, # Maximum size of the validation set, None = use all available images.\n mirror_augment = False, # Apply mirror augment?\n repeat = True, # Repeat dataset indefinitely?\n shuffle = True, # Shuffle images?\n shuffle_mb = 4096, # Shuffle data within specified window (megabytes), 0 = disable shuffling.\n prefetch_mb = 2048, # Amount of data to prefetch (megabytes), 0 = disable prefetching.\n buffer_mb = 256, # Read buffer size (megabytes).\n num_threads = 2, # Number of concurrent threads.\n _is_validation = False,\n):\n self.tfrecord_dir = tfrecord_dir\n self.resolution = None\n self.resolution_log2 = None\n self.shape = [] # [channels, height, width]\n self.dtype = 'uint8'\n self.label_file = label_file\n self.label_size = None # components\n self.label_dtype = None\n self.has_validation_set = None\n self.mirror_augment = mirror_augment\n self.repeat = repeat\n self.shuffle = shuffle\n self._max_validation = max_validation\n self._np_labels = None\n self._tf_minibatch_in = None\n self._tf_labels_var = None\n self._tf_labels_dataset = None\n self._tf_datasets = dict()\n self._tf_iterator = None\n self._tf_init_ops = dict()\n self._tf_minibatch_np = None\n self._cur_minibatch = -1\n self._cur_lod = -1\n\n # List files in the dataset directory.\n assert os.path.isdir(self.tfrecord_dir)\n all_files = sorted(glob.glob(os.path.join(self.tfrecord_dir, '*')))\n self.has_validation_set = (self._max_validation > 0) and any(os.path.basename(f).startswith('validation-') for f in all_files)\n all_files = [f for f in all_files if os.path.basename(f).startswith('validation-') == _is_validation]\n\n # Inspect tfrecords files.\n tfr_files = [f for f in all_files if f.endswith('.tfrecords')]\n assert len(tfr_files) >= 1\n tfr_shapes = []\n for tfr_file in tfr_files:\n tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)\n for record in tf.python_io.tf_record_iterator(tfr_file, tfr_opt):\n tfr_shapes.append(self.parse_tfrecord_np(record).shape)\n break\n\n # Autodetect label filename.\n if self.label_file is None:\n guess = [f for f in all_files if f.endswith('.labels')]\n if len(guess):\n self.label_file = guess[0]\n elif not os.path.isfile(self.label_file):\n guess = os.path.join(self.tfrecord_dir, self.label_file)\n if 
os.path.isfile(guess):\n self.label_file = guess\n\n # Determine shape and resolution.\n max_shape = max(tfr_shapes, key=np.prod)\n self.resolution = resolution if resolution is not None else max_shape[1]\n self.resolution_log2 = int(np.log2(self.resolution))\n self.shape = [max_shape[0], self.resolution, self.resolution]\n tfr_lods = [self.resolution_log2 - int(np.log2(shape[1])) for shape in tfr_shapes]\n assert all(shape[0] == max_shape[0] for shape in tfr_shapes)\n assert all(shape[1] == shape[2] for shape in tfr_shapes)\n assert all(shape[1] == self.resolution // (2**lod) for shape, lod in zip(tfr_shapes, tfr_lods))\n assert all(lod in tfr_lods for lod in range(self.resolution_log2 - 1))\n\n # Load labels.\n assert max_label_size == 'full' or max_label_size >= 0\n self._np_labels = np.zeros([1<<30, 0], dtype=np.float32)\n if self.label_file is not None and max_label_size != 0:\n self._np_labels = np.load(self.label_file)\n assert self._np_labels.ndim == 2\n if max_label_size != 'full' and self._np_labels.shape[1] > max_label_size:\n self._np_labels = self._np_labels[:, :max_label_size]\n if max_images is not None and self._np_labels.shape[0] > max_images:\n self._np_labels = self._np_labels[:max_images]\n self.label_size = self._np_labels.shape[1]\n self.label_dtype = self._np_labels.dtype.name\n\n # Build TF expressions.\n with tf.name_scope('Dataset'), tf.device('/cpu:0'), tf.control_dependencies(None):\n self._tf_minibatch_in = tf.placeholder(tf.int64, name='minibatch_in', shape=[])\n self._tf_labels_var = tflib.create_var_with_large_initial_value(self._np_labels, name='labels_var')\n self._tf_labels_dataset = tf.data.Dataset.from_tensor_slices(self._tf_labels_var)\n for tfr_file, tfr_shape, tfr_lod in zip(tfr_files, tfr_shapes, tfr_lods):\n if tfr_lod < 0:\n continue\n dset = tf.data.TFRecordDataset(tfr_file, compression_type='', buffer_size=buffer_mb<<20)\n if max_images is not None:\n dset = dset.take(max_images)\n dset = dset.map(self.parse_tfrecord_tf, num_parallel_calls=num_threads)\n dset = tf.data.Dataset.zip((dset, self._tf_labels_dataset))\n bytes_per_item = np.prod(tfr_shape) * np.dtype(self.dtype).itemsize\n if self.shuffle and shuffle_mb > 0:\n dset = dset.shuffle(((shuffle_mb << 20) - 1) // bytes_per_item + 1)\n if self.repeat:\n dset = dset.repeat()\n if prefetch_mb > 0:\n dset = dset.prefetch(((prefetch_mb << 20) - 1) // bytes_per_item + 1)\n dset = dset.batch(self._tf_minibatch_in)\n self._tf_datasets[tfr_lod] = dset\n self._tf_iterator = tf.data.Iterator.from_structure(self._tf_datasets[0].output_types, self._tf_datasets[0].output_shapes)\n self._tf_init_ops = {lod: self._tf_iterator.make_initializer(dset) for lod, dset in self._tf_datasets.items()}\n\n def close(self):\n pass\n\n # Use the given minibatch size and level-of-detail for the data returned by get_minibatch_tf().\n def configure(self, minibatch_size, lod=0):\n lod = int(np.floor(lod))\n assert minibatch_size >= 1 and lod in self._tf_datasets\n if self._cur_minibatch != minibatch_size or self._cur_lod != lod:\n self._tf_init_ops[lod].run({self._tf_minibatch_in: minibatch_size})\n self._cur_minibatch = minibatch_size\n self._cur_lod = lod\n\n # Get next minibatch as TensorFlow expressions.\n def get_minibatch_tf(self):\n images, labels = self._tf_iterator.get_next()\n if self.mirror_augment:\n images = tf.cast(images, tf.float32)\n images = tf.where(tf.random_uniform([tf.shape(images)[0]]) < 0.5, images, tf.reverse(images, [3]))\n images = tf.cast(images, self.dtype)\n return images, labels\n\n # Get 
next minibatch as NumPy arrays.\n def get_minibatch_np(self, minibatch_size, lod=0): # => (images, labels) or (None, None)\n self.configure(minibatch_size, lod)\n if self._tf_minibatch_np is None:\n with tf.name_scope('Dataset'):\n self._tf_minibatch_np = self.get_minibatch_tf()\n try:\n return tflib.run(self._tf_minibatch_np)\n except tf.errors.OutOfRangeError:\n return None, None\n\n # Get random labels as TensorFlow expression.\n def get_random_labels_tf(self, minibatch_size): # => labels\n with tf.name_scope('Dataset'):\n if self.label_size > 0:\n with tf.device('/cpu:0'):\n return tf.gather(self._tf_labels_var, tf.random_uniform([minibatch_size], 0, self._np_labels.shape[0], dtype=tf.int32))\n return tf.zeros([minibatch_size, 0], self.label_dtype)\n\n # Get random labels as NumPy array.\n def get_random_labels_np(self, minibatch_size): # => labels\n if self.label_size > 0:\n return self._np_labels[np.random.randint(self._np_labels.shape[0], size=[minibatch_size])]\n return np.zeros([minibatch_size, 0], self.label_dtype)\n\n # Load validation set as NumPy array.\n def load_validation_set_np(self):\n images = []\n labels = []\n if self.has_validation_set:\n validation_set = TFRecordDataset(\n tfrecord_dir=self.tfrecord_dir, resolution=self.shape[2], max_label_size=self.label_size,\n max_images=self._max_validation, repeat=False, shuffle=False, prefetch_mb=0, _is_validation=True)\n validation_set.configure(1)\n while True:\n image, label = validation_set.get_minibatch_np(1)\n if image is None:\n break\n images.append(image)\n labels.append(label)\n images = np.concatenate(images, axis=0) if len(images) else np.zeros([0] + self.shape, dtype=self.dtype)\n labels = np.concatenate(labels, axis=0) if len(labels) else np.zeros([0, self.label_size], self.label_dtype)\n assert list(images.shape[1:]) == self.shape\n assert labels.shape[1] == self.label_size\n assert images.shape[0] <= self._max_validation\n return images, labels\n\n # Parse individual image from a tfrecords file into TensorFlow expression.\n @staticmethod\n def parse_tfrecord_tf(record):\n features = tf.parse_single_example(record, features={\n 'shape': tf.FixedLenFeature([3], tf.int64),\n 'data': tf.FixedLenFeature([], tf.string)})\n data = tf.decode_raw(features['data'], tf.uint8)\n return tf.reshape(data, features['shape'])\n\n # Parse individual image from a tfrecords file into NumPy array.\n @staticmethod\n def parse_tfrecord_np(record):\n ex = tf.train.Example()\n ex.ParseFromString(record)\n shape = ex.features.feature['shape'].int64_list.value # pylint: disable=no-member\n data = ex.features.feature['data'].bytes_list.value[0] # pylint: disable=no-member\n return np.fromstring(data, np.uint8).reshape(shape)\n\n#----------------------------------------------------------------------------\n# Construct a dataset object using the given options.\n\ndef load_dataset(path=None, resolution=None, max_images=None, max_label_size=0, mirror_augment=False, repeat=True, shuffle=True, seed=None):\n _ = seed\n assert os.path.isdir(path)\n return TFRecordDataset(\n tfrecord_dir=path,\n resolution=resolution, max_images=max_images, max_label_size=max_label_size,\n mirror_augment=mirror_augment, repeat=repeat, shuffle=shuffle)\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.log2", "numpy.dtype", "numpy.concatenate", "numpy.fromstring", "numpy.floor", "numpy.prod", "numpy.load", "numpy.zeros", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
junwon1994/Coursera-ML
[ "91e96c3c14c058cd6d745a4fada1baf40d91458f" ]
[ "ex7/plotDataPoints.py" ]
[ "import matplotlib.pyplot as plt\nfrom show import show\n\n\ndef plotDataPoints(X, idx):\n \"\"\"plots data points in X, coloring them so that those\n with the same index assignments in idx have the same color\n \"\"\"\n # Create palette\n # palette = hsv(K + 1)\n # colors = palette(idx, :)\n #\n # # Plot the data\n\n cmap = plt.get_cmap(\"jet\")\n idxn = idx.astype('float') / max(idx.astype('float'))\n colors = cmap(idxn)\n plt.scatter(\n X[:, 0],\n X[:, 1],\n 15,\n edgecolors=colors,\n marker='o',\n facecolors='none',\n lw=0.5)\n" ]
[ [ "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.scatter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pzivich/MossSpider
[ "43cb6d22959afb47a9862f73754965473f42ddc1" ]
[ "mossspider/estimators/utils.py" ]
[ "import warnings\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport statsmodels.api as sm\n\n\ndef probability_to_odds(prob):\n \"\"\"Converts given probability (proportion) to odds\n\n Parameters\n ----------\n prob : float, array\n Probability or array of probabilities to convert to odds\n \"\"\"\n return prob / (1 - prob)\n\n\ndef odds_to_probability(odds):\n \"\"\"Converts given odds to probability\n\n Parameters\n ----------\n odds : float, array\n Odds or array of odds to convert to probabilities\n \"\"\"\n return odds / (1 + odds)\n\n\ndef exp_map(graph, var):\n \"\"\"Slow implementation of the exposure mapping functionality. Only supports the sum summary measure.\n Still used by the dgm files.\n\n Note\n ----\n Depreciated and no longer actively used by any functions.\n\n Parameters\n ----------\n graph : networkx.Graph\n Network to calculate the summary measure for.\n var : str\n Variable in the graph to calculate the summary measure for\n\n Returns\n -------\n array\n One dimensional array of calculated summary measure\n \"\"\"\n # get adjacency matrix\n matrix = nx.adjacency_matrix(graph, weight=None)\n # get node attributes\n y_vector = np.array(list(nx.get_node_attributes(graph, name=var).values()))\n # multiply the weight matrix by node attributes\n wy_matrix = np.nan_to_num(matrix * y_vector.reshape((matrix.shape[0]), 1)).flatten()\n return np.asarray(wy_matrix).flatten() # I hate converting between arrays and matrices...\n\n\ndef fast_exp_map(matrix, y_vector, measure):\n r\"\"\"Improved (computation-speed-wise) implementation of the exposure mapping functionality. Further supports a\n variety of summary measures. This is accomplished by using the adjacency matrix and vectors to efficiently\n calculate the summary measures (hence the function name). This is an improvement on previous iterations of this\n function.\n\n Available summary measures are\n\n Sum (``'sum'``) :\n\n .. math::\n\n X_i^s = \\sum_{j=1}^n X_j \\mathcal{G}_{ij}\n\n Mean (``'mean'``) :\n\n .. math::\n\n X_i^s = \\sum_{j=1}^n X_j \\mathcal{G}_{ij} / \\sum_{j=1}^n \\mathcal{G}_{ij}\n\n Variance (``'var'``):\n\n .. math::\n\n \\bar{X}_j = \\sum_{j=1}^n X_j \\mathcal{G}_{ij} \\\\\n X_i^s = \\sum_{j=1}^n (X_j - \\bar{X}_j)^2 \\mathcal{G}_{ij} / \\sum_{j=1}^n \\mathcal{G}_{ij}\n\n Mean distance (``'mean_dist'``) :\n\n .. math::\n\n X_i^s = \\sum_{j=1}^n (X_i - X_j) \\mathcal{G}_{ij} / \\sum_{j=1}^n \\mathcal{G}_{ij}\n\n Variance distance (``'var_dist'``) :\n\n .. math::\n\n \\bar{X}_{ij} = \\sum_{j=1}^n (X_i - X_j) \\mathcal{G}_{ij} \\\\\n X_i^s = \\sum_{j=1}^n ((X_j - X_j) - \\bar{X}_{ij})^2 \\mathcal{G}_{ij} / \\sum_{j=1}^n \\mathcal{G}_{ij}\n\n Note\n ----\n If you would like other summary measures to be added or made available, please reach out via GitHub.\n\n Parameters\n ----------\n matrix : array\n Adjacency matrix. Should be extract from a ``networkx.Graph`` via ``nx.adjacency_matrix(...)``\n y_vector : array\n Array of the variable to calculate the summary measure for. Should be in same order as ``matrix`` for\n calculation to work as intended.\n measure : str\n Summary measure to calculate. 
Options are provided above.\n\n Returns\n -------\n array\n One dimensional array of calculated summary measure\n \"\"\"\n if measure.lower() == 'sum':\n # multiply the weight matrix by node attributes\n wy_matrix = np.nan_to_num(matrix * y_vector.reshape((matrix.shape[0]), 1)).flatten()\n return np.asarray(wy_matrix).flatten() # converting between arrays and matrices...\n elif measure.lower() == 'mean':\n rowsum_vector = np.sum(matrix, axis=1) # calculate row-sum (denominator / degree)\n with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)\n warnings.simplefilter('ignore', RuntimeWarning)\n weight_matrix = matrix / rowsum_vector.reshape((matrix.shape[0]), 1) # calculate each node's weight\n wy_matrix = weight_matrix * y_vector.reshape((matrix.shape[0]), 1) # multiply matrix by node attributes\n return np.asarray(wy_matrix).flatten() # converting between arrays and matrices...\n elif measure.lower() == 'var':\n a = matrix.toarray() # Convert matrix to array\n a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's\n with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)\n warnings.simplefilter('ignore', RuntimeWarning)\n return np.nanvar(a * y_vector, axis=1)\n elif measure.lower() == 'mean_dist':\n a = matrix.toarray() # Convert matrix to array\n a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's\n c = (a * y_vector).transpose() - y_vector # Calculates the distance metric (needs transpose)\n with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)\n warnings.simplefilter('ignore', RuntimeWarning)\n return np.nanmean(c.transpose(), # back-transpose\n axis=1)\n elif measure.lower() == 'var_dist':\n a = matrix.toarray() # Convert matrix to array\n a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's\n c = (a * y_vector).transpose() - y_vector # Calculates the distance metric (needs transpose)\n with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)\n warnings.simplefilter('ignore', RuntimeWarning)\n return np.nanvar(c.transpose(), # back-transpose\n axis=1)\n else:\n raise ValueError(\"The summary measure mapping \" + str(measure) + \" is not available\")\n\n\ndef exp_map_individual(network, variable, max_degree):\n \"\"\"Summary measure calculation for the non-parametric mapping approach described in Sofrygin & van der Laan (2017).\n This approach works best for networks with uniform degree distributions. This summary measure generates a number\n of columns (a total of ``max_degree``). Each column is then an indicator variable for each observation. 
To keep\n all columns the same number of dimensions, zeroes are filled in for all degrees above unit i's observed degree.\n\n Parameters\n ----------\n network : networkx.Graph\n The NetworkX graph object to calculate the summary measure for.\n variable : str\n Variable to calculate the summary measure for (this will always be the exposure variable internally).\n max_degree : int\n Maximum degree in the network (defines the number of columns to generate).\n\n Returns\n -------\n dataframe\n Data set containing all generated columns\n \"\"\"\n attrs = []\n for i in network.nodes:\n j_attrs = []\n for j in network.neighbors(i):\n j_attrs.append(network.nodes[j][variable])\n attrs.append(j_attrs[:max_degree])\n\n return pd.DataFrame(attrs,\n columns=[variable+'_map'+str(x+1) for x in range(max_degree)])\n\n\ndef network_to_df(graph):\n \"\"\"Takes the input network and converts all node attributes to a pandas DataFrame object. This dataframe is then used\n within ``NetworkTMLE`` internally.\n\n Parameters\n ----------\n graph : networkx.Graph\n Graph with node attributes to transform into data set\n\n Returns\n -------\n dataframe\n Data set containing all node attributes\n \"\"\"\n return pd.DataFrame.from_dict(dict(graph.nodes(data=True)), orient='index')\n\n\ndef bounding(ipw, bound):\n \"\"\"Internal function to bound or truncate the estimated inverse probability weights.\n\n Parameters\n ----------\n ipw : array\n Estimated inverse probability weights to truncate.\n bound : list, float, int, set, array\n Bounds to truncate weights by.\n\n Returns\n -------\n array\n Truncated inverse probability weights.\n \"\"\"\n if type(bound) is float or type(bound) is int: # Symmetric bounding\n if bound > 1:\n ipw = np.where(ipw > bound, bound, ipw)\n ipw = np.where(ipw < 1 / bound, 1 / bound, ipw)\n elif 0 < bound < 1:\n ipw = np.where(ipw < bound, bound, ipw)\n ipw = np.where(ipw > 1 / bound, 1 / bound, ipw)\n else:\n raise ValueError('Bound must be a positive value')\n elif type(bound) is str: # Catching string inputs\n raise ValueError('Bounds must either be a float or integer, or a collection')\n else: # Asymmetric bounds\n if bound[0] > bound[1]:\n raise ValueError('Bound thresholds must be listed in ascending order')\n if len(bound) > 2:\n warnings.warn('It looks like your specified bounds contain more than two values. Only the first two '\n 'specified bounds are used by the bound statement. So only ' +\n str(bound[0:2]) + ' will be used', UserWarning)\n if type(bound[0]) is str or type(bound[1]) is str:\n raise ValueError('Bounds must be floats or integers')\n if bound[0] < 0 or bound[1] < 0:\n raise ValueError('Both bound values must be positive values')\n ipw = np.where(ipw < bound[0], bound[0], ipw)\n ipw = np.where(ipw > bound[1], bound[1], ipw)\n return ipw\n\n\ndef outcome_learner_fitting(ml_model, xdata, ydata):\n \"\"\"Internal function to fit custom_models for the outcome nuisance model.\n\n Parameters\n ----------\n ml_model :\n Unfitted model to be fit.\n xdata : array\n Covariate data to fit the model with\n ydata : array\n Outcome data to fit the model with\n\n Returns\n -------\n Fitted user-specified model\n \"\"\"\n try:\n fm = ml_model.fit(X=xdata, y=ydata)\n except TypeError:\n raise TypeError(\"Currently custom_model must have the 'fit' function with arguments 'X', 'y'. This \"\n \"covers both sklearn and supylearner. 
If there is a predictive model you would \"\n \"like to use, please open an issue at https://github.com/pzivich/zepid and I \"\n \"can work on adding support\")\n return fm\n\n\ndef outcome_learner_predict(ml_model_fit, xdata):\n \"\"\"Internal function to take a fitted custom_model for the outcome nuisance model and generate the predictions.\n\n Parameters\n ----------\n ml_model_fit :\n Fitted user-specified model\n xdata : array\n Covariate data to generate the predictions with.\n\n Returns\n -------\n array\n Predicted values for the outcome (probability if binary, and expected value otherwise)\n \"\"\"\n if hasattr(ml_model_fit, 'predict_proba'):\n g = ml_model_fit.predict_proba(xdata)\n if g.ndim == 1: # allows support for pygam.LogisticGAM\n return g\n else:\n return g[:, 1]\n elif hasattr(ml_model_fit, 'predict'):\n return ml_model_fit.predict(xdata)\n else:\n raise ValueError(\"Currently custom_model must have 'predict' or 'predict_proba' attribute\")\n\n\ndef exposure_machine_learner(ml_model, xdata, ydata, pdata):\n \"\"\"Internal function to fit custom_models for the exposure nuisance model and generate the predictions.\n\n Parameters\n ----------\n ml_model :\n Unfitted model to be fit.\n xdata : array\n Covariate data to fit the model with\n ydata : array\n Exposure data to fit the model with\n pdata : array\n Covariate data to generate the predictions with.\n\n Returns\n -------\n array\n Predicted values for the exposure (probability if binary, and expected value otherwise)\n \"\"\"\n # Fitting model\n try:\n fm = ml_model.fit(X=xdata, y=ydata)\n except TypeError:\n raise TypeError(\"Currently custom_model must have the 'fit' function with arguments 'X', 'y'. This \"\n \"covers both sklearn and supylearner. If there is a predictive model you would \"\n \"like to use, please open an issue at https://github.com/pzivich/zepid and I \"\n \"can work on adding support\")\n\n # Generating predictions\n if hasattr(fm, 'predict_proba'):\n g = fm.predict_proba(pdata)\n if g.ndim == 1: # allows support for pygam.LogisticGAM\n return g\n else:\n return g[:, 1]\n elif hasattr(fm, 'predict'):\n g = fm.predict(pdata)\n return g\n else:\n raise ValueError(\"Currently custom_model must have 'predict' or 'predict_proba' attribute\")\n\n\ndef targeting_step(y, q_init, ipw, verbose):\n r\"\"\"Estimate :math:`\\eta` via the targeting model\n\n Parameters\n ----------\n y : array\n Observed outcome values.\n q_init : array\n Predicted outcome values under the observed values of exposure.\n ipw : array\n Estimated inverse probability weights.\n verbose : bool\n Whether to print the summary details of the targeting model.\n\n Returns\n -------\n float\n Estimated value to use to target the outcome model predictions\n \"\"\"\n f = sm.families.family.Binomial()\n log = sm.GLM(y, # Outcome / dependent variable\n np.repeat(1, y.shape[0]), # Generating intercept only model\n offset=np.log(probability_to_odds(q_init)), # Offset by g-formula predictions\n freq_weights=ipw, # Weighted by calculated IPW\n family=f).fit(maxiter=500)\n\n if verbose: # Optional argument to print each intermediary result\n print('==============================================================================')\n print('Targeting Model')\n print(log.summary())\n\n return log.params[0] # Returns single-step estimated Epsilon term\n\n\ndef tmle_unit_bounds(y, mini, maxi):\n \"\"\"Bounding for continuous outcomes for TMLE.\n\n Parameters\n ----------\n y : array\n Observed outcome values\n mini : float\n Lower bound to apply\n maxi : 
float\n Upper bound to apply\n\n Returns\n -------\n array\n Bounded outcomes\n \"\"\"\n return (y - mini) / (maxi - mini)\n\n\ndef tmle_unit_unbound(ystar, mini, maxi):\n \"\"\"Unbound the bounded continuous outcomes for presentation of results.\n\n Parameters\n ----------\n ystar : array\n Bounded outcome values\n mini : float\n Lower bound to apply\n maxi : float\n Upper bound to apply\n\n Returns\n -------\n array\n Unbounded outcomes\n \"\"\"\n return ystar*(maxi - mini) + mini\n\n\ndef create_threshold(data, variables, thresholds):\n \"\"\"Internal function to create threshold variables given setup information.\n\n Parameters\n ----------\n data : dataframe\n Data set to calculate the measure for\n variables : list, set\n List of variable names to create the threshold variables for\n thresholds : list, set\n List of values (float or int) to create the thresholds at.\n\n Returns\n -------\n None\n \"\"\"\n for v, t in zip(variables, thresholds):\n if type(t) is float:\n label = v + '_t' + str(int(t * 100))\n else:\n label = v + '_t' + str(t)\n data[label] = np.where(data[v] > t, 1, 0)\n\n\ndef create_categorical(data, variables, bins, labels, verbose=False):\n \"\"\"Internal function to create categorical variables from the specified bins and labels.\n\n Parameters\n ----------\n data : dataframe\n Data set to calculate the measure for\n variables : list, set\n List of variable names to create the threshold variables for\n bins : list, set\n List of lists of values (float or int) to create bins at.\n labels : list, set\n List of lists of labels (str) to apply as the new column names\n verbose : bool, optional\n Whether to warn the user if any NaN values occur (a result of bad or incompletely specified bins). Internally,\n this option is always set to be True (since it is important for the user to recognize this issue).\n\n Returns\n -------\n None\n \"\"\"\n for v, b, l in zip(variables, bins, labels):\n col_label = v + '_c'\n data[col_label] = pd.cut(data[v],\n bins=b,\n labels=l,\n include_lowest=True).astype(float)\n if verbose:\n if np.any(data[col_label].isna()):\n warnings.warn(\"It looks like some of your categories have missing values when being generated on the \"\n \"input data. Please check pandas.cut to make sure the `bins` and `labels` arguments are \"\n \"being used correctly.\", UserWarning)\n" ]
[ [ "numpy.asarray", "numpy.nanvar", "pandas.cut", "numpy.repeat", "numpy.where", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
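The `bounding` helper in the file above truncates inverse-probability weights either symmetrically (a single positive number) or with an explicit lower/upper pair. A minimal, self-contained sketch of the same clipping logic, assuming NumPy only (the name `truncate_ipw` is illustrative, not part of zEpid):

import numpy as np

def truncate_ipw(ipw, bound):
    # A single positive number b clips the weights to [min(b, 1/b), max(b, 1/b)];
    # a (lower, upper) pair clips to that explicit interval.
    ipw = np.asarray(ipw, dtype=float)
    if np.isscalar(bound):
        if bound <= 0:
            raise ValueError("Bound must be a positive value")
        lo, hi = sorted((float(bound), 1.0 / float(bound)))
        return np.clip(ipw, lo, hi)
    lo, hi = float(bound[0]), float(bound[1])
    if lo > hi:
        raise ValueError("Bound thresholds must be listed in ascending order")
    return np.clip(ipw, lo, hi)

print(truncate_ipw([0.02, 0.5, 40.0], 10))  # -> [ 0.1  0.5 10. ]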
NeLy-EPFL/utils2p
[ "104620a55e0f5b6cf368e89131003773a5e877a4" ]
[ "utils2p/synchronization.py" ]
[ "\"\"\"\nSynchronization module\n======================\n\nThis module provides functions to process the synchronization data\nacquired with Thor Sync during imaging.\n\"\"\"\nimport warnings\nimport json\n\nimport numpy as np\nimport h5py\nimport scipy.signal\n\nimport utils2p.main as main\n\n\nclass SynchronizationError(Exception):\n    \"\"\"The input data is not consistent with the synchronization assumptions.\"\"\"\n\n\ndef get_lines_from_h5_file(file_path, line_names):\n warnings.warn(\n \"get_lines_from_h5_file is deprecated use get_lines_from_sync_file instead\",\n DeprecationWarning)\n return get_lines_from_sync_file(file_path, line_names)\n\n\ndef get_lines_from_sync_file(file_path, line_names):\n \"\"\"\n This function returns the values of the requested lines saved in\n an h5 file generated by ThorSync.\n\n Parameters\n ----------\n file_path : string\n Path to h5 file.\n line_names : list of strings\n List of the ThorSync line names to be returned.\n\n Returns\n -------\n lines : tuple\n Line arrays in the same order as given in line_names.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_z_stack\")\n >>> line_names = [\"Frame Counter\", \"Capture On\"]\n >>> frame_counter, capture_on = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> type(frame_counter)\n <class 'numpy.ndarray'>\n >>> frame_counter.shape\n (54000,)\n >>> type(capture_on)\n <class 'numpy.ndarray'>\n >>> capture_on.shape\n (54000,)\n \"\"\"\n lines = []\n\n with h5py.File(file_path, \"r\") as f:\n for name in line_names:\n lines_with_this_name = []\n for line_type in (\"DI\", \"CI\", \"AI\"):\n try:\n lines_with_this_name.append(\n f[line_type][name][:].squeeze())\n except KeyError:\n pass\n if len(lines_with_this_name) == 1:\n lines.append(lines_with_this_name[0])\n elif len(lines_with_this_name) == 0:\n DI_keys = list(f[\"DI\"].keys())\n CI_keys = list(f[\"CI\"].keys())\n AI_keys = list(f[\"AI\"].keys())\n raise KeyError(\n f\"No line named '{name}' exists. The digital lines are \" +\n f\"{DI_keys}, the continuous lines are {CI_keys}, \" +\n f\"and the analogue inputs are {AI_keys}.\")\n else:\n DI_keys = list(f[\"DI\"].keys())\n CI_keys = list(f[\"CI\"].keys())\n AI_keys = list(f[\"AI\"].keys())\n raise KeyError(\n f\"Multiple lines named '{name}' exist. \" +\n f\"The digital lines are {DI_keys}, the continuous lines \" +\n f\"are {CI_keys}, and the analogue inputs are {AI_keys}.\"\n )\n return tuple(lines)\n\n\ndef get_times(length, freq):\n \"\"\"\n This function returns the time point of each tick\n for a given sequence length and tick frequency.\n\n Parameters\n ----------\n length : int\n Length of sequence.\n freq : float\n Frequency in Hz.\n\n Returns\n -------\n times : array\n Times in seconds.\n\n Examples\n --------\n >>> import utils2p.synchronization\n >>> utils2p.synchronization.get_times(5, 20)\n array([0. , 0.05, 0.1 , 0.15, 0.2 ])\n \"\"\"\n times = np.arange(0, length / freq, 1 / freq)\n return times\n\n\ndef edges(line, size=0, correct_possible_split_edges=True):\n \"\"\"\n Returns the indices of edges in a line. An\n edge is a change in value of the line. A size\n argument can be specified to filter for changes\n of specific magnitude. By default only rising\n edges (increases in value) are returned.\n\n Parameters\n ----------\n line : numpy array\n Line signal from h5 file.\n size : float or tuple\n Size of the rising edge. If float it is used as minimum.\n Tuples specify a range. 
To get falling edges use negative values.\n Only one boundary can be applied using np.inf as one of the values.\n All boundaries exclude the specified value.\n correct_possible_split_edges : boolean\n The rise or fall of an edge can in some cases be spread over\n several ticks. If `True` these \"blurry\" edges are sharpened\n with :func:`utils2p.synchronization.correct_split_edges`.\n Default is True.\n\n Returns\n -------\n indices : list\n Indices of the rising edges.\n\n Examples\n --------\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> binary_line = np.array([0, 1, 1, 0, 1, 1])\n >>> utils2p.synchronization.edges(binary_line)\n (array([1, 4]),)\n >>> utils2p.synchronization.edges(binary_line, size=2)\n (array([], dtype=int64),)\n >>> utils2p.synchronization.edges(binary_line, size=(-np.inf, np.inf))\n (array([1, 3, 4]),)\n >>> continuous_line = np.array([0, 0, 3, 3, 3, 5, 5, 8, 8, 10, 10, 10])\n >>> utils2p.synchronization.edges(continuous_line)\n (array([2, 5, 7, 9]),)\n >>> utils2p.synchronization.edges(continuous_line, size=2)\n (array([2, 7]),)\n >>> utils2p.synchronization.edges(continuous_line, size=(-np.inf, 3))\n (array([5, 9]),)\n \"\"\"\n if correct_possible_split_edges:\n line = correct_split_edges(line)\n diff = np.diff(line.astype(np.float64))\n if isinstance(size, tuple):\n zero_elements = np.isclose(diff, np.zeros_like(diff))\n edges_in_range = np.logical_and(diff > size[0], diff < size[1])\n valid_edges = np.logical_and(edges_in_range,\n np.logical_not(zero_elements))\n indices = np.where(valid_edges)\n else:\n indices = np.where(diff > size)\n indices = tuple(i + 1 for i in indices)\n return indices\n\n\ndef correct_split_edges(line):\n \"\"\"\n This function corrects edges that are spread over multiple ticks.\n\n Parameters\n ----------\n line : numpy array\n The line for which the edges should be corrected.\n\n Returns\n -------\n line : numpy array\n Line with corrected edges.\n\n Examples\n --------\n >>> import numpy as np\n >>> import utils2p.synchronization\n >>> line = np.array([0, 0, 0, 1, 2, 3, 3, 3, 2, 1, 0, 0, 0])\n >>> utils2p.synchronization.correct_split_edges(line)\n array([0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0])\n \"\"\"\n rising_edges = np.where(np.diff(line) > 0)[0] + 1\n falling_edges = np.where(np.diff(line) < 0)[0]\n\n split_rising_edges = np.where(np.diff(rising_edges) == 1)[0]\n split_falling_edges = np.where(np.diff(falling_edges) == 1)[0]\n\n if len(split_rising_edges) == 0 and len(split_falling_edges) == 0:\n return line\n\n first_halfs_rising = rising_edges[split_rising_edges]\n second_halfs_rising = rising_edges[split_rising_edges + 1]\n line[first_halfs_rising] = line[second_halfs_rising]\n\n first_halfs_falling = falling_edges[split_falling_edges]\n second_halfs_falling = falling_edges[split_falling_edges + 1]\n line[second_halfs_falling] = line[first_halfs_falling]\n\n # Recurse to fix edges spread over more than two ticks\n return correct_split_edges(line)\n\n\ndef get_start_times(line, times, zero_based_counter=False):\n \"\"\"\n Get the start times of a digital signal,\n i.e. the times of the rising edges.\n If the line is a zero based counter, such as the processed \n `frame_counter` or the processed `cam_line`, there is a\n possibility that the first element in line is already zero.\n This corresponds to the case where the acquisition of the\n first frame was triggered before ThorSync started.\n If `zero_based_counter` is `False` this frame will be\n dropped, i.e. 
no time for the frame is returned, since\n there is no rising edge corresponding to the frame.\n\n Parameters\n ----------\n line : numpy array\n Line signal from h5 file.\n times : numpy array\n Times returned by :func:`utils2p.synchronization.get_times`\n zero_based_counter : boolean\n Indicates whether the line is a zero based counter.\n\n Returns\n -------\n time_points : list\n List of the start times.\n\n Examples\n --------\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> binary_line = np.array([0, 1, 1, 0, 1, 1])\n >>> times = utils2p.synchronization.get_times(len(binary_line), freq=20)\n >>> times\n array([0. , 0.05, 0.1 , 0.15, 0.2 , 0.25])\n >>> utils2p.synchronization.get_start_times(binary_line, times)\n array([0.05, 0.2 ])\n \"\"\"\n indices = edges(line, size=(0, np.inf))\n if zero_based_counter and line[0] >= 0:\n if line[0] > 0:\n warnings.warn(f\"The counter starts with value {line[0]}\")\n indices_with_first_frame = np.zeros(len(indices[0]) + 1, dtype=int)\n indices_with_first_frame[1:] = indices[0]\n indices = (indices_with_first_frame, )\n time_points = times[indices]\n return time_points\n\n\ndef _capture_metadata(n_frames, dropped_frames=None):\n \"\"\"\n Returns a dictionary as it is usually saved by the seven\n camera setup in the \"capture_metadata.json\" file.\n It assumes that no frames were dropped.\n\n Parameters\n ----------\n n_frames : list of integers\n Number of frames for each camera.\n dropped_frames : list of list of integers\n Frames that were dropped for each camera.\n Default is None which means no frames were\n dropped.\n\n Returns\n -------\n capture_info : dict\n Default metadata dictionary for the seven camera\n system.\n \"\"\"\n if dropped_frames is None:\n dropped_frames = [[] for i in range(len(n_frames))]\n capture_info = {\"Frame Counts\": {}}\n for cam_idx, n in enumerate(n_frames):\n frames_dict = {}\n current_frame = 0\n for i in range(n):\n while current_frame in dropped_frames[cam_idx]:\n current_frame += 1\n frames_dict[str(i)] = current_frame\n current_frame += 1\n capture_info[\"Frame Counts\"][str(cam_idx)] = frames_dict\n return capture_info\n\n\ndef process_cam_line(line, seven_camera_metadata):\n \"\"\"\n Removes superfluous signals and uses frame numbers in array.\n The cam line signal from the h5 file is a binary sequence.\n Rising edges mark the acquisition of a new frame.\n The setup keeps producing rising edges after the acquisition of the\n last frame. These rising edges are ignored.\n This function converts the binary line to frame numbers using the\n information stored in the metadata file of the seven camera setup.\n In the metadata file the keys are the indices of the file names\n and the values are the grabbed frame numbers. Suppose the 3rd\n frame was dropped. Then the entries in the dictionary will\n be as follows:\n \"2\": 2\n \"3\": 4\n \"4\": 5\n\n Parameters\n ----------\n line : numpy array\n Line signal from h5 file.\n seven_camera_metadata : string\n Path to the json file saved by our camera software.\n This file is usually located in the same folder as the frames\n and is called 'capture_metadata.json'. 
If None, it is assumed\n that no frames were dropped.\n\n Returns\n -------\n processed_line : numpy array\n Array with frame number for each time point.\n If no frame is available for a given time,\n the value is -9223372036854775808.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_raw\")\n >>> seven_camera_metadata = utils2p.find_seven_camera_metadata_file(\"data/mouse_kidney_raw\")\n >>> line_names = [\"Basler\"]\n >>> (cam_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> set(np.diff(cam_line))\n {0, 8, 4294967288}\n >>> processed_cam_line = utils2p.synchronization.process_cam_line(cam_line, seven_camera_metadata)\n >>> set(np.diff(processed_cam_line))\n {0, 1, -9223372036854775808, 9223372036854775749}\n >>> cam_line = np.array([0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0])\n >>> utils2p.synchronization.process_cam_line(cam_line, seven_camera_metadata=None)\n array([-9223372036854775808, 0, 0,\n 0, 0, 0,\n 1, 1, 1,\n 1, 1])\n \"\"\"\n # Check that sequence is binary\n if len(set(line)) > 2:\n raise ValueError(\"Invalid line argument. Sequence is not binary.\")\n\n # Find indices of the start of each frame acquisition\n rising_edges = edges(line, (0, np.inf))[0]\n\n # Load capture metadata or generate default\n if seven_camera_metadata is not None:\n with open(seven_camera_metadata, \"r\") as f:\n capture_info = json.load(f)\n else:\n capture_info = _capture_metadata([\n len(rising_edges),\n ])\n\n # Find the number of frames for each camera\n n_frames = []\n for cam_idx in capture_info[\"Frame Counts\"].keys():\n max_in_json = max(capture_info[\"Frame Counts\"][cam_idx].values())\n n_frames.append(max_in_json + 1)\n\n # Ensure all cameras acquired the same number of frames\n if len(np.unique(n_frames)) > 1:\n raise SynchronizationError(\n \"The frames across cameras are not synchronized.\")\n\n # Last rising edge that corresponds to a frame\n last_tick = max(n_frames)\n\n # check that there is a rising edge for every frame\n if len(rising_edges) < last_tick:\n raise ValueError(\n \"The provided cam line and metadata are inconsistent. 
\" +\n \"cam line has fewer frame acquisitions than metadata.\")\n\n # Ensure correct handling if no rising edges are present after last frame\n if len(rising_edges) == int(last_tick):\n average_frame_length = int(np.mean(np.diff(rising_edges)))\n last_rising_edge = rising_edges[-1]\n additional_edge = last_rising_edge + average_frame_length\n if additional_edge > len(line):\n additional_edge = len(line)\n rising_edges = list(rising_edges)\n rising_edges.append(additional_edge)\n rising_edges = np.array(rising_edges)\n\n processed_line = np.ones_like(line) * np.nan\n\n current_frame = 0\n first_camera_used = sorted(list(capture_info[\"Frame Counts\"].keys()))[0]\n for i, (start, stop) in enumerate(\n zip(rising_edges[:last_tick], rising_edges[1:last_tick + 1])):\n if capture_info[\"Frame Counts\"][first_camera_used][str(current_frame +\n 1)] <= i:\n current_frame += 1\n processed_line[start:stop] = current_frame\n return processed_line.astype(np.int)\n\n\ndef process_frame_counter(line, metadata=None, steps_per_frame=None):\n \"\"\"\n Converts the frame counter line to an array with frame numbers for each\n time point.\n\n Parameters\n ----------\n line : numpy array\n Line signal from h5 file.\n metadata : :class:`utils2p.Metadata`\n :class:`utils2p.Metadata` object holding the 2p imaging\n metadata for the experiment. Optional. If metadata is not\n given steps_per_frame has to be set.\n steps_per_frame : int\n Number of steps the frame counter takes per frame.\n This includes flyback frames and averaging, i.e. if you\n acquire one frame and flyback frames is set to 3 this number\n should be 4.\n\n Returns\n -------\n processed_frame_counter : numpy array\n Array with frame number for each time point.\n If no frame was recorded at a time point, \n the value is -9223372036854775808.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_z_stack\")\n >>> line_names = [\"Frame Counter\",]\n >>> (frame_counter,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> set(frame_counter)\n {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}\n >>> metadata_file = utils2p.find_metadata_file(\"data/mouse_kidney_z_stack\")\n >>> metadata = utils2p.Metadata(metadata_file)\n >>> processed_frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, metadata)\n >>> set(processed_frame_counter)\n {0, -9223372036854775808}\n >>> steps_per_frame = metadata.get_n_z() * metadata.get_n_averaging()\n >>> steps_per_frame\n 30\n >>> processed_frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=steps_per_frame)\n >>> set(processed_frame_counter)\n {0, -9223372036854775808}\n\n By default the function treats volumes as frames.\n If you want to treat every slice of the volume as a separate frame,\n you can do so by setting `steps_per_frame` accordingly. 
The example has three steps in z.\n \n >>> steps_per_frame = metadata.get_n_averaging()\n >>> steps_per_frame\n 10\n >>> processed_frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=steps_per_frame)\n >>> set(processed_frame_counter)\n {0, 1, 2, -9223372036854775808}\n \"\"\"\n if metadata is not None and steps_per_frame is not None:\n warnings.warn(\"metadata argument will be ignored \" +\n \"because steps_per_frame argument was set.\")\n if metadata is not None and not isinstance(metadata, main.Metadata):\n raise TypeError(\n \"metadata argument must be of type utils2p.Metadata or None.\")\n if steps_per_frame is not None and not isinstance(steps_per_frame, int):\n raise TypeError(f\"steps_per_frame has to be of type int, not {type(steps_per_frame)}\")\n\n if metadata is not None and steps_per_frame is None:\n if metadata.get_value(\"Streaming\", \"zFastEnable\") == \"0\":\n steps_per_frame = 1\n else:\n steps_per_frame = metadata.get_n_z()\n if metadata.get_value(\"Streaming\", \"enable\") == \"1\":\n steps_per_frame += metadata.get_n_flyback_frames()\n if metadata.get_value(\n \"LSM\",\n \"averageMode\") == \"1\" and metadata.get_area_mode() not in [\n \"line\", \"kymograph\"\n ]:\n steps_per_frame = steps_per_frame * metadata.get_n_averaging()\n elif steps_per_frame is None:\n raise ValueError(\"If no metadata object is given, \" +\n \"the steps_per_frame argument has to be set.\")\n\n processed_frame_counter = np.ones_like(line) * np.nan\n rising_edges = edges(line, (0, np.inf))[0]\n\n # Case of one frame/volume only\n if len(rising_edges) <= steps_per_frame:\n processed_frame_counter[rising_edges[0]:] = 0\n return processed_frame_counter.astype(np.int)\n\n for i, index in enumerate(\n range(0,\n len(rising_edges) - steps_per_frame, steps_per_frame)):\n processed_frame_counter[\n rising_edges[index]:rising_edges[index + steps_per_frame]] = i\n processed_frame_counter[rising_edges[-1 * steps_per_frame]:] = (\n processed_frame_counter[rising_edges[-1 * steps_per_frame] - 1] + 1)\n return processed_frame_counter.astype(np.int)\n\n\ndef process_stimulus_line(line):\n \"\"\"\n This function converts the stimulus line to an array with\n 0s and 1s for stimulus off and on respectively. The raw\n stimulus line can contain values larger than 1.\n\n Parameters\n ----------\n line : numpy array\n Line signal from h5 file.\n\n Returns\n -------\n processed_stimulus_line : numpy array\n Array with binary stimulus state for each time point.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_raw\")\n >>> line_names = [\"CO2_Stim\"]\n >>> (stimulus_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> set(stimulus_line)\n {0, 4}\n >>> processed_stimulus_line = utils2p.synchronization.process_stimulus_line(stimulus_line)\n >>> set(processed_stimulus_line)\n {0, 1}\n \"\"\"\n processed_stimulus_line = np.zeros_like(line)\n indices = np.where(line > 0)\n processed_stimulus_line[indices] = 1\n return processed_stimulus_line.astype(np.int)\n\n\ndef process_optical_flow_line(line):\n \"\"\"\n This function converts the optical flow line\n into a step function. The value corresponds\n to the index of the optical flow value at this\n time point. 
If the value is -9223372036854775808, no optical flow\n value was recorded for this time point.\n\n Note: Due to the time it takes to transfer the data\n from the Arduino to the computer it is possible that\n the last optical flow data point is missing, i.e.\n the processed optical flow line indicates one more\n data point than the text file contains. This can be\n solved by cropping all lines before the acquisition\n of the last optical flow data point. Lines can be\n cropped with :func:`crop_lines`.\n\n Parameters\n ----------\n line : numpy array\n Line signal for h5 file.\n\n Returns\n -------\n processed_optical_flow_line : numpy array\n Array with monotonically increasing step\n function.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_raw\")\n >>> line_names = [\"OpFlow\"]\n >>> (optical_flow_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> set(optical_flow_line)\n {0, 16}\n >>> processed_optical_flow_line = utils2p.synchronization.process_optical_flow_line(optical_flow_line)\n >>> len(set(processed_optical_flow_line))\n 1409\n \"\"\"\n processed_optical_flow_line = np.ones_like(line) * np.nan\n rising_edges = edges(line, (0, np.inf))[0]\n for i in range(0, len(rising_edges) - 1):\n processed_optical_flow_line[rising_edges[i]:rising_edges[i + 1]] = i\n processed_optical_flow_line[rising_edges[-1]:] = (\n processed_optical_flow_line[rising_edges[-1] - 1] + 1)\n return processed_optical_flow_line.astype(np.int)\n\n\ndef crop_lines(mask, lines):\n \"\"\"\n This function crops all lines based on a binary signal/mask.\n The 'Capture On' line of the h5 file can be used as a mask.\n\n Parameters\n ----------\n mask : numpy array\n Mask that is used for cropping.\n lines : list of numpy arrays\n List of the lines that should be cropped.\n\n Returns\n -------\n cropped_lines : tuple of numpy arrays\n Tuple of cropped lines in same order as in input list.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_raw\")\n >>> line_names = [\"Frame Counter\", \"Capture On\", \"CO2_Stim\", \"OpFlow\"]\n >>> (frame_counter, capture_on, stimulus_line, optical_flow_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=4)\n >>> len(frame_counter), len(capture_on), len(stimulus_line), len(optical_flow_line)\n (117000, 117000, 117000, 117000)\n >>> mask = np.logical_and(frame_counter >= 0, capture_on)\n >>> np.sum(mask)\n 105869\n >>> (frame_counter, capture_on, stimulus_line, optical_flow_line,) = utils2p.synchronization.crop_lines(mask, (frame_counter, capture_on, stimulus_line, optical_flow_line,))\n >>> len(frame_counter), len(capture_on), len(stimulus_line), len(optical_flow_line)\n (105869, 105869, 105869, 105869)\n >>> line = np.arange(10)\n >>> mask = np.ones(10, dtype=np.bool)\n >>> mask[0] = False\n >>> mask[-1] = False\n >>> mask[4] = False\n >>> utils2p.synchronization.crop_lines(mask, (line,))\n (array([1, 2, 3, 4, 5, 6, 7, 8]),)\n \"\"\"\n indices = np.where(mask)[0]\n first_idx = indices[0]\n last_idx = indices[-1]\n cropped_lines = []\n for line in lines:\n cropped_lines.append(line[first_idx:last_idx + 1])\n return tuple(cropped_lines)\n\n\ndef beh_idx_to_2p_idx(beh_indices, cam_line, frame_counter):\n 
\"\"\"\n This function converts behaviour frame numbers into the corresponding\n 2p frame numbers.\n\n Parameters\n ----------\n beh_indices : numpy array\n Indices of the behaviour frames to be converted.\n cam_line : numpy array\n Processed cam line.\n frame_counter : numpy array\n Processed frame counter.\n\n Returns\n -------\n indices_2p : numpy array\n Corresponding 2p frame indices.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_raw\")\n >>> line_names = [\"Frame Counter\", \"Basler\"]\n >>> (frame_counter, cam_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=4)\n >>> seven_camera_metadata = utils2p.find_seven_camera_metadata_file(\"data/mouse_kidney_raw\")\n >>> cam_line = utils2p.synchronization.process_cam_line(cam_line, seven_camera_metadata)\n >>> utils2p.synchronization.beh_idx_to_2p_idx(np.array([0,]), cam_line, frame_counter)\n array([-9223372036854775808])\n >>> utils2p.synchronization.beh_idx_to_2p_idx(np.array([10,]), cam_line, frame_counter)\n array([0])\n >>> utils2p.synchronization.beh_idx_to_2p_idx(np.arange(30), cam_line, frame_counter)\n array([-9223372036854775808, 0, 0,\n 0, 0, 0,\n 0, 0, 0,\n 0, 0, 0,\n 0, 0, 0,\n 0, 0, 0,\n 0, 0, 0,\n 0, 1, 1,\n 1, 1, 1,\n 1, 1, 1])\n \"\"\"\n thor_sync_indices = edges(cam_line)[0]\n if not cam_line[0] < 0:\n thor_sync_indices = np.append(np.array([0]), thor_sync_indices)\n\n indices_2p = np.ones(len(beh_indices), dtype=np.int) * np.nan\n\n first_frame_of_cam_line = np.min(cam_line[np.where(cam_line >= 0)])\n\n for i, frame_num in enumerate(beh_indices):\n\n # This is necessary for cropped lines that don't start at 0\n frame_num = frame_num - first_frame_of_cam_line\n if frame_num < 0:\n raise ValueError(f\"{frame_num + first_frame_of_cam_line} is smaller than first frame in cam_line ({first_frame_of_cam_line})\")\n\n thor_sync_index = thor_sync_indices[frame_num]\n indices_2p[i] = frame_counter[thor_sync_index]\n\n return indices_2p.astype(np.int)\n\n\ndef reduce_during_2p_frame(frame_counter, values, function):\n \"\"\"\n Reduces all values occurring during the acquisition of a\n 2-photon frame to a single value using the `function`\n given by the user.\n\n Parameters\n ----------\n frame_counter : numpy array\n Processed frame counter.\n values : numpy array\n Values upsampled to the frequency of ThorSync,\n i.e. 1D numpy array of the same length as\n `frame_counter`.\n function : function\n Function used to reduce the value,\n e.g. 
np.mean.\n\n Returns\n -------\n reduced : numpy array\n Numpy array with value for each 2p frame.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_raw\")\n >>> line_names = [\"Frame Counter\", \"CO2_Stim\"]\n >>> (frame_counter, stimulus_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=1)\n >>> stimulus_line = utils2p.synchronization.process_stimulus_line(stimulus_line)\n >>> np.max(frame_counter)\n 4\n >>> stimulus_during_2p_frames = utils2p.synchronization.reduce_during_2p_frame(frame_counter, stimulus_line, np.mean)\n >>> len(stimulus_during_2p_frames)\n 5\n >>> np.max(stimulus_during_2p_frames)\n 0.7136134613556422\n >>> stimulus_during_2p_frames = utils2p.synchronization.reduce_during_2p_frame(frame_counter, stimulus_line, np.max)\n >>> len(stimulus_during_2p_frames)\n 5\n >>> set(stimulus_during_2p_frames)\n {0.0, 1.0}\n \"\"\"\n warnings.warn(\n \"reduce_during_2p_frame is deprecated use reduce_during_frame instead\",\n DeprecationWarning)\n return reduce_during_frame(frame_counter, values, function)\n\n\ndef reduce_during_frame(line, values, function):\n \"\"\"\n Reduces all values occurring during the acquisition of a\n frame to a single value using the `function` given by the user.\n The line function should be of the resolution of\n the ThorSync ticks and have the frame index as values.\n Possible choices are the processed frame_counter line or the\n processed cam_line.\n\n Parameters\n ----------\n line : numpy array\n Line holding frame indices.\n values : numpy array\n Values upsampled to the frequency of ThorSync,\n i.e. 1D numpy array of the same length as\n `frame_counter`.\n function : function\n Function used to reduce the value,\n e.g. 
np.mean.\n\n Returns\n -------\n reduced : numpy array\n Numpy array with value for each 2p frame.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> import numpy as np\n >>> h5_file = utils2p.find_sync_file(\"data/mouse_kidney_raw\")\n >>> line_names = [\"Frame Counter\", \"CO2_Stim\"]\n >>> (frame_counter, stimulus_line,) = utils2p.synchronization.get_lines_from_h5_file(h5_file, line_names)\n >>> frame_counter = utils2p.synchronization.process_frame_counter(frame_counter, steps_per_frame=1)\n >>> stimulus_line = utils2p.synchronization.process_stimulus_line(stimulus_line)\n >>> np.max(frame_counter)\n 4\n >>> stimulus_during_2p_frames = utils2p.synchronization.reduce_during_frame(frame_counter, stimulus_line, np.mean)\n >>> len(stimulus_during_2p_frames)\n 5\n >>> np.max(stimulus_during_2p_frames)\n 0.7136134613556422\n >>> stimulus_during_2p_frames = utils2p.synchronization.reduce_during_frame(frame_counter, stimulus_line, np.max)\n >>> len(stimulus_during_2p_frames)\n 5\n >>> set(stimulus_during_2p_frames)\n {0.0, 1.0}\n \"\"\"\n if len(line) != len(values):\n raise ValueError(\"line and values need to have the same length.\")\n\n thor_sync_indices = tuple(edges(line, (0, np.inf))[0])\n\n starts = thor_sync_indices\n stops = thor_sync_indices[1:] + (len(line), )\n\n if not line[0] == -9223372036854775808:\n starts = (0, ) + starts\n stops = (thor_sync_indices[0], ) + stops\n\n dtype = values.dtype\n if np.issubdtype(dtype, np.number):\n dtype = np.float\n else:\n dtype = np.object\n reduced = np.empty(len(starts), dtype=dtype)\n\n for i, (start, stop) in enumerate(zip(starts, stops)):\n reduced[i] = function(values[start:stop])\n\n return reduced\n\n\nclass SyncMetadata(main._XMLFile):\n \"\"\"\n Class for managing ThorSync metadata.\n Loads metadata file 'ThorRealTimeDataSettings.xml'\n and returns the root of an ElementTree.\n\n Parameters\n ----------\n path : string\n Path to xml file.\n\n Returns\n -------\n Instance of class Metadata\n Based on given xml file.\n\n Examples\n --------\n >>> import utils2p.synchronization\n >>> metadata = utils2p.synchronization.SyncMetadata(\"data/mouse_kidney_raw/2p/Sync-025/ThorRealTimeDataSettings.xml\")\n >>> type(metadata)\n <class 'utils2p.synchronization.SyncMetadata'>\n \"\"\"\n def get_active_devices(self):\n active_devices = []\n for device in self.get_value(\"DaqDevices\", \"AcquireBoard\"):\n if device.attrib[\"active\"] == \"1\":\n active_devices.append(device)\n return active_devices\n\n def get_freq(self):\n \"\"\"\n Returns the frequency of the ThorSync\n value acquisition, i.e. the sample rate.\n\n Returns\n -------\n freq : integer\n Sample frequency in Hz.\n\n Examples\n --------\n >>> import utils2p.synchronization\n >>> metadata = utils2p.synchronization.SyncMetadata(\"data/mouse_kidney_raw/2p/Sync-025/ThorRealTimeDataSettings.xml\")\n >>> metadata.get_freq()\n 30000\n \"\"\"\n sample_rate = -1\n for device in self.get_active_devices():\n set_for_device = False\n for element in device.findall(\"SampleRate\"):\n if element.attrib[\"enable\"] == \"1\":\n if set_for_device:\n raise ValueError(\n \"Invalid metadata file. 
Multiple sample rates \" +\n f\"are enabled for device {device.tag}\")\n if sample_rate != -1:\n raise ValueError(\"Multiple devices are enabled.\")\n sample_rate = int(element.attrib[\"rate\"])\n set_for_device = True\n return sample_rate\n\n\ndef get_processed_lines(sync_file,\n sync_metadata_file,\n metadata_2p_file,\n seven_camera_metadata_file=None):\n \"\"\"\n This function extracts all the standard lines and processes them.\n It works for both microscopes.\n\n Parameters\n ----------\n sync_file : str\n Path to the synchronization file.\n sync_metadata_file : str\n Path to the synchronization metadata file.\n metadata_2p_file : str\n Path to the ThorImage metadata file.\n seven_camera_metadata_file : str\n Path to the metadata file of the 7 camera system.\n\n Returns\n -------\n processed_lines : dictionary\n Dictionary with all processed lines.\n\n Examples\n --------\n >>> import utils2p\n >>> import utils2p.synchronization\n >>> experiment_dir = \"data/mouse_kidney_raw/\"\n >>> sync_file = utils2p.find_sync_file(experiment_dir)\n >>> metadata_file = utils2p.find_metadata_file(experiment_dir)\n >>> sync_metadata_file = utils2p.find_sync_metadata_file(experiment_dir)\n >>> seven_camera_metadata_file = utils2p.find_seven_camera_metadata_file(experiment_dir)\n >>> processed_lines = utils2p.synchronization.get_processed_lines(sync_file, sync_metadata_file, metadata_file, seven_camera_metadata_file)\n \"\"\"\n processed_lines = {}\n processed_lines[\"Capture On\"], processed_lines[\n \"Frame Counter\"] = get_lines_from_sync_file(\n sync_file, [\"Capture On\", \"Frame Counter\"])\n\n try:\n # For microscope 1\n processed_lines[\"CO2\"], processed_lines[\"Cameras\"], processed_lines[\n \"Optical flow\"] = get_lines_from_sync_file(sync_file, [\n \"CO2_Stim\",\n \"Basler\",\n \"OpFlow\",\n ])\n except KeyError:\n # For microscope 2\n processed_lines[\"CO2\"], processed_lines[\n \"Cameras\"] = get_lines_from_sync_file(sync_file, [\n \"CO2\",\n \"Cameras\",\n ])\n\n processed_lines[\"Cameras\"] = process_cam_line(processed_lines[\"Cameras\"],\n seven_camera_metadata_file)\n\n metadata_2p = main.Metadata(metadata_2p_file)\n processed_lines[\"Frame Counter\"] = process_frame_counter(\n processed_lines[\"Frame Counter\"], metadata_2p)\n\n processed_lines[\"CO2\"] = process_stimulus_line(processed_lines[\"CO2\"])\n\n if \"Optical flow\" in processed_lines.keys():\n processed_lines[\"Optical flow\"] = process_optical_flow_line(\n processed_lines[\"Optical flow\"])\n\n mask = np.logical_and(processed_lines[\"Capture On\"],\n processed_lines[\"Frame Counter\"] >= 0)\n\n # Make sure the clipping starts just before the\n # acquisition of the first frame\n indices = np.where(mask)[0]\n mask[max(0, indices[0] - 1)] = True\n\n for line_name, _ in processed_lines.items():\n processed_lines[line_name] = crop_lines(mask, [\n processed_lines[line_name],\n ])[0]\n\n # Get times of ThorSync ticks\n metadata = SyncMetadata(sync_metadata_file)\n freq = metadata.get_freq()\n times = get_times(len(processed_lines[\"Frame Counter\"]), freq)\n processed_lines[\"Times\"] = times\n\n return processed_lines\n\n\ndef epoch_length_filter(line, cut_off):\n \"\"\"\n This function filters a binary trace based on the\n length of each event.\n\n Parameters\n ----------\n line : numpy array of type bool\n Binary trace that is filtered.\n cut_off : int\n The minimal event length. 
All events shorter\n than `cut_off` are set to `False`.\n\n Returns\n -------\n filtered : numpy array of type bool\n The filtered binary trace.\n \"\"\"\n diff = np.diff(np.pad(line.astype(int), 1, \"constant\", constant_values=0))\n rising_edges = np.where(diff > 0)[0]\n falling_edges = np.where(diff < 0)[0]\n epoch_length = falling_edges - rising_edges\n\n discarded_epochs = (epoch_length < cut_off)\n\n discarded_rising_edges = rising_edges[discarded_epochs]\n discarded_falling_edges = falling_edges[discarded_epochs]\n\n filtered = line.copy()\n for start, stop in zip(discarded_rising_edges, discarded_falling_edges):\n filtered[start:stop] = 0\n\n return filtered.astype(bool)\n\n\ndef process_odor_line(line,\n freq=30000,\n arduino_commands=(\n \"None\",\n \"Odor1\",\n \"Odor2\",\n \"Odor3\",\n \"Odor4\",\n \"Odor5\",\n \"Odor6\",\n \"Odor1R\",\n \"Odor2R\",\n \"Odor1L\",\n \"Odor2L\",\n \"Odor1B\",\n \"Odor2B\",\n \"WaterB\",\n \"bubbleMFC_R0\",\n \"MFC1_R2\",\n \"MFC2_L1\",\n ),\n step_size=0.2703,\n filter_only=False):\n \"\"\"\n The odor line is based on a PWM signal for the Arduino controlling the\n valves. This function applies a Butterworth filter and converts the\n resulting voltages to level indices. The corresponding valve settings\n are given by the `arduino_commands` argument.\n\n Parameters\n ----------\n line : numpy array\n Unprocessed odor line from h5 file.\n freq : int\n Frequency of ThorSync. Necessary for the Butterworth filter.\n arduino_commands : list of strings\n Description of the valve settings for commands sent to arduino.\n Note: The order matters since the serial communication between\n computer and Arduino is based on the index in the list.\n This index is converted to a PWM signal that is recorded by ThorSync.\n step_size : float\n The voltage step size between different levels of the PWM. This is used\n to convert the voltage to indices.\n filter_only : bool\n If `True`, only the filtered line is returned instead of the odors\n based on the `arduino_commands`. 
This is useful for determining\n the `step_size`.\n\n Returns\n -------\n numpy array of strings\n \"\"\"\n b, a = scipy.signal.butter(3, 10, fs=freq)\n filtered_line = scipy.signal.filtfilt(b, a, line)\n if filter_only:\n return filtered_line\n indices = np.rint(filtered_line / step_size).astype(int)\n for index in np.unique(indices):\n mask = (indices == index)\n filtered_mask = epoch_length_filter(mask, freq)\n indices[mask & ~filtered_mask] = 0\n return np.array(arduino_commands)[indices]\n\n\ndef event_based_frame_indices(event_indicator):\n \"\"\"\n Calculates frame indices based on events.\n Frames before an event have negative numbers.\n The event onset has frame number 0 and the frames\n count up for the duration of the event.\n To be able to distinguish multiple events in the\n `event_indicator` an array with event numbers is\n returned.\n\n Parameters\n ----------\n event_indicator : numpy array of type bool\n True indicates some event happening.\n\n Returns\n -------\n event_based_indices : numpy array of type int\n Event based indices as described above.\n event_number : numpy array of type int\n Array of the same length as `event_based_indices`\n counting the number of events in event indicator.\n \"\"\"\n mask = event_indicator.astype(bool)\n inv_mask = ~mask\n inv_mask = inv_mask[::-1]\n mask = mask.astype(np.int8)\n inv_mask = inv_mask.astype(np.int8)\n mask = np.concatenate(([\n 0,\n ], mask))\n inv_mask = np.concatenate((inv_mask, [\n 0,\n ]))\n\n event_numbers = np.cumsum(np.clip(np.diff(mask), 0, None))\n inv_event_numbers = np.cumsum(np.clip(np.diff(inv_mask), 0, None))\n\n mask = mask[1:]\n inv_mask = inv_mask[:-1]\n\n # Count up from zero during the event\n event_frame_indices = np.cumsum(mask)\n inv_event_frame_indices = np.cumsum(inv_mask)\n n_events = max(event_numbers)\n for event in np.arange(1, n_events + 1):\n i = np.where(event_numbers == event)\n event_frame_indices[i] = event_frame_indices[i] - \\\n event_frame_indices[i[0][0]]\n event_frame_indices[~mask.astype(np.bool)] = 0\n\n # Count down from zero before each event\n n_inv_event = max(inv_event_numbers)\n for inv_event in np.arange(1, n_inv_event + 1):\n i = np.where(inv_event_numbers == inv_event)\n inv_event_frame_indices[i] = inv_event_frame_indices[i] - \\\n inv_event_frame_indices[i[0][0]]\n inv_event_frame_indices[~inv_mask.astype(np.bool)] = 0\n inv_event_frame_indices = -inv_event_frame_indices[::-1]\n\n event_frame_indices[~mask.astype(bool)] = inv_event_frame_indices[\n ~mask.astype(bool)]\n\n event_numbers = np.cumsum(\n -1 * np.clip(np.diff(np.concatenate(\n ([2], event_frame_indices))), -1, 0))\n # Make sure the last frames are not counted as the pre-event\n # frames of a new event\n n_events = max(event_numbers)\n last_event = np.where(event_numbers == n_events)\n if np.all(event_frame_indices[last_event] < 0):\n event_numbers[last_event] = -1\n\n return event_frame_indices, event_numbers\n" ]
[ [ "numpy.logical_not", "numpy.ones_like", "numpy.unique", "numpy.arange", "numpy.issubdtype", "numpy.rint", "numpy.cumsum", "numpy.concatenate", "numpy.all", "numpy.zeros_like", "numpy.diff", "numpy.array", "numpy.logical_and", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
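Most of the processing functions in the file above reduce to one primitive: detect rising edges in a sampled line and treat the first tick after each positive step as the event index. A small stand-alone illustration of that convention, assuming NumPy only (the toy `line` is made up for the example):

import numpy as np

# A toy counter line: positive steps of +3 and +2, then a fall back to 0.
line = np.array([0, 0, 3, 3, 5, 5, 0])

# diff > 0 marks positive steps; the +1 shifts each index to the first tick
# *after* the step, matching the convention of utils2p.synchronization.edges.
rising = np.where(np.diff(line.astype(np.float64)) > 0)[0] + 1
print(rising)  # -> [2 4]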
billwright93/pybird
[ "32f51c831c2917bb6d402e309e28d08a31f5b655" ]
[ "cosmosis_module/pnl/bird_like.py" ]
[ "from cosmosis.datablock import names, option_section, SectionOptions\nimport numpy as np\nimport os\n\n\nclass BirdLikelihood(object):\n # I take TwoPointLikelihood as an example\n # They subclass the Gaussian one, but we can't\n like_name = \"bird_like\"\n\n def __init__(self, options):\n # General options\n self.options = options\n kmin = options.get_double(\"kmin\")\n kmax = options.get_double(\"kmax\")\n self.Nl = options.get_int(\"Nl\")\n self.model = options.get_int(\"model\")\n self.data_directory = options.get_string(\"dir\")\n cov_file = options.get_string(\"cov_file\")\n\n # Load data PS and mask the relevant k\n kdata, PSdata = self.__load_data()\n\n self.k = kdata.reshape(3, -1)[0]\n self.Nk = len(self.k)\n kmask0 = np.argwhere((self.k <= kmax) & (self.k >= kmin))[:, 0]\n self.kmask = kmask0\n # print(self.kmask)\n for i in range(self.Nl - 1):\n kmaski = np.argwhere((self.k <= kmax) & (self.k >= kmin))[:, 0] + (i + 1) * self.Nk\n self.kmask = np.concatenate((self.kmask, kmaski))\n # print(self.kmask)\n self.ydata = PSdata[self.kmask]\n\n # Load data covariance, mask and invert it\n cov = np.loadtxt(os.path.join(self.data_directory, cov_file))\n # print(cov.shape)\n covred = cov[self.kmask.reshape((len(self.kmask), 1)), self.kmask]\n # print(covred.shape)\n self.invcov = np.linalg.inv(covred)\n self.chi2data = np.dot(self.ydata, np.dot(self.invcov, self.ydata))\n self.invcovdata = np.dot(self.ydata, self.invcov)\n\n # Assign priors to the bias parameters to marginalize\n self.assign_priors()\n\n # Check for BBNprior\n self.use_BBNprior = False\n try:\n self.omega_b_BBNsigma = options.get_double(\"omega_b_BBNsigma\")\n self.omega_b_BBNcenter = options.get_double(\"omega_b_BBNcenter\")\n self.use_BBNprior = True\n print ('BBN prior on omega_b: on')\n except:\n print ('BBN prior on omega_b: none')\n\n def __load_data(self):\n \"\"\"\n Helper function to read in the full data vector.\n \"\"\"\n # print(\"Load data?\")\n data_file = self.options.get_string(\"ps_file\")\n fname = os.path.join(self.data_directory, data_file)\n try:\n kPS, PSdata, _ = np.loadtxt(fname, unpack=True)\n except:\n kPS, PSdata = np.loadtxt(fname, unpack=True)\n return kPS, PSdata\n\n def assign_priors(self):\n # Assigns priors to marginalized bias parameters\n if self.Nl == 2:\n self.use_prior = True\n if self.model == 1:\n self.priors = np.array([2., 2., 8., 2., 2.])\n b3, cct, cr1, ce2, sn = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s, ce2: %s, shotnoise: %s (default)' %\n (b3, cct, cr1, ce2, sn))\n elif self.model == 2:\n self.priors = np.array([2., 2., 8., 2.])\n b3, cct, cr1, ce2 = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s, ce2: %s (default)' % (b3, cct, cr1, ce2))\n elif self.model == 3:\n self.priors = np.array([2., 2., 8., 2., 2.]) # np.array([ 10., 4., 8., 4., 2. 
])\n b3, cct, cr1, ce2, ce1 = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s, ce2: %s, ce1: %s (default)' % (b3, cct, cr1, ce2, ce1))\n elif self.model == 4:\n self.priors = np.array([2., 2., 8., 2., 2., 2.])\n b3, cct, cr1, ce2, ce1, sn = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s, ce2: %s, ce1: %s, shotnoise: %s (default)' %\n (b3, cct, cr1, ce2, ce1, sn))\n elif self.model == 5:\n self.priors = np.array([2., 2., 8.])\n b3, cct, cr1 = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1(+cr2): %s (default)' % (b3, cct, cr1))\n elif self.Nl == 3:\n self.use_prior = True\n if self.model == 1:\n self.priors = np.array([2., 2., 4., 4., 2., 2.])\n b3, cct, cr1, cr2, ce2, sn = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1: %s, cr2: %s, ce2: %s, shotnoise: %s (default)' %\n (b3, cct, cr1, cr2, ce2, sn))\n elif self.model == 2:\n self.priors = np.array([2., 2., 4., 4., 2.])\n b3, cct, cr1, cr2, ce2 = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1: %s, cr2: %s, ce2: %s (default)' % (b3, cct, cr1, cr2, ce2))\n elif self.model == 3:\n self.priors = np.array([2., 2., 4., 4., 2., 2.]) # np.array([ 10., 4., 8., 4., 2. ])\n b3, cct, cr1, cr2, ce2, ce1 = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1: %s, cr2: %s, ce2: %s, ce1: %s (default)' %\n (b3, cct, cr1, cr2, ce2, ce1))\n elif self.model == 4:\n self.priors = np.array([2., 2., 4., 4., 2., 2., 2.])\n b3, cct, cr1, cr2, ce2, ce1, sn = self.priors\n print ('EFT priors: b3: %s, cct: %s, cr1: %s, cr2: %s, ce2: %s, ce1: %s, shotnoise: %s (default)' %\n (b3, cct, cr1, cr2, ce2, ce1, sn))\n self.priormat = np.diagflat(1. / self.priors**2)\n\n def biasing(self, block):\n self.knl = self.options.get_double(\"knl\")\n self.km = self.options.get_double(\"km\")\n self.nd = self.options.get_double(\"nd\")\n\n bval = [block[\"bias\", n]\n for n in (\"b1\", \"c2\", \"b3\", \"c4\", \"b5\",\n \"b6\", \"b7\", \"b8\", \"b9\", \"b10\")]\n b1 = bval[0]\n self.b1 = b1\n b2 = (bval[1] + bval[3]) / np.sqrt(2.)\n b3 = bval[2]\n b4 = (bval[1] - bval[3]) / np.sqrt(2.)\n b5 = bval[4] / self.knl**2\n b6 = bval[5] / self.km**2\n b7 = 0.\n\n # The PS are correctly read in the right shape\n self.P11l = block[names.matter_power_nl, 'P11l']\n self.Ploopl = block[names.matter_power_nl, 'Ploopl']\n self.Pctl = block[names.matter_power_nl, 'Pctl']\n self.f = block[names.growth_parameters, \"f_PS\"]\n\n b11 = np.array([b1**2, 2. * b1 * self.f, self.f**2])\n bct = np.array([2. * b1 * b5, 2. * b1 * b6, 2. * b1 * b7,\n 2. * self.f * b5, 2. * self.f * b6, 2. 
* self.f * b7])\n bloop = np.array([1., b1, b2, b3, b4, b1 * b1, b1 * b2, b1 * b3, b1 * b4, b2 * b2, b2 * b4, b4 * b4])\n\n Ps0 = np.einsum('b,lbx->lx', b11, self.P11l)\n Ps1 = np.einsum('b,lbx->lx', bloop, self.Ploopl) + np.einsum('b,lbx->lx', bct, self.Pctl)\n self.fullPs = Ps0 + Ps1\n # print(self.fullPs.shape)\n self.Pb3 = self.Ploopl[:, 3] + b1 * self.Ploopl[:, 7]\n\n if self.use_prior:\n self.prior = - 0.5 * (\n (bval[1] / 10.)**2 # c2\n + (bval[3] / 2.)**2 # c4\n + (bval[2] / self.priors[0])**2 # b3\n + (bval[4] / self.knl**2 / self.priors[1])**2 # cct\n + (bval[5] / self.km**2 / self.priors[2])**2 # cr1(+cr2)\n )\n if self.model <= 4:\n self.prior += -0.5 * (bval[9] / self.nd / self.km**2 / self.priors[3])**2 # ce,l2\n if self.model == 1:\n self.prior += -0.5 * (bval[7] / self.nd / self.priors[4])**2 # ce0\n if self.model == 3:\n self.prior += -0.5 * (bval[8] / self.nd / self.km**2 / self.priors[4])**2 # ce,l0\n\n def __get_Pi_for_marg(self, Pct, Pb3, b1, f, model=2):\n if self.Nl == 2:\n Pi = np.array([\n Pb3.reshape(-1), # *b3\n (2 * f * Pct[:, 0 + 3] + 2 * b1 * Pct[:, 0]).reshape(-1) / self.knl**2, # *cct\n (2 * f * Pct[:, 1 + 3] + 2 * b1 * Pct[:, 1]).reshape(-1) / self.km**2 # *cr1\n ])\n\n elif self.Nl == 3:\n Pi = np.array([\n Pb3.reshape(-1), # *b3\n (2 * f * Pct[:, 0 + 3] + 2 * b1 * Pct[:, 0]).reshape(-1) / self.knl**2, # *cct\n (2 * f * Pct[:, 1 + 3] + 2 * b1 * Pct[:, 1]).reshape(-1) / self.km**2, # *cr1\n (2 * f * Pct[:, 2 + 3] + 2 * b1 * Pct[:, 2]).reshape(-1) / self.km**2 # *cr2\n ])\n\n if model <= 4:\n kp2l2 = np.zeros(shape=(self.Nl, self.Nk))\n kp2l2[1] = self.k**2 / self.nd / self.km**2 # k^2 quad\n Pi = np.vstack([Pi, kp2l2.reshape(-1)])\n\n if model == 1:\n Onel0 = np.zeros(shape=(self.Nl, self.Nk))\n Onel0[0] = np.ones(self.Nk) / self.nd # shot-noise mono\n Pi = np.vstack([Pi, Onel0.reshape(-1)])\n elif model == 3:\n kp2l0 = np.zeros(shape=(self.Nl, self.Nk))\n kp2l0[0] = self.k**2 / self.nd / self.km**2 # k^2 mono\n Pi = np.vstack([Pi, kp2l0.reshape(-1)])\n elif model == 4:\n kp2l0 = np.zeros(shape=(self.Nl, self.Nk))\n kp2l0[0] = self.k**2 / self.nd / self.km**2 # k^2 mono\n Onel0 = np.zeros(shape=(self.Nl, self.Nk))\n Onel0[0] = np.ones(self.Nk) / self.nd # shot-noise mono\n Pi = np.vstack([Pi, kp2l0.reshape(-1), Onel0.reshape(-1)])\n # print(self.kmask.shape, Pi.shape)\n Pi = Pi[:, self.kmask]\n # print(Pi.shape)\n return Pi\n\n def do_likelihood(self, block):\n self.biasing(block)\n modelX = self.fullPs.reshape(-1)\n modelX = modelX[self.kmask]\n\n Pi = self.__get_Pi_for_marg(self.Pctl, self.Pb3, self.b1, self.f, model=self.model)\n # print(Pi.shape, self.invcov.shape)\n Covbi = np.dot(Pi, np.dot(self.invcov, Pi.T)) + self.priormat\n # print(Covbi.shape)\n Cinvbi = np.linalg.inv(Covbi)\n vectorbi = np.dot(modelX, np.dot(self.invcov, Pi.T)) - np.dot(self.invcovdata, Pi.T)\n chi2nomar = (np.dot(modelX, np.dot(self.invcov, modelX))\n - 2. * np.dot(self.invcovdata, modelX)\n + self.chi2data)\n chi2mar = -np.dot(vectorbi, np.dot(Cinvbi, vectorbi)) + np.log(np.abs(np.linalg.det(Covbi)))\n chi2 = chi2mar + chi2nomar - self.priors.shape[0] * np.log(2. 
* np.pi)\n\n if self.use_BBNprior:\n omb = block[names.cosmological_parameters, \"ombh2\"]\n self.prior += -0.5 * ((omb - self.omega_b_BBNcenter) / self.omega_b_BBNsigma)**2\n\n lkl = - 0.5 * chi2 + self.prior\n\n # Now save the resulting likelihood\n block[names.likelihoods, self.like_name + \"_LIKE\"] = lkl\n\n def cleanup(self):\n \"\"\"\n You can override the cleanup method if you do something \n unusual to get your data, like open a database or something.\n It is run just once, at the end of the pipeline.\n \"\"\"\n pass\n\n @classmethod\n def build_module(cls):\n\n def setup(options):\n options = SectionOptions(options)\n likelihoodCalculator = cls(options)\n return likelihoodCalculator\n\n def execute(block, config):\n likelihoodCalculator = config\n likelihoodCalculator.do_likelihood(block)\n return 0\n\n def cleanup(config):\n likelihoodCalculator = config\n likelihoodCalculator.cleanup()\n\n return setup, execute, cleanup\n\n\nsetup, execute, cleanup = BirdLikelihood.build_module()\n" ]
[ [ "numpy.dot", "numpy.log", "numpy.sqrt", "numpy.einsum", "numpy.linalg.inv", "numpy.diagflat", "numpy.argwhere", "numpy.concatenate", "numpy.ones", "numpy.linalg.det", "numpy.array", "numpy.zeros", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
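The `do_likelihood` method in the file above uses the standard trick of analytic marginalization over nuisance parameters that enter the model linearly: writing the model as P = P0 + sum_i b_i * Pi_i with Gaussian priors on the b_i, the b_i can be integrated out in closed form. A schematic NumPy version of that algebra, with random stand-ins for the data products (all names and numbers here are illustrative, not taken from pybird):

import numpy as np

rng = np.random.default_rng(0)
nk, nb = 20, 3                        # data points, linear nuisance parameters
ydata = rng.normal(size=nk)           # stand-in data vector
model0 = rng.normal(size=nk)          # model with all nuisance parameters at 0
Pi = rng.normal(size=(nb, nk))        # dP/db_i templates, one per row
invcov = np.eye(nk)                   # stand-in inverse data covariance
priormat = np.diag(1.0 / np.array([2.0, 2.0, 8.0])**2)  # Gaussian prior widths

# Gaussian integral over b: chi2_marg = chi2(b=0) - v^T C_b^{-1} v + ln|det C_b|
Covbi = Pi @ invcov @ Pi.T + priormat
vectorbi = Pi @ invcov @ (model0 - ydata)
chi2_b0 = (model0 - ydata) @ invcov @ (model0 - ydata)
chi2_marg = chi2_b0 - vectorbi @ np.linalg.solve(Covbi, vectorbi) \
            + np.log(np.abs(np.linalg.det(Covbi)))
print(chi2_marg)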