repo_name
stringlengths
6
130
hexsha
sequence
file_path
sequence
code
sequence
apis
sequence
possible_versions
list
d-ataman/lmm
[ "83a3c2d9289e2f4cc24c03b177c81ba16e000b55" ]
[ "onmt/Samplers.py" ]
[ "from __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\nimport onmt\nfrom onmt.Utils import aeq\nimport torch.distributions as tdist\n\n\nclass Sampler(nn.Module):\n \"\"\"\n The inference network based on MLP to learn the parameters of a diagonal\n Gaussian distribution and predict samples from it given an input.\n \"\"\"\n\n def __init__(self, latent_dim, hidden_size):\n super(Sampler, self).__init__()\n\n def forward(self, X, batch_size, translate):\n out = self.run_forward_pass(X, batch_size, translate)\n return out\n\n\n\nclass MLP(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(MLP, self).__init__()\n self.fc1 = nn.Linear(input_size, hidden_size, bias=True) \n self.tanh = nn.Tanh()\n self.fc2 = nn.Linear(hidden_size, output_size, bias=True) \n \n def forward(self, x):\n out = self.fc1(x)\n out = self.tanh(out)\n out = self.fc2(out)\n return out\n\nclass MLP_SP(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(MLP_SP, self).__init__()\n self.fc1 = nn.Linear(input_size, hidden_size, bias=True)\n self.tanh = nn.Tanh()\n self.fc2 = nn.Linear(hidden_size, output_size, bias=True)\n self.softplus = nn.Softplus()\n\n def forward(self, x):\n out = self.fc1(x)\n out = self.tanh(out)\n out = self.fc2(out)\n out = self.softplus(out)\n return out\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nFunctions for the continuous Gaussian variable.\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nclass DiagonalGaussianSampler(Sampler):\n \"\"\"\n The inference network based on MLP to learn the parameters of a diagonal \n Gaussian distribution and predict samples from it given an input.\n \"\"\"\n\n def __init__(self, latent_dim, hidden_size):\n super(DiagonalGaussianSampler, self).__init__(latent_dim, hidden_size)\n\n self.latent_dim = latent_dim\n self.hidden_size = hidden_size\n\n self.mu = MLP(self.hidden_size, self.hidden_size//2, self.latent_dim)\n self.sigma = MLP_SP(self.hidden_size, self.hidden_size//2, self.latent_dim)\n\n\n def sample_value(self, mean, variance, batch_size):\n \"\"\"\n Produce a sample from the inferred Gaussian distribution.\n :param mean: The mean of the Gaussian.\n :param scale: The scale parameter of this Gaussian.\n :return: A random Gaussian vector.\n \"\"\"\n\n N = tdist.Normal(torch.tensor([0.0]), torch.tensor([1.0]))\n e = N.sample(sample_shape=torch.Size([batch_size, self.latent_dim]))\n return mean + variance * e.squeeze(2).cuda()\n\n def run_forward_pass(self, X, batch_size, translate):\n \"\"\"\n Method for passing the input to the inference network\n \"\"\"\n self.mean = self.mu(X)\n self.variance = self.sigma(X)\n if translate == False:\n s = self.sample_value(self.mean, self.variance, batch_size)\n return s\n else:\n return self.mean\n\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nFunctions for the discrete Kumaraswamy variables.\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\ndef hardsigmoid(x):\n return torch.min(torch.ones_like(x), torch.max(x, torch.zeros_like(x)))\n\nclass RV:\n\n def params(self):\n raise NotImplementedError('Implement me')\n\n def sample(self, size=None):\n raise NotImplementedError('Implement me')\n \n def log_pdf(self, x):\n raise NotImplementedError('Implement me')\n \n def log_cdf(self, x):\n raise NotImplementedError('Implement me')\n \n def entropy(self):\n raise NotImplementedError('Implement me')\n \n def pdf(self, x):\n return torch.exp(self.log_pdf(x))\n \n def cdf(self, x):\n return torch.exp(self.log_cdf(x))\n\n\nclass RelaxedBinary(RV):\n \n pass\n\n\nclass Kuma(RelaxedBinary):\n\n def __init__(self, params: list):\n self.a = params[0]\n self.b = params[1]\n \n def params(self):\n return [self.a, self.b]\n\n def sample(self, size=None, eps=0.001):\n y = (2*eps - 1.) * torch.rand(size) + 1. - eps\n y = y.cuda()\n z = (1 - (1 - y).pow(1. / self.b)).pow(1. / self.a)\n return z \n\n def log_pdf(self, x):\n t1 = torch.log(self.a) + torch.log(self.b) \n t2 = (self.a - 1) * torch.log(x + 0.001)\n t3 = (self.b - 1) * torch.log(1. - torch.min(torch.pow(x, self.a), torch.tensor([0.999]).cuda()))\n return t1 + t2 + t3 \n \n def log_cdf(self, x):\n return torch.log(1. - torch.min(torch.pow((1. - torch.pow(x, self.a)), self.b), torch.tensor([0.999]).cuda()))\n\nclass StretchedVariable(RelaxedBinary):\n \n def __init__(self, dist: RelaxedBinary, support: list):\n \"\"\"\n :param dist: a RelaxedBinary variable (e.g. BinaryConcrete or Kuma)\n :param support: a pair specifying the limits of the stretched support (e.g. [-1, 2])\n we use these values to compute location = pair[0] and scale = pair[1] - pair[0] \n \"\"\"\n assert isinstance(dist, RelaxedBinary), 'I need a RelaxedBinary variable, got %s' % type(dist)\n assert support[0] < support[1], 'I need an ordered support, got %s' % support\n self._dist = dist\n self.loc = support[0]\n self.scale = support[1] - support[0]\n \n def params(self):\n return self._dist.params()\n \n def sample(self, size=None):\n # sample a relaxed binary variable\n x_ = self._dist.sample(size=size)\n # and stretch it\n return x_ * self.scale + self.loc\n \n def log_pdf(self, x):\n # shrink the stretched variable\n x_ = (x - self.loc) / self.scale\n # and assess the stretched pdf using the original pdf \n # see eq 25 (left) of Louizos et al\n return self._dist.log_pdf(x_) - torch.log(torch.tensor([self.scale]).cuda())\n \n def log_cdf(self, x):\n # shrink the stretched variable\n x_ = (x - self.loc) / self.scale\n # assess its cdf\n # see eq 25 (right) of Louizos et al\n return self._dist.log_cdf(x_)\n\n\nclass HardBinary(RV):\n \n def __init__(self, dist: StretchedVariable):\n assert isinstance(dist, StretchedVariable), 'I need a stretched variable'\n self._dist = dist\n \n def params(self):\n return self._dist.params()\n \n def sample(self, size=None):\n # sample a stretched variable\n x_ = self._dist.sample(size=size) \n # and rectify it\n return hardsigmoid(x_)\n \n def log_pdf(self, x):\n # first we fix log_pdf for 0s and 1s\n log_p = torch.where(\n x == 0., \n self._dist.log_cdf(0.), # log Q(0) \n torch.log(1. - self._dist.cdf(1.)) # log (1-Q(1))\n )\n\n # then for those that are in the open (0, 1)\n log_p = torch.where(\n torch.lt(x, 0.) * torch.lt(x, 1.),\n torch.log(self._dist.cdf(1.) - self._dist.cdf(0.)) + self._dist.log_pdf(x),\n log_p\n )\n # see eq 26 of Louizos et al\n return log_p\n \n def log_cdf(self, x): \n log_c = torch.where(\n torch.lt(x, 1.), \n self._dist.log_cdf(x),\n torch.full_like(x, 0.) # all of the mass\n )\n return log_c\n\n\nclass HardKuma(HardBinary):\n \n def __init__(self, params: list, support: list):\n super(HardKuma, self).__init__(StretchedVariable(Kuma(params), support))\n\n\nclass KumaSampler(Sampler):\n \"\"\"\n The inference network based on MLP to learn the parameters of a discrete\n Kumaraswamy distribution and predict samples from it given an input.\n \"\"\"\n\n def __init__(self, latent_dim, hidden_size):\n super(KumaSampler, self).__init__(latent_dim, hidden_size)\n\n self.latent_dim = latent_dim\n self.hidden_size = hidden_size\n\n self.na = MLP_SP(self.hidden_size, self.hidden_size//2, self.latent_dim)\n self.nb = MLP_SP(self.hidden_size, self.hidden_size//2, self.latent_dim)\n\n\n def sample(self, a, b, size):\n \"\"\"\n Produce a sample from the Kumaraswamy distribution.\n \"\"\"\n k = HardKuma([a, b], [-0.1, 1.1]) # support of the stretched variable should be just a bit bigger than the base Kumaraswamy\n ksample = k.sample(size=size)\n logpdfloss = torch.log(sum(sum(1 - k.pdf(torch.Tensor([1.]).cuda()) - k.pdf(torch.Tensor([0.]).cuda())))/float(ksample.size(0)*self.latent_dim))\n return ksample, logpdfloss\n\n def run_forward_pass(self, X, batch_size, translate):\n \"\"\"\n Method for passing the input to the inference network\n \"\"\"\n #out_s = []; out_logpdfloss = []\n self.a = self.na(X)\n self.b = self.nb(X)\n s, logpdfloss = self.sample(self.a, self.b, size=1)\n if translate == False:\n return s, logpdfloss\n else:\n return s, logpdfloss\n" ]
[ [ "torch.Size", "torch.nn.Softplus", "torch.Tensor", "torch.zeros_like", "torch.lt", "torch.tensor", "torch.nn.Tanh", "torch.nn.Linear", "torch.pow", "torch.log", "torch.rand", "torch.full_like", "torch.ones_like" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
amperie/user-models
[ "5236c50d0f20a7bac81acc5d1936a3502de2f5f3" ]
[ "task_templates/pipelines/python3_sklearn_multiclass/custom.py" ]
[ "import pickle\nfrom typing import List, Optional, Any\n\nimport numpy as np\nimport pandas as pd\nfrom create_pipeline import make_classifier\nimport logging\n\nlogger = logging.getLogger()\n\n\ndef transform(data: pd.DataFrame, model: Any) -> pd.DataFrame:\n \"\"\"\n Intended to apply transformations to the prediction data before making predictions. This is\n most useful if DRUM supports the model's library, but your model requires additional data\n processing before it can make predictions\n\n Parameters\n ----------\n data : is the dataframe given to DRUM to make predictions on\n model : is the deserialized model loaded by DRUM or by `load_model`, if supplied\n\n Returns\n -------\n Transformed data\n \"\"\"\n if \"class\" in data.columns:\n data.pop(\"class\")\n return data\n\n\ndef fit(\n X: pd.DataFrame,\n y: pd.Series,\n output_dir: str,\n class_order: Optional[List[str]] = None,\n row_weights: Optional[np.ndarray] = None,\n **kwargs,\n):\n \"\"\"\n This hook must be implemented with your fitting code, for running drum in the fit mode.\n\n This hook MUST ALWAYS be implemented for custom tasks.\n For inference models, this hook can stick around unimplemented, and won’t be triggered.\n\n Parameters\n ----------\n X: pd.DataFrame - training data to perform fit on\n y: pd.Series - target data to perform fit on\n output_dir: the path to write output. This is the path provided in '--output' parameter of the\n 'drum fit' command.\n class_order : A two element long list dictating the order of classes which should be used for\n modeling. Class order will always be passed to fit by DataRobot for classification tasks,\n and never otherwise. When models predict, they output a likelihood of one class, with a\n value from 0 to 1. The likelihood of the other class is 1 - this likelihood. Class order\n dictates that the first element in the list will be the 0 class, and the second will be the\n 1 class.\n row_weights: An array of non-negative numeric values which can be used to dictate how important\n a row is. Row weights is only optionally used, and there will be no filtering for which\n custom models support this. There are two situations when values will be passed into\n row_weights, during smart downsampling and when weights are explicitly provided by the user\n kwargs: Added for forwards compatibility\n\n Returns\n -------\n Nothing\n \"\"\"\n logging.info(y.head())\n # Feel free to delete which ever one of these you aren't using\n if class_order is not None:\n if y.dtype == np.dtype(\"bool\"):\n y = y.astype(\"str\")\n estimator = make_classifier(X)\n else:\n raise Exception(\"Running multiclass estimator task: class_order expected to be not None\")\n estimator.fit(X, y)\n\n # You must serialize out your model to the output_dir given, however if you wish to change this\n # code, you will probably have to add a load_model method to read the serialized model back in\n # When prediction is done.\n # Check out this doc for more information on serialization https://github.com/datarobot/custom-\\\n # model-templates/tree/master/custom_model_runner#python\n # NOTE: We currently set a 10GB limit to the size of the serialized model\n with open(\"{}/artifact.pkl\".format(output_dir), \"wb\") as fp:\n pickle.dump(estimator, fp)\n with open(\"{}/class_labels.txt\".format(output_dir), \"wb\") as fp:\n fp.write(\"\\n\".join(str(class_) for class_ in estimator.classes_).encode(\"utf-8\"))\n\n\n\"\"\"\nCustom hooks for prediction\n---------------------------\n\nIf drum's standard assumptions are incorrect for your model, DRUM supports several hooks\nfor custom inference code.\n\"\"\"\n# def init(code_dir : Optional[str], **kwargs) -> None:\n# \"\"\"\n#\n# Parameters\n# ----------\n# code_dir : code folder passed in the `--code_dir` parameter\n# kwargs : future proofing\n# \"\"\"\n\n# def load_model(code_dir: str) -> Any:\n# \"\"\"\n# Can be used to load supported models if your model has multiple artifacts, or for loading\n# models that DRUM does not natively support\n#\n# Parameters\n# ----------\n# code_dir : is the directory where model artifact and additional code are provided, passed in\n#\n# Returns\n# -------\n# If used, this hook must return a non-None value\n# \"\"\"\n\n# def score(data: pd.DataFrame, model: Any, **kwargs: Dict[str, Any]) -> pd.DataFrame:\n# \"\"\"\n# This hook is only needed if you would like to use DRUM with a framework not natively\n# supported by the tool.\n#\n# Parameters\n# ----------\n# data : is the dataframe to make predictions against. If `transform` is supplied,\n# `data` will be the transformed data.\n# model : is the deserialized model loaded by DRUM or by `load_model`, if supplied\n# kwargs : additional keyword arguments to the method\n# In case of classification model class labels will be provided as the following arguments:\n# - `positive_class_label` is the positive class label for a binary classification model\n# - `negative_class_label` is the negative class label for a binary classification model\n#\n# Returns\n# -------\n# This method should return predictions as a dataframe with the following format:\n# Binary Classification: must have columns for each class label with floating- point class\n# probabilities as values. Each row should sum to 1.0\n# Regression: must have a single column called `Predictions` with numerical values\n#\n# \"\"\"\n\n# def post_process(predictions: pd.DataFrame, model: Any) -> pd.DataFrame:\n# \"\"\"\n# This method is only needed if your model's output does not match the above expectations\n#\n# Parameters\n# ----------\n# predictions : is the dataframe of predictions produced by DRUM or by\n# the `score` hook, if supplied\n# model : is the deserialized model loaded by DRUM or by `load_model`, if supplied\n#\n# Returns\n# -------\n# This method should return predictions as a dataframe with the following format:\n# Binary Classification: must have columns for each class label with floating- point class\n# probabilities as values. Each row\n# should sum to 1.0\n# Regression: must have a single column called `Predictions` with numerical values\n#\n# \"\"\"\n" ]
[ [ "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
peper0/mpldock
[ "47bb3f112481e0f55b8c4460bc545e0eeac08461" ]
[ "examples/factory_default.py" ]
[ "import os.path\n\nimport matplotlib\n\nfrom mpldock import persist_layout\n\nmatplotlib.use('module://mpldock.backend')\n\nimport matplotlib.pyplot as plt\n\npersist_layout('1e2682b5-4408-42a6-ae97-3290153294', os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fd_layout.json'))\n\nplt.figure(\"some plot\")\nplt.plot([1, 5, 3])\nplt.figure(\"another plot\")\nplt.plot([5, 0, 1])\n\nplt.show()\n" ]
[ [ "matplotlib.pyplot.plot", "matplotlib.use", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hanshantong/MachineLearningWithPython3
[ "f9054904d5662a4d24d84d5bb7a3e87ed137efd2" ]
[ "plots.py" ]
[ "# -*- coding: utf-8 -*-\n\nfrom matplotlib.colors import ListedColormap\nimport numpy as np\n\n\ndef plot_decision_regions(X, y, classifier, resolution=0.02):\n '''\n plot decision regions\n\n Parameters\n ------------\n X: array-like, shape=(n_samples, 2)\n the train array with a shape (n_samples, 2)\n \n y: 1D-vector with the same length as X\n\n classifier: object\n an estimator for classification\n resolution: scalar\n a step\n\n '''\n\n colors = ['red', 'blue', 'lightgreen', 'gray', 'cyan']\n markers = ['s', 'x', 'o', '^', 'v']\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n\n # plot decision regions\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n\n plt.contourf(xx1, xx2, Z, cmap=cmap, alpha=0.3)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n #plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y==cl, 0], y=X[y==cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)\n" ]
[ [ "numpy.arange", "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Mark-sloan/IKC
[ "f70af607e9434931c22e4971469aaed7683a22a3" ]
[ "codes/train_SFTMD.py" ]
[ "import os\nimport math\nimport argparse\nimport random\nimport logging\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom data.data_sampler import DistIterSampler\n\nimport options.options as option\nfrom utils import util\nfrom data import create_dataloader, create_dataset\nfrom models import create_model\n\n\ndef init_dist(backend='nccl', **kwargs):\n ''' initialization for distributed training'''\n # if mp.get_start_method(allow_none=True) is None:\n if mp.get_start_method(allow_none=True) != 'spawn': #Return the name of start method used for starting processes\n mp.set_start_method('spawn', force=True) ##'spawn' is the default on Windows\n rank = int(os.environ['RANK']) #system env process ranks\n num_gpus = torch.cuda.device_count() #Returns the number of GPUs available\n torch.cuda.set_device(rank % num_gpus)\n dist.init_process_group(backend=backend, **kwargs) #Initializes the default distributed process group\n\n\ndef main():\n ###### SFTMD train ######\n #### setup options\n parser = argparse.ArgumentParser()\n parser.add_argument('-opt_F', type=str, help='Path to option YMAL file of SFTMD_Net.')\n parser.add_argument('--launcher', choices=['none', 'pytorch'], default='none',\n help='job launcher')\n parser.add_argument('--local_rank', type=int, default=0)\n args = parser.parse_args()\n opt_F = option.parse(args.opt_F, is_train=True)\n\n # convert to NoneDict, which returns None for missing keys\n opt_F = option.dict_to_nonedict(opt_F)\n\n #### random seed\n seed = opt_F['train']['manual_seed']\n if seed is None:\n seed = random.randint(1, 10000)\n util.set_random_seed(seed)\n\n # create PCA matrix of enough kernel\n batch_ker = util.random_batch_kernel(batch=30000, l=21, sig_min=0.2, sig_max=4.0, rate_iso=1.0, scaling=3, tensor=False)\n print('batch kernel shape: {}'.format(batch_ker.shape))\n b = np.size(batch_ker, 0)\n batch_ker = batch_ker.reshape((b, -1))\n pca_matrix = util.PCA(batch_ker, k=10).float()\n print('PCA matrix shape: {}'.format(pca_matrix.shape))\n\n #### distributed training settings\n if args.launcher == 'none': # disabled distributed training\n opt_F['dist'] = False\n rank = -1\n print('Disabled distributed training.')\n else:\n opt_F['dist'] = True\n init_dist()\n world_size = torch.distributed.get_world_size()\n rank = torch.distributed.get_rank()\n\n torch.backends.cudnn.benchmark = True\n # torch.backends.cudnn.deterministic = True\n\n #### loading resume state if exists\n if opt_F['path'].get('resume_state', None):\n # distributed resuming: all load into default GPU\n device_id = torch.cuda.current_device()\n resume_state = torch.load(opt_F['path']['resume_state'],\n map_location=lambda storage, loc: storage.cuda(device_id))\n option.check_resume(opt_F, resume_state['iter']) # check resume options\n else:\n resume_state = None\n\n #### mkdir and loggers\n if rank <= 0:\n if resume_state is None:\n util.mkdir_and_rename(\n opt_F['path']['experiments_root']) # rename experiment folder if exists\n util.mkdirs((path for key, path in opt_F['path'].items() if not key == 'experiments_root'\n and 'pretrain_model' not in key and 'resume' not in key))\n\n # config loggers. Before it, the log will not work\n util.setup_logger('base', opt_F['path']['log'], 'train_' + opt_F['name'], level=logging.INFO,\n screen=True, tofile=True)\n util.setup_logger('val', opt_F['path']['log'], 'val_' + opt_F['name'], level=logging.INFO,\n screen=True, tofile=True)\n logger = logging.getLogger('base')\n logger.info(option.dict2str(opt_F))\n # tensorboard logger\n if opt_F['use_tb_logger'] and 'debug' not in opt_F['name']:\n version = float(torch.__version__[0:3])\n if version >= 1.1: # PyTorch 1.1\n from torch.utils.tensorboard import SummaryWriter\n else:\n logger.info(\n 'You are using PyTorch {}. Tensorboard will use [tensorboardX]'.format(version))\n from tensorboardX import SummaryWriter\n tb_logger = SummaryWriter(log_dir='../tb_logger/' + opt_F['name'])\n else:\n util.setup_logger('base', opt_F['path']['log'], 'train', level=logging.INFO, screen=True)\n logger = logging.getLogger('base')\n\n #### create train and val dataloader\n dataset_ratio = 200 # enlarge the size of each epoch\n for phase, dataset_opt in opt_F['datasets'].items():\n if phase == 'train':\n train_set = create_dataset(dataset_opt)\n train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))\n total_iters = int(opt_F['train']['niter'])\n total_epochs = int(math.ceil(total_iters / train_size))\n if opt_F['dist']:\n train_sampler = DistIterSampler(train_set, world_size, rank, dataset_ratio)\n total_epochs = int(math.ceil(total_iters / (train_size * dataset_ratio)))\n else:\n train_sampler = None\n train_loader = create_dataloader(train_set, dataset_opt, opt_F, train_sampler)\n if rank <= 0:\n logger.info('Number of train images: {:,d}, iters: {:,d}'.format(\n len(train_set), train_size))\n logger.info('Total epochs needed: {:d} for iters {:,d}'.format(\n total_epochs, total_iters))\n elif phase == 'val':\n val_set = create_dataset(dataset_opt)\n val_loader = create_dataloader(val_set, dataset_opt, opt_F, None)\n if rank <= 0:\n logger.info('Number of val images in [{:s}]: {:d}'.format(\n dataset_opt['name'], len(val_set)))\n else:\n raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))\n assert train_loader is not None\n assert val_loader is not None\n\n #### create model\n model_F = create_model(opt_F)\n\n #### resume training\n if resume_state:\n logger.info('Resuming training from epoch: {}, iter: {}.'.format(\n resume_state['epoch'], resume_state['iter']))\n\n start_epoch = resume_state['epoch']\n current_step = resume_state['iter']\n model_F.resume_training(resume_state) # handle optimizers and schedulers\n else:\n current_step = 0\n start_epoch = 0\n\n #### training\n logger.info('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))\n for epoch in range(start_epoch, total_epochs + 1):\n if opt_F['dist']:\n train_sampler.set_epoch(epoch)\n for _, train_data in enumerate(train_loader):\n current_step += 1\n if current_step > total_iters:\n break\n #### preprocessing for LR_img and kernel map\n prepro = util.SRMDPreprocessing(opt_F['scale'], pca_matrix, para_input=10, kernel=21, noise=False, cuda=True,\n sig_min=0.2, sig_max=4.0, rate_iso=1.0, scaling=3,\n rate_cln=0.2, noise_high=0.0)\n LR_img, ker_map = prepro(train_data['GT'])\n\n #### update learning rate, schedulers\n model_F.update_learning_rate(current_step, warmup_iter=opt_F['train']['warmup_iter'])\n\n #### training\n model_F.feed_data(train_data, LR_img, ker_map)\n model_F.optimize_parameters(current_step)\n\n #### log\n if current_step % opt_F['logger']['print_freq'] == 0:\n logs = model_F.get_current_log()\n message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(\n epoch, current_step, model_F.get_current_learning_rate())\n for k, v in logs.items():\n message += '{:s}: {:.4e} '.format(k, v)\n # tensorboard logger\n if opt_F['use_tb_logger'] and 'debug' not in opt_F['name']:\n if rank <= 0:\n tb_logger.add_scalar(k, v, current_step)\n if rank <= 0:\n logger.info(message)\n\n # validation\n if current_step % opt_F['train']['val_freq'] == 0 and rank <= 0:\n avg_psnr = 0.0\n idx = 0\n for _, val_data in enumerate(val_loader):\n idx += 1\n #### preprocessing for LR_img and kernel map\n prepro = util.SRMDPreprocessing(opt_F['scale'], pca_matrix, para_input=15, noise=False, cuda=True,\n sig_min=0.2, sig_max=4.0, rate_iso=1.0, scaling=3,\n rate_cln=0.2, noise_high=0.0)\n LR_img, ker_map = prepro(val_data['GT'])\n\n model_F.feed_data(val_data, LR_img, ker_map)\n model_F.test()\n\n visuals = model_F.get_current_visuals()\n sr_img = util.tensor2img(visuals['SR']) # uint8\n gt_img = util.tensor2img(visuals['GT']) # uint8\n\n # Save SR images for reference\n img_name = os.path.splitext(os.path.basename(val_data['LQ_path'][0]))[0]\n #img_dir = os.path.join(opt_F['path']['val_images'], img_name)\n img_dir = os.path.join(opt_F['path']['val_images'], str(current_step))\n util.mkdir(img_dir)\n\n save_img_path = os.path.join(img_dir,'{:s}_{:d}.png'.format(img_name, current_step))\n util.save_img(sr_img, save_img_path)\n\n # calculate PSNR\n crop_size = opt_F['scale']\n gt_img = gt_img / 255.\n sr_img = sr_img / 255.\n cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]\n cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]\n avg_psnr += util.calculate_psnr(cropped_sr_img * 255, cropped_gt_img * 255)\n\n avg_psnr = avg_psnr / idx\n\n # log\n logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))\n logger_val = logging.getLogger('val') # validation logger\n logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.6f}'.format(epoch, current_step, avg_psnr))\n # tensorboard logger\n if opt_F['use_tb_logger'] and 'debug' not in opt_F['name']:\n tb_logger.add_scalar('psnr', avg_psnr, current_step)\n\n\n #### save models and training states\n if current_step % opt_F['logger']['save_checkpoint_freq'] == 0:\n if rank <= 0:\n logger.info('Saving models and training states.')\n model_F.save(current_step)\n model_F.save_training_state(epoch, current_step)\n\n if rank <= 0:\n logger.info('Saving the final model.')\n model_F.save('latest')\n logger.info('End of SFTMD training.')\n" ]
[ [ "torch.multiprocessing.set_start_method", "torch.distributed.init_process_group", "torch.cuda.set_device", "torch.cuda.current_device", "numpy.size", "torch.multiprocessing.get_start_method", "torch.distributed.get_rank", "torch.cuda.device_count", "torch.distributed.get_world_size" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ghokun-thesis/domain-networks
[ "8f64182a5ef404a0e41eb023812de5efefe4233e" ]
[ "models/corrnet.py" ]
[ "\"\"\"\nproposed model only with the correlation branch.\n\nauthor: David-Alexandre Beaupre\ndate: 2020-04-27\n\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom models.features import Features\nfrom models.classifier import Classifier\n\n\nclass CorrNet(nn.Module):\n def __init__(self, num_channels: int):\n \"\"\"\n represents the architecture of the proposed model with only the correlation branch.\n :param num_channels: number of channels of the input image.\n \"\"\"\n super(CorrNet, self).__init__()\n self.rgb_features = Features(num_channels=num_channels)\n self.lwir_features = Features(num_channels=num_channels)\n self.correlation_cls = Classifier(num_channels=256)\n\n def forward(self, rgb: torch.Tensor, lwir: torch.Tensor) -> torch.Tensor:\n \"\"\"\n forward pass implementation of the correlation branch.\n :param rgb: rgb patch tensor.\n :param lwir: lwir patch tensor.\n :return: 2 elements probability tensor (rgb and lwir being the same or not).\n \"\"\"\n rgb = self.rgb_features(rgb)\n lwir = self.lwir_features(lwir)\n\n correlation = torch.matmul(rgb, lwir)\n correlation = correlation.view(correlation.size(0), -1)\n correlation = self.correlation_cls(correlation)\n\n return correlation\n" ]
[ [ "torch.matmul" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MASILab/AID
[ "1525e2e0273b5c1c87934c6e2cddcdf7f977f7e7" ]
[ "torchsrc/models/layers/grid_attention_layer.py" ]
[ "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom ..networks_other import init_weights\n\n\nclass _GridAttentionBlockND(nn.Module):\n def __init__(self, in_channels, gating_channels, inter_channels=None, dimension=3, mode='concatenation',\n sub_sample_factor=(2,2,2)):\n super(_GridAttentionBlockND, self).__init__()\n\n assert dimension in [2, 3]\n assert mode in ['concatenation', 'concatenation_debug', 'concatenation_residual']\n\n # Downsampling rate for the input featuremap\n if isinstance(sub_sample_factor, tuple): self.sub_sample_factor = sub_sample_factor\n elif isinstance(sub_sample_factor, list): self.sub_sample_factor = tuple(sub_sample_factor)\n else: self.sub_sample_factor = tuple([sub_sample_factor]) * dimension\n\n # Default parameter set\n self.mode = mode\n self.dimension = dimension\n self.sub_sample_kernel_size = self.sub_sample_factor\n\n # Number of channels (pixel dimensions)\n self.in_channels = in_channels\n self.gating_channels = gating_channels\n self.inter_channels = inter_channels\n\n if self.inter_channels is None:\n self.inter_channels = in_channels // 2\n if self.inter_channels == 0:\n self.inter_channels = 1\n\n if dimension == 3:\n conv_nd = nn.Conv3d\n bn = nn.BatchNorm3d\n self.upsample_mode = 'trilinear'\n elif dimension == 2:\n conv_nd = nn.Conv2d\n bn = nn.BatchNorm2d\n self.upsample_mode = 'bilinear'\n else:\n raise NotImplemented\n\n # Output transform\n self.W = nn.Sequential(\n conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0),\n bn(self.in_channels),\n )\n\n # Theta^T * x_ij + Phi^T * gating_signal + bias\n self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)\n self.phi = conv_nd(in_channels=self.gating_channels, out_channels=self.inter_channels,\n kernel_size=1, stride=1, padding=0, bias=True)\n self.psi = conv_nd(in_channels=self.inter_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)\n\n # Initialise weights\n for m in self.children():\n init_weights(m, init_type='kaiming')\n\n # Define the operation\n if mode == 'concatenation':\n self.operation_function = self._concatenation\n elif mode == 'concatenation_debug':\n self.operation_function = self._concatenation_debug\n elif mode == 'concatenation_residual':\n self.operation_function = self._concatenation_residual\n else:\n raise NotImplementedError('Unknown operation function.')\n\n\n def forward(self, x, g):\n '''\n :param x: (b, c, t, h, w)\n :param g: (b, g_d)\n :return:\n '''\n\n output = self.operation_function(x, g)\n return output\n\n def _concatenation(self, x, g):\n input_size = x.size()\n batch_size = input_size[0]\n assert batch_size == g.size(0)\n\n # theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)\n # phi => (b, g_d) -> (b, i_c)\n theta_x = self.theta(x)\n theta_x_size = theta_x.size()\n\n # g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')\n # Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)\n phi_g = F.upsample(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)\n f = F.relu(theta_x + phi_g, inplace=True)\n\n # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)\n sigm_psi_f = F.sigmoid(self.psi(f))\n\n # upsample the attentions and multiply\n sigm_psi_f = F.upsample(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)\n y = sigm_psi_f.expand_as(x) * x\n W_y = self.W(y)\n\n return W_y, sigm_psi_f\n\n def _concatenation_debug(self, x, g):\n input_size = x.size()\n batch_size = input_size[0]\n assert batch_size == g.size(0)\n\n # theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)\n # phi => (b, g_d) -> (b, i_c)\n theta_x = self.theta(x)\n theta_x_size = theta_x.size()\n\n # g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')\n # Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)\n phi_g = F.upsample(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)\n f = F.softplus(theta_x + phi_g)\n\n # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)\n sigm_psi_f = F.sigmoid(self.psi(f))\n\n # upsample the attentions and multiply\n sigm_psi_f = F.upsample(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)\n y = sigm_psi_f.expand_as(x) * x\n W_y = self.W(y)\n\n return W_y, sigm_psi_f\n\n\n def _concatenation_residual(self, x, g):\n input_size = x.size()\n batch_size = input_size[0]\n assert batch_size == g.size(0)\n\n # theta => (b, c, t, h, w) -> (b, i_c, t, h, w) -> (b, i_c, thw)\n # phi => (b, g_d) -> (b, i_c)\n theta_x = self.theta(x)\n theta_x_size = theta_x.size()\n\n # g (b, c, t', h', w') -> phi_g (b, i_c, t', h', w')\n # Relu(theta_x + phi_g + bias) -> f = (b, i_c, thw) -> (b, i_c, t/s1, h/s2, w/s3)\n phi_g = F.upsample(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)\n f = F.relu(theta_x + phi_g, inplace=True)\n\n # psi^T * f -> (b, psi_i_c, t/s1, h/s2, w/s3)\n f = self.psi(f).view(batch_size, 1, -1)\n sigm_psi_f = F.softmax(f, dim=2).view(batch_size, 1, *theta_x.size()[2:])\n\n # upsample the attentions and multiply\n sigm_psi_f = F.upsample(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)\n y = sigm_psi_f.expand_as(x) * x\n W_y = self.W(y)\n\n return W_y, sigm_psi_f\n\n\nclass GridAttentionBlock2D(_GridAttentionBlockND):\n def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',\n sub_sample_factor=(2,2,2)):\n super(GridAttentionBlock2D, self).__init__(in_channels,\n inter_channels=inter_channels,\n gating_channels=gating_channels,\n dimension=2, mode=mode,\n sub_sample_factor=sub_sample_factor,\n )\n\n\nclass GridAttentionBlock3D(_GridAttentionBlockND):\n def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',\n sub_sample_factor=(2,2,2)):\n super(GridAttentionBlock3D, self).__init__(in_channels,\n inter_channels=inter_channels,\n gating_channels=gating_channels,\n dimension=3, mode=mode,\n sub_sample_factor=sub_sample_factor,\n )\n\nclass _GridAttentionBlockND_TORR(nn.Module):\n def __init__(self, in_channels, gating_channels, inter_channels=None, dimension=3, mode='concatenation',\n sub_sample_factor=(1,1,1), bn_layer=True, use_W=True, use_phi=True, use_theta=True, use_psi=True, nonlinearity1='relu'):\n super(_GridAttentionBlockND_TORR, self).__init__()\n\n assert dimension in [2, 3]\n assert mode in ['concatenation', 'concatenation_softmax',\n 'concatenation_sigmoid', 'concatenation_mean',\n 'concatenation_range_normalise', 'concatenation_mean_flow']\n\n # Default parameter set\n self.mode = mode\n self.dimension = dimension\n self.sub_sample_factor = sub_sample_factor if isinstance(sub_sample_factor, tuple) else tuple([sub_sample_factor])*dimension\n self.sub_sample_kernel_size = self.sub_sample_factor\n\n # Number of channels (pixel dimensions)\n self.in_channels = in_channels\n self.gating_channels = gating_channels\n self.inter_channels = inter_channels\n\n if self.inter_channels is None:\n self.inter_channels = in_channels // 2\n if self.inter_channels == 0:\n self.inter_channels = 1\n\n if dimension == 3:\n conv_nd = nn.Conv3d\n bn = nn.BatchNorm3d\n self.upsample_mode = 'trilinear'\n elif dimension == 2:\n conv_nd = nn.Conv2d\n bn = nn.BatchNorm2d\n self.upsample_mode = 'bilinear'\n else:\n raise NotImplemented\n\n # initialise id functions\n # Theta^T * x_ij + Phi^T * gating_signal + bias\n self.W = lambda x: x\n self.theta = lambda x: x\n self.psi = lambda x: x\n self.phi = lambda x: x\n self.nl1 = lambda x: x\n\n if use_W:\n if bn_layer:\n self.W = nn.Sequential(\n conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0),\n bn(self.in_channels),\n )\n else:\n self.W = conv_nd(in_channels=self.in_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0)\n\n if use_theta:\n self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)\n\n\n if use_phi:\n self.phi = conv_nd(in_channels=self.gating_channels, out_channels=self.inter_channels,\n kernel_size=self.sub_sample_kernel_size, stride=self.sub_sample_factor, padding=0, bias=False)\n\n\n if use_psi:\n self.psi = conv_nd(in_channels=self.inter_channels, out_channels=1, kernel_size=1, stride=1, padding=0, bias=True)\n\n\n if nonlinearity1:\n if nonlinearity1 == 'relu':\n self.nl1 = lambda x: F.relu(x, inplace=True)\n\n if 'concatenation' in mode:\n self.operation_function = self._concatenation\n else:\n raise NotImplementedError('Unknown operation function.')\n\n # Initialise weights\n for m in self.children():\n init_weights(m, init_type='kaiming')\n\n\n if use_psi and self.mode == 'concatenation_sigmoid':\n nn.init.constant(self.psi.bias.data, 3.0)\n\n if use_psi and self.mode == 'concatenation_softmax':\n nn.init.constant(self.psi.bias.data, 10.0)\n\n # if use_psi and self.mode == 'concatenation_mean':\n # nn.init.constant(self.psi.bias.data, 3.0)\n\n # if use_psi and self.mode == 'concatenation_range_normalise':\n # nn.init.constant(self.psi.bias.data, 3.0)\n\n parallel = False\n if parallel:\n if use_W: self.W = nn.DataParallel(self.W)\n if use_phi: self.phi = nn.DataParallel(self.phi)\n if use_psi: self.psi = nn.DataParallel(self.psi)\n if use_theta: self.theta = nn.DataParallel(self.theta)\n\n def forward(self, x, g):\n '''\n :param x: (b, c, t, h, w)\n :param g: (b, g_d)\n :return:\n '''\n\n output = self.operation_function(x, g)\n return output\n\n def _concatenation(self, x, g):\n input_size = x.size()\n batch_size = input_size[0]\n assert batch_size == g.size(0)\n\n #############################\n # compute compatibility score\n\n # theta => (b, c, t, h, w) -> (b, i_c, t, h, w)\n # phi => (b, c, t, h, w) -> (b, i_c, t, h, w)\n theta_x = self.theta(x)\n theta_x_size = theta_x.size()\n\n # nl(theta.x + phi.g + bias) -> f = (b, i_c, t/s1, h/s2, w/s3)\n phi_g = F.upsample(self.phi(g), size=theta_x_size[2:], mode=self.upsample_mode)\n\n f = theta_x + phi_g\n f = self.nl1(f)\n\n psi_f = self.psi(f)\n\n ############################################\n # normalisation -- scale compatibility score\n # psi^T . f -> (b, 1, t/s1, h/s2, w/s3)\n if self.mode == 'concatenation_softmax':\n sigm_psi_f = F.softmax(psi_f.view(batch_size, 1, -1), dim=2)\n sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])\n elif self.mode == 'concatenation_mean':\n psi_f_flat = psi_f.view(batch_size, 1, -1)\n psi_f_sum = torch.sum(psi_f_flat, dim=2)#clamp(1e-6)\n psi_f_sum = psi_f_sum[:,:,None].expand_as(psi_f_flat)\n\n sigm_psi_f = psi_f_flat / psi_f_sum\n sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])\n elif self.mode == 'concatenation_mean_flow':\n psi_f_flat = psi_f.view(batch_size, 1, -1)\n ss = psi_f_flat.size()\n psi_f_min = psi_f_flat.min(dim=2)[0].view(ss[0],ss[1],1)\n psi_f_flat = psi_f_flat - psi_f_min\n psi_f_sum = torch.sum(psi_f_flat, dim=2).view(ss[0],ss[1],1).expand_as(psi_f_flat)\n\n sigm_psi_f = psi_f_flat / psi_f_sum\n sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])\n elif self.mode == 'concatenation_range_normalise':\n psi_f_flat = psi_f.view(batch_size, 1, -1)\n ss = psi_f_flat.size()\n psi_f_max = torch.max(psi_f_flat, dim=2)[0].view(ss[0], ss[1], 1)\n psi_f_min = torch.min(psi_f_flat, dim=2)[0].view(ss[0], ss[1], 1)\n\n sigm_psi_f = (psi_f_flat - psi_f_min) / (psi_f_max - psi_f_min).expand_as(psi_f_flat)\n sigm_psi_f = sigm_psi_f.view(batch_size, 1, *theta_x_size[2:])\n\n elif self.mode == 'concatenation_sigmoid':\n sigm_psi_f = F.sigmoid(psi_f)\n else:\n raise NotImplementedError\n\n # sigm_psi_f is attention map! upsample the attentions and multiply\n sigm_psi_f = F.upsample(sigm_psi_f, size=input_size[2:], mode=self.upsample_mode)\n y = sigm_psi_f.expand_as(x) * x\n W_y = self.W(y)\n\n return W_y, sigm_psi_f\n\n\nclass GridAttentionBlock2D_TORR(_GridAttentionBlockND_TORR):\n def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',\n sub_sample_factor=(1,1), bn_layer=True,\n use_W=True, use_phi=True, use_theta=True, use_psi=True,\n nonlinearity1='relu'):\n super(GridAttentionBlock2D_TORR, self).__init__(in_channels,\n inter_channels=inter_channels,\n gating_channels=gating_channels,\n dimension=2, mode=mode,\n sub_sample_factor=sub_sample_factor,\n bn_layer=bn_layer,\n use_W=use_W,\n use_phi=use_phi,\n use_theta=use_theta,\n use_psi=use_psi,\n nonlinearity1=nonlinearity1)\n\n\nclass GridAttentionBlock3D_TORR(_GridAttentionBlockND_TORR):\n def __init__(self, in_channels, gating_channels, inter_channels=None, mode='concatenation',\n sub_sample_factor=(1,1,1), bn_layer=True):\n super(GridAttentionBlock3D_TORR, self).__init__(in_channels,\n inter_channels=inter_channels,\n gating_channels=gating_channels,\n dimension=3, mode=mode,\n sub_sample_factor=sub_sample_factor,\n bn_layer=bn_layer)\n\n\n\nif __name__ == '__main__':\n from torch.autograd import Variable\n\n mode_list = ['concatenation']\n\n for mode in mode_list:\n\n img = Variable(torch.rand(2, 16, 10, 10, 10))\n gat = Variable(torch.rand(2, 64, 4, 4, 4))\n net = GridAttentionBlock3D(in_channels=16, inter_channels=16, gating_channels=64, mode=mode, sub_sample_factor=(2,2,2))\n out, sigma = net(img, gat)\n print(out.size())" ]
[ [ "torch.nn.functional.upsample", "torch.nn.functional.softmax", "torch.max", "torch.min", "torch.sum", "torch.nn.functional.sigmoid", "torch.nn.DataParallel", "torch.nn.functional.relu", "torch.rand", "torch.nn.init.constant", "torch.nn.functional.softplus" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rogerbao/pytorch-deeplab-xception
[ "43f8b71295712a5d1e474af37c379d05d8e67cb0" ]
[ "modeling/decoder.py" ]
[ "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\n\nfrom torchsummary import summary\n\nclass Decoder(nn.Module):\n def __init__(self, num_classes, backbone, BatchNorm):\n super(Decoder, self).__init__()\n if backbone == 'resnet' or backbone == 'drn':\n low_level_inplanes = 256\n elif backbone == 'xception':\n low_level_inplanes = 128\n elif backbone == 'mobilenet':\n low_level_inplanes = 24\n else:\n raise NotImplementedError\n\n self.conv1 = nn.Conv2d(low_level_inplanes, 48, 1, bias=False)\n self.bn1 = BatchNorm(48)\n self.relu = nn.ReLU()\n self.last_conv = nn.Sequential(nn.Conv2d(304, 256, kernel_size=3, stride=1, padding=1, bias=False),\n BatchNorm(256),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),\n BatchNorm(256),\n nn.ReLU(),\n nn.Dropout(0.1),\n nn.Conv2d(256, num_classes, kernel_size=1, stride=1))\n self._init_weight()\n\n\n def forward(self, x, low_level_feat):\n low_level_feat = self.conv1(low_level_feat)\n low_level_feat = self.bn1(low_level_feat)\n low_level_feat = self.relu(low_level_feat)\n\n x = F.interpolate(x, size=low_level_feat.size()[2:], mode='bilinear', align_corners=True)\n x = torch.cat((x, low_level_feat), dim=1)\n x = self.last_conv(x)\n\n return x\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\ndef build_decoder(num_classes, backbone, BatchNorm):\n return Decoder(num_classes, backbone, BatchNorm)\n\n\nif __name__ == \"__main__\":\n input = torch.rand(16, 256, 32, 32)\n low_level_feature = torch.rand(16, 24, 128, 128)\n model = Decoder(num_classes=3, backbone='mobilenet', BatchNorm=nn.BatchNorm2d)\n model.cuda()\n summary(model, [(256, 8, 8), (24, 32, 32)])\n # output = model(input, low_level_feature)\n # print(output.size())\n" ]
[ [ "torch.nn.Dropout", "torch.cat", "torch.nn.Conv2d", "torch.rand", "torch.nn.ReLU", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
QTIM-Lab/AdrenalMGB-Version-1
[ "973cf2df352f21f697370b1089036a4209c96ba3" ]
[ "train_cluster.py" ]
[ "#TODO: If re-starting training for a certain saved model, evaluate validation set first to get accurate monitor value\r\n#TODO: If re-starting training, ensure original model isn't overwritten\r\n#TODO: Add ability to choose model to use for training/inference from snapshots\r\n#TODO: Fix random seed issue to make training fully deterministic\r\n#TODO: Allow spreadsheet of train/val cases for easy cross-validation without needing to split cases?\r\n\r\nimport os\r\nimport random\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom model_callbacks import *\r\nfrom sorcery import unpack_keys\r\nfrom unet import create_model, load_my_model\r\nfrom load_data import DataGenerator, nested_folder_filepaths\r\n\r\ndef train_model(params_dict):\r\n #unpack relevant dictionary elements\r\n random_seed, reload_model, output_file, dropblock_scheduler, tensorboard_dir, lr_schedule_type, loss, joint_loss_function_params, data_dir_train, data_dir_val, num_patches_per_patient, adaptive_full_image_patching, input_image_names, ground_truth_label_names, num_epochs, verbose, workers, max_queue_size = unpack_keys(params_dict)\r\n #set random seed if desired\r\n if random_seed[0] == True:\r\n seed_value = random_seed[1]\r\n os.environ['PYTHONHASHSEED']=str(seed_value)\r\n random.seed(seed_value)\r\n np.random.seed(seed_value)\r\n tf.random.set_seed(seed_value)\r\n else:\r\n np.random.seed()\r\n #enable multi-gpu training set-up if more than one GPU is visible\r\n strategy = tf.distribute.MirroredStrategy()\r\n numGPUs_available = strategy.num_replicas_in_sync\r\n with open(output_file, 'a') as f:\r\n f.write('Training on ' + str(numGPUs_available) + ' GPUs simultaneously \\n')\r\n params_dict['batch_size'][0] = params_dict['batch_size'][0] * numGPUs_available\r\n params_dict['batch_size'][1] = params_dict['batch_size'][1] * numGPUs_available\r\n #get patients in train and val sets\r\n train_patients = nested_folder_filepaths(data_dir_train, [input_image_names, ground_truth_label_names])\r\n val_patients = nested_folder_filepaths(data_dir_val, [input_image_names, ground_truth_label_names])\r\n iterations_per_epoch = np.ceil((len(train_patients) * num_patches_per_patient[0]) / params_dict['batch_size'][0])\r\n #instantiate callbacks list\r\n callbacks = []\r\n #if using dropblock with scheduling, convert desired number of epochs to iterations\r\n if dropblock_scheduler[0] == True:\r\n params_dict['dropblock_scheduler'][1] = dropblock_scheduler[1] * iterations_per_epoch\r\n #also add scheduling callback\r\n DropblockScheduleCallback = DecayDropblockProbability()\r\n callbacks.append(DropblockScheduleCallback)\r\n #load/compile model\r\n if numGPUs_available > 1:\r\n with strategy.scope():\r\n if reload_model[0] == False:\r\n model, manager = create_model(params_dict)\r\n else:\r\n model, manager, _ = load_my_model(params_dict, train_mode=True)\r\n else:\r\n if reload_model[0] == False:\r\n model, manager = create_model(params_dict)\r\n else:\r\n model, manager, _ = load_my_model(params_dict, train_mode=True)\r\n #print model summary to output file\r\n with open(output_file, 'a') as f:\r\n model.summary(line_length=150, print_fn=lambda x: f.write(x + '\\n'))\r\n #load tensorboard callback and specific learning rate logger\r\n tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_dir, write_graph=True, histogram_freq=1, profile_batch=0)\r\n file_writer = tf.summary.create_file_writer(tensorboard_dir + '/learning_rate')\r\n file_writer.set_as_default()\r\n #load callback to print and output information to text file\r\n logging_callback = PrintLossAndMetric(params_dict)\r\n #load early stopping / learning rate reduction on plateau callback\r\n early_stopper_callback = EarlyStoppingAndReduceOnPlateau(manager, params_dict)\r\n callbacks.extend([logging_callback, early_stopper_callback, tensorboard_callback])\r\n #load in learning rate schedule if specified\r\n if lr_schedule_type != 'None':\r\n learning_rate_schedule_callback = CustomLearningRateSchedules(params_dict, iterations_per_epoch)\r\n callbacks.append(learning_rate_schedule_callback)\r\n #if chosen loss function is a joint loss, initialize alpha parameter callback\r\n if 'joint' in loss:\r\n AlphaCallback = DecayAlphaParameter(*joint_loss_function_params)\r\n callbacks.append(AlphaCallback)\r\n #load data generators (shuffle training data; no need to shuffle validation data)\r\n train_generator = DataGenerator(data_dir_train, train_patients, params_dict['batch_size'][0], num_patches_per_patient[0], adaptive_full_image_patching[0], 'train', params_dict)\r\n val_generator = DataGenerator(data_dir_val, val_patients, params_dict['batch_size'][1], num_patches_per_patient[1], adaptive_full_image_patching[1], 'val', params_dict)\r\n #train model for desired number of epochs or until early stopping criteria met (shuffling turned off since our data loader shuffles every epoch)\r\n history = model.fit(x=train_generator, epochs=num_epochs, validation_data=val_generator, callbacks=callbacks, workers=workers, max_queue_size=max_queue_size, verbose=verbose, shuffle=False, use_multiprocessing=False)" ]
[ [ "numpy.random.seed", "tensorflow.distribute.MirroredStrategy", "tensorflow.keras.callbacks.TensorBoard", "tensorflow.random.set_seed", "tensorflow.summary.create_file_writer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dg1223/ML-pipeline
[ "b421fd8dddb695689ffe6dcf58c7640625066074" ]
[ "tests/unit/inputdata_test.py" ]
[ "# This script checks the input dataset using Pytest for the machine learning experiment.\n# Sample unit testing for the workshop.\n\n# Soure code reference: Microsoft Azure Machine Learning\n\nimport os\nimport numpy as np\nimport pandas as pd\n\n\n# Get absolute path of csv files from data folder in your git repository.\ndef get_absPath(filename):\n \"\"\"Returns the path of the notebooks folder\"\"\"\n path = os.path.abspath(\n os.path.join(\n os.path.dirname(__file__), os.path.pardir, os.path.pardir, \"data\", filename\n )\n )\n return path\n\n\n# Expected Number of features in the input data\nexpected_columns = 11\n\n# Check the Input data exists \ndef test_check_schema():\n datafile = get_absPath(\"creditcardcsvpresent.csv\")\n # check that file exists\n assert os.path.exists(datafile)\n dataset = pd.read_csv(datafile)\n header = dataset[dataset.columns[:-1]]\n actual_columns = header.shape[1]\n # check header has expected number of columns\n assert actual_columns == expected_columns\n\n# Check the missing values in the input data\ndef test_check_missing_values():\n datafile = get_absPath(\"creditcardcsvpresent.csv\")\n # check that file exists\n assert os.path.exists(datafile)\n dataset = pd.read_csv(datafile)\n missing_value = dataset.isnull().sum().max()\n assert missing_value > 0" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
hadaev8/waveglow
[ "5109a336ef1fdfd3d40dc72b20eebbc4a8d1da43" ]
[ "stft.py" ]
[ "\"\"\"\nBSD 3-Clause License\n\nCopyright (c) 2017, Prem Seetharaman\nAll rights reserved.\n\n* Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice, this\n list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n contributors may be used to endorse or promote products derived from this\n software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\nANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport torch\nfrom torch import nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom scipy.signal import get_window\nimport librosa.util as librosa_util\nfrom librosa.util import pad_center, tiny\nfrom librosa import stft, istft\nfrom librosa.filters import mel as librosa_mel_fn\n\n\ndef window_sumsquare(window, n_frames, hop_length=200, win_length=800,\n n_fft=800, dtype=np.float32, norm=None):\n \"\"\"\n # from librosa 0.6\n Compute the sum-square envelope of a window function at a given hop length.\n\n This is used to estimate modulation effects induced by windowing\n observations in short-time fourier transforms.\n\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n\n n_frames : int > 0\n The number of analysis frames\n\n hop_length : int > 0\n The number of samples to advance between frames\n\n win_length : [optional]\n The length of the window function. By default, this matches `n_fft`.\n\n n_fft : int > 0\n The length of each analysis frame.\n\n dtype : np.dtype\n The data type of the output\n\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n \"\"\"\n if win_length is None:\n win_length = n_fft\n\n n = n_fft + hop_length * (n_frames - 1)\n x = np.zeros(n, dtype=dtype)\n\n # Compute the squared window at the desired length\n win_sq = get_window(window, win_length, fftbins=True)\n win_sq = librosa_util.normalize(win_sq, norm=norm)**2\n win_sq = librosa_util.pad_center(win_sq, n_fft)\n\n # Fill the envelope\n for i in range(n_frames):\n sample = i * hop_length\n x[sample:min(n, sample + n_fft)\n ] += win_sq[:max(0, min(n_fft, n - sample))]\n return x\n\n\ndef griffin_lim(magnitudes, stft_fn, n_iters=30):\n \"\"\"\n PARAMS\n ------\n magnitudes: spectrogram magnitudes\n stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods\n \"\"\"\n\n angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))\n angles = angles.astype(np.float32)\n angles = torch.from_numpy(angles)\n signal = stft_fn.inverse(magnitudes, angles).squeeze(1)\n\n for i in range(n_iters):\n _, angles = stft_fn.transform(signal)\n signal = stft_fn.inverse(magnitudes, angles).squeeze(1)\n return signal\n\n\ndef dynamic_range_compression(x, C=1, clip_val=1e-5):\n \"\"\"\n PARAMS\n ------\n C: compression factor\n \"\"\"\n return torch.log(torch.clamp(x, min=clip_val) * C)\n\n\ndef dynamic_range_decompression(x, C=1):\n \"\"\"\n PARAMS\n ------\n C: compression factor used to compress\n \"\"\"\n return torch.exp(x) / C\n\n\nclass TacotronSTFT(nn.Module):\n def __init__(self, filter_length=1024, hop_length=256, win_length=1024,\n n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,\n mel_fmax=8000.0):\n super(TacotronSTFT, self).__init__()\n self.n_mel_channels = n_mel_channels\n self.sampling_rate = sampling_rate\n self.stft_fn = STFT(filter_length, hop_length, win_length)\n mel_basis = librosa_mel_fn(\n sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)\n mel_basis = torch.from_numpy(mel_basis).float()\n self.register_buffer('mel_basis', mel_basis)\n\n def spectral_normalize(self, magnitudes):\n output = dynamic_range_compression(magnitudes)\n return output\n\n def spectral_de_normalize(self, magnitudes):\n output = dynamic_range_decompression(magnitudes)\n return output\n\n def mel_spectrogram(self, y):\n \"\"\"Computes mel-spectrograms from a batch of waves\n PARAMS\n ------\n y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]\n\n RETURNS\n -------\n mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)\n \"\"\"\n assert(torch.min(y.data) >= -1)\n assert(torch.max(y.data) <= 1)\n\n magnitudes, phases = self.stft_fn.transform(y)\n magnitudes = magnitudes.data\n mel_output = torch.matmul(self.mel_basis, magnitudes)\n mel_output = self.spectral_normalize(mel_output)\n return mel_output\n\n\nclass STFT(torch.nn.Module):\n \"\"\"adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft\"\"\"\n\n def __init__(self, filter_length=800, hop_length=200, win_length=800,\n window='hann'):\n super(STFT, self).__init__()\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length\n self.window = window\n self.forward_transform = None\n scale = self.filter_length / self.hop_length\n fourier_basis = np.fft.fft(np.eye(self.filter_length))\n\n cutoff = int((self.filter_length / 2 + 1))\n fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),\n np.imag(fourier_basis[:cutoff, :])])\n\n forward_basis = torch.FloatTensor(fourier_basis[:, None, :])\n inverse_basis = torch.FloatTensor(\n np.linalg.pinv(scale * fourier_basis).T[:, None, :])\n\n if window is not None:\n assert(filter_length >= win_length)\n # get window and zero center pad it to filter_length\n fft_window = get_window(window, win_length, fftbins=True)\n fft_window = pad_center(fft_window, filter_length)\n fft_window = torch.from_numpy(fft_window).float()\n\n # window the bases\n forward_basis *= fft_window\n inverse_basis *= fft_window\n\n self.register_buffer('forward_basis', forward_basis.float())\n self.register_buffer('inverse_basis', inverse_basis.float())\n\n def transform(self, input_data):\n num_batches = input_data.size(0)\n num_samples = input_data.size(1)\n\n self.num_samples = num_samples\n\n if input_data.device.type == \"cuda\":\n # similar to librosa, reflect-pad the input\n input_data = input_data.view(num_batches, 1, num_samples)\n input_data = F.pad(\n input_data.unsqueeze(1),\n (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),\n mode='reflect')\n input_data = input_data.squeeze(1)\n\n forward_transform = F.conv1d(\n input_data,\n self.forward_basis,\n stride=self.hop_length,\n padding=0)\n\n cutoff = int((self.filter_length / 2) + 1)\n real_part = forward_transform[:, :cutoff, :]\n imag_part = forward_transform[:, cutoff:, :]\n else:\n x = input_data.detach().numpy()\n real_part = []\n imag_part = []\n for y in x:\n y_ = stft(y, self.filter_length, self.hop_length,\n self.win_length, self.window)\n real_part.append(y_.real[None, :, :])\n imag_part.append(y_.imag[None, :, :])\n real_part = np.concatenate(real_part, 0)\n imag_part = np.concatenate(imag_part, 0)\n\n real_part = torch.from_numpy(real_part).to(input_data.dtype)\n imag_part = torch.from_numpy(imag_part).to(input_data.dtype)\n\n magnitude = torch.sqrt(real_part**2 + imag_part**2)\n phase = torch.atan2(imag_part.data, real_part.data)\n\n return magnitude, phase\n\n def inverse(self, magnitude, phase):\n recombine_magnitude_phase = torch.cat(\n [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1)\n\n if magnitude.device.type == \"cuda\":\n inverse_transform = F.conv_transpose1d(\n recombine_magnitude_phase,\n self.inverse_basis,\n stride=self.hop_length,\n padding=0)\n\n if self.window is not None:\n window_sum = window_sumsquare(\n self.window, magnitude.size(-1), hop_length=self.hop_length,\n win_length=self.win_length, n_fft=self.filter_length,\n dtype=np.float32)\n # remove modulation effects\n approx_nonzero_indices = torch.from_numpy(\n np.where(window_sum > tiny(window_sum))[0])\n window_sum = torch.from_numpy(\n window_sum).to(inverse_transform.device)\n inverse_transform[:, :,\n approx_nonzero_indices] /= window_sum[approx_nonzero_indices]\n\n # scale by hop ratio\n inverse_transform *= float(self.filter_length) / \\\n self.hop_length\n\n inverse_transform = inverse_transform[:, :, int(\n self.filter_length / 2):]\n inverse_transform = inverse_transform[:,\n :, :-int(self.filter_length / 2):]\n inverse_transform = inverse_transform.squeeze(1)\n else:\n x_org = recombine_magnitude_phase.detach().numpy()\n n_b, n_f, n_t = x_org.shape\n x = np.empty([n_b, n_f // 2, n_t], dtype=np.complex64)\n x.real = x_org[:, :n_f // 2]\n x.imag = x_org[:, n_f // 2:]\n inverse_transform = []\n for y in x:\n y_ = istft(y, self.hop_length, self.win_length, self.window)\n inverse_transform.append(y_[None, :])\n inverse_transform = np.concatenate(inverse_transform, 0)\n inverse_transform = torch.from_numpy(\n inverse_transform).to(recombine_magnitude_phase.dtype)\n\n return inverse_transform\n\n def forward(self, input_data):\n self.magnitude, self.phase = self.transform(input_data)\n reconstruction = self.inverse(self.magnitude, self.phase)\n return reconstruction\n" ]
[ [ "numpy.imag", "torch.max", "scipy.signal.get_window", "torch.sin", "numpy.concatenate", "torch.FloatTensor", "torch.sqrt", "numpy.eye", "torch.from_numpy", "torch.nn.functional.conv_transpose1d", "numpy.real", "numpy.zeros", "torch.cos", "torch.min", "torch.nn.functional.conv1d", "torch.exp", "torch.atan2", "torch.matmul", "numpy.linalg.pinv", "torch.clamp", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
witwolf/agents
[ "e084e5184757dd84374e9b67176b8623d4b18a0f" ]
[ "tf_agents/networks/expand_dims_layer.py" ]
[ "# coding=utf-8\n# Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Keras layer performing the equivalent of tf.expand_dims.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\nclass ExpandDims(tf.keras.layers.Layer):\n \"\"\"Expands dims along a particular axis.\n\n Arguments:\n axis: Axis to expand. A new dim is added before this axis.\n May be a negative value. Must not be a tensor.\n\n Input shape:\n `(batch_size,) + shape`\n\n Output shape:\n `(batch_size,) + shape + [1]`, if `axis == -1`.\n\n `(batch_size,) + shape[:axis + 1] + [1] + shape[axis + 1:]`,\n if `axis < -1`.\n\n `(batch_size,) + shape[:axis] + [1] + shape[axis:]`, if `axis >= 0`.\n \"\"\"\n\n def __init__(self, axis, **kwargs):\n super(ExpandDims, self).__init__(**kwargs)\n self.axis = axis\n\n def compute_output_shape(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n if input_shape.rank is None:\n return input_shape\n input_shape = input_shape.as_list()\n if self.axis == -1:\n output_shape = input_shape + [1]\n elif self.axis < 0:\n output_shape = (\n input_shape[:self.axis + 1] + [1] + input_shape[self.axis + 1:])\n else:\n output_shape = input_shape[:self.axis] + [1] + input_shape[self.axis:]\n return tf.TensorShape(output_shape)\n\n def call(self, inputs):\n if self.axis < 0:\n # Negative axis, so expand starting from the right\n return tf.expand_dims(inputs, self.axis)\n else:\n # Perform the expansion from the left, but skip the batch dimension.\n return tf.expand_dims(inputs, self.axis + 1)\n\n def get_config(self):\n config = {'axis': self.axis}\n base_config = super(ExpandDims, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\n# Register with Keras so we can do type(layer).from_config(layer.get_config())\ntf.keras.utils.get_custom_objects()['ExpandDims'] = ExpandDims\n" ]
[ [ "tensorflow.keras.utils.get_custom_objects", "tensorflow.TensorShape", "tensorflow.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
cxqj/33-tensorflow-audio-classification
[ "934162d497a66bc59c87f527448464e121a3a306" ]
[ "vggish/mel_features.py" ]
[ "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Defines routines to compute mel spectrogram features from audio waveform.\"\"\"\n\nimport numpy as np\n\n\ndef frame(data, window_length, hop_length):\n \"\"\"Convert array into a sequence of successive possibly overlapping frames.\n\n An n-dimensional array of shape (num_samples, ...) is converted into an\n (n+1)-D array of shape (num_frames, window_length, ...), where each frame\n starts hop_length points after the preceding one.\n\n This is accomplished using stride_tricks, so the original data is not\n copied. However, there is no zero-padding, so any incomplete frames at the\n end are not included.\n\n Args:\n data: np.array of dimension N >= 1. 80000\n window_length: Number of samples in each frame. 400\n hop_length: Advance (in samples) between each window. 160\n\n Returns:\n (N+1)-D np.array with as many rows as there are complete frames that can be\n extracted.\n \"\"\"\n num_samples = data.shape[0] #80000 \n num_frames = 1 + int(np.floor((num_samples - window_length) / hop_length)) #498\n shape = (num_frames, window_length) + data.shape[1:] #(498,400)\n strides = (data.strides[0] * hop_length,) + data.strides #(1280,8)\n return np.lib.stride_tricks.as_strided(data, shape=shape, strides=strides) #(498,400) 按shape的形状对一个矩阵进行切块\n\n\ndef periodic_hann(window_length):\n \"\"\"Calculate a \"periodic\" Hann window.\n\n The classic Hann window is defined as a raised cosine that starts and\n ends on zero, and where every value appears twice, except the middle\n point for an odd-length window. Matlab calls this a \"symmetric\" window\n and np.hanning() returns it. However, for Fourier analysis, this\n actually represents just over one cycle of a period N-1 cosine, and\n thus is not compactly expressed on a length-N Fourier basis. Instead,\n it's better to use a raised cosine that ends just before the final\n zero value - i.e. a complete cycle of a period-N cosine. Matlab\n calls this a \"periodic\" window. This routine calculates it.\n\n Args:\n window_length: The number of points in the returned window.\n\n Returns:\n A 1D np.array containing the periodic hann window.\n \"\"\"\n return 0.5 - (0.5 * np.cos(2 * np.pi / window_length *\n np.arange(window_length)))\n\n\ndef stft_magnitude(signal, fft_length,\n hop_length=None,\n window_length=None):\n \"\"\"Calculate the short-time Fourier transform magnitude.\n\n Args:\n signal: 1D np.array of the input time-domain signal. 80000\n fft_length: Size of the FFT to apply. 512\n hop_length: Advance (in samples) between each frame passed to FFT. 160\n window_length: Length of each block of samples to pass to FFT. 400\n\n Returns:\n 2D np.array where each row contains the magnitudes of the fft_length/2+1\n unique values of the FFT for the corresponding frame of input samples.\n \"\"\"\n frames = frame(signal, window_length, hop_length) #(498,400)\n # Apply frame window to each frame. We use a periodic Hann (cosine of period\n # window_length) instead of the symmetric Hann of np.hanning (period\n # window_length-1).\n window = periodic_hann(window_length) #(400,)\n windowed_frames = frames * window #(498,400)\n return np.abs(np.fft.rfft(windowed_frames, int(fft_length))) #(498,257)\n\n\n# Mel spectrum constants and functions.\n_MEL_BREAK_FREQUENCY_HERTZ = 700.0\n_MEL_HIGH_FREQUENCY_Q = 1127.0\n\n\ndef hertz_to_mel(frequencies_hertz):\n \"\"\"Convert frequencies to mel scale using HTK formula.\n\n Args:\n frequencies_hertz: Scalar or np.array of frequencies in hertz.\n\n Returns:\n Object of same size as frequencies_hertz containing corresponding values\n on the mel scale.\n \"\"\"\n return _MEL_HIGH_FREQUENCY_Q * np.log(\n 1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ))\n\n\ndef spectrogram_to_mel_matrix(num_mel_bins=20,\n num_spectrogram_bins=129,\n audio_sample_rate=8000,\n lower_edge_hertz=125.0,\n upper_edge_hertz=3800.0):\n \"\"\"Return a matrix that can post-multiply spectrogram rows to make mel.返回一个矩阵,该矩阵可以将多个谱图行进行后期处理以生成mel。\n\n Returns a np.array matrix A that can be used to post-multiply a matrix S of\n spectrogram values (STFT magnitudes) arranged as frames x bins to generate a\n \"mel spectrogram\" M of frames x num_mel_bins. M = S A.\n\n The classic HTK algorithm exploits the complementarity of adjacent mel bands\n to multiply each FFT bin by only one mel weight, then add it, with positive\n and negative signs, to the two adjacent mel bands to which that bin\n contributes. Here, by expressing this operation as a matrix multiply, we go\n from num_fft multiplies per frame (plus around 2*num_fft adds) to around\n num_fft^2 multiplies and adds. However, because these are all presumably\n accomplished in a single call to np.dot(), it's not clear which approach is\n faster in Python. The matrix multiplication has the attraction of being more\n general and flexible, and much easier to read.\n\n Args:\n num_mel_bins: How many bands in the resulting mel spectrum. This is\n the number of columns in the output matrix.\n num_spectrogram_bins: How many bins there are in the source spectrogram\n data, which is understood to be fft_size/2 + 1, i.e. the spectrogram\n only contains the nonredundant FFT bins.\n audio_sample_rate: Samples per second of the audio at the input to the\n spectrogram. We need this to figure out the actual frequencies for\n each spectrogram bin, which dictates how they are mapped into mel.\n lower_edge_hertz: Lower bound on the frequencies to be included in the mel\n spectrum. This corresponds to the lower edge of the lowest triangular\n band.\n upper_edge_hertz: The desired top edge of the highest frequency band.\n\n Returns:\n An np.array with shape (num_spectrogram_bins, num_mel_bins).\n\n Raises:\n ValueError: if frequency edges are incorrectly ordered or out of range.\n \"\"\"\n nyquist_hertz = audio_sample_rate / 2.\n if lower_edge_hertz < 0.0:\n raise ValueError(\"lower_edge_hertz %.1f must be >= 0\" % lower_edge_hertz)\n if lower_edge_hertz >= upper_edge_hertz:\n raise ValueError(\"lower_edge_hertz %.1f >= upper_edge_hertz %.1f\" %\n (lower_edge_hertz, upper_edge_hertz))\n if upper_edge_hertz > nyquist_hertz:\n raise ValueError(\"upper_edge_hertz %.1f is greater than Nyquist %.1f\" %\n (upper_edge_hertz, nyquist_hertz))\n spectrogram_bins_hertz = np.linspace(0.0, nyquist_hertz, num_spectrogram_bins)\n spectrogram_bins_mel = hertz_to_mel(spectrogram_bins_hertz)\n # The i'th mel band (starting from i=1) has center frequency\n # band_edges_mel[i], lower edge band_edges_mel[i-1], and higher edge\n # band_edges_mel[i+1]. Thus, we need num_mel_bins + 2 values in\n # the band_edges_mel arrays.\n band_edges_mel = np.linspace(hertz_to_mel(lower_edge_hertz),\n hertz_to_mel(upper_edge_hertz), num_mel_bins + 2)\n # Matrix to post-multiply feature arrays whose rows are num_spectrogram_bins\n # of spectrogram values.\n mel_weights_matrix = np.empty((num_spectrogram_bins, num_mel_bins))\n for i in range(num_mel_bins):\n lower_edge_mel, center_mel, upper_edge_mel = band_edges_mel[i:i + 3]\n # Calculate lower and upper slopes for every spectrogram bin.\n # Line segments are linear in the *mel* domain, not hertz.\n lower_slope = ((spectrogram_bins_mel - lower_edge_mel) /\n (center_mel - lower_edge_mel))\n upper_slope = ((upper_edge_mel - spectrogram_bins_mel) /\n (upper_edge_mel - center_mel))\n # .. then intersect them with each other and zero.\n mel_weights_matrix[:, i] = np.maximum(0.0, np.minimum(lower_slope,\n upper_slope))\n # HTK excludes the spectrogram DC bin; make sure it always gets a zero\n # coefficient.\n mel_weights_matrix[0, :] = 0.0\n return mel_weights_matrix\n\n\ndef log_mel_spectrogram(data,\n audio_sample_rate=8000,\n log_offset=0.0,\n window_length_secs=0.025,\n hop_length_secs=0.010,\n **kwargs):\n \"\"\"Convert waveform to a log magnitude mel-frequency spectrogram.\n\n Args:\n data: 1D np.array of waveform data. (80000)\n audio_sample_rate: The sampling rate of data. 16000\n log_offset: Add this to values when taking log to avoid -Infs. 0.01\n window_length_secs: Duration of each window to analyze. 0.025\n hop_length_secs: Advance between successive analysis windows(连续分析窗口之间的前进). 0.01\n num_mel_bins 64\n lower_edge_hertz 125\n upper_edge_hertz 7500 \n **kwargs: Additional arguments to pass to spectrogram_to_mel_matrix.\n\n Returns:\n 2D np.array of (num_frames, num_mel_bins) consisting of log mel filterbank\n magnitudes for successive frames.\n \"\"\"\n window_length_samples = int(round(audio_sample_rate * window_length_secs)) #400\n hop_length_samples = int(round(audio_sample_rate * hop_length_secs)) #160\n fft_length = 2 ** int(np.ceil(np.log(window_length_samples) / np.log(2.0))) #512\n spectrogram = stft_magnitude( #magnitude:强度\n data,\n fft_length=fft_length,\n hop_length=hop_length_samples,\n window_length=window_length_samples) #(498,257)\n mel_spectrogram = np.dot(spectrogram, spectrogram_to_mel_matrix(\n num_spectrogram_bins=spectrogram.shape[1],\n audio_sample_rate=audio_sample_rate, **kwargs)) #(498,64)\n return np.log(mel_spectrogram + log_offset)\n" ]
[ [ "numpy.log", "numpy.minimum", "numpy.linspace", "numpy.arange", "numpy.lib.stride_tricks.as_strided", "numpy.floor", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
divyanshupandey/Twitter_sentiment_analysis
[ "94128ea799c44f402edfa7193146cba9d5ed6936" ]
[ "plot.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 27 17:55:49 2018\n\n@author: user\n\"\"\"\n# importing necessary libraries\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\n\n#setting graph for pie chart\nfig1 = plt.figure(1, figsize=(6, 6))\nax = fig1.add_axes([0.1, 0.1, 0.8, 0.8])\nclasses = [\"positive\",\"negative\",\"neutral\"]\n\n#setting graph for line chart\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\n\n#plotting both the graphs \ndef pie_plot(i):\n xs = []\n ys = []\n value = []\n \n #using file data for preparing graphs\n file_data = open('prediction_files/output.txt','r').read()\n predict_data = open('prediction_files/prediction.txt','r').read()\n \n if(file_data == \"\"):\n value.append(1)\n value.append(1)\n value.append(1)\n\n else:\n for val in file_data.split(\",\"):\n value.append(int(val))\n \n if(predict_data != \"\"):\n for val in predict_data.split(\"\\n\"):\n if val != \"\":\n xs.append(int(val.split(\",\")[0]))\n ys.append(int(val.split(\",\")[1]))\n \n #plotting both the graphs\n ax1.clear()\n ax1.plot(xs, ys)\n ax.clear()\n pies = ax.pie(value,colors=['g','r','c'], labels=classes, autopct='%1.1f%%')\n \nani = animation.FuncAnimation(fig1, pie_plot, interval=1000)\nani1 = animation.FuncAnimation(fig, pie_plot, interval=1000)\nplt.show()" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.show", "matplotlib.animation.FuncAnimation", "matplotlib.style.use" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yexiguafuqihao/crowddet-megengine
[ "866c0fd3767e8f3cce84a78efc0ff95f23ef6b61" ]
[ "model/cascade.rcnn/yexiguafu/res50.rcnn.one.head.two.stages.baseline/config.py" ]
[ "import os\nimport sys\nimport os.path as osp\nimport numpy as np\nimport getpass\nimport pdb\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\n\n\nroot_dir = '../../../..'\nadd_path(os.path.join(root_dir))\nadd_path(os.path.join(root_dir, 'lib'))\nadd_path(os.path.join(root_dir, 'utils'))\n\nclass Crowd_human:\n class_names = ['background', 'person']\n num_classes = len(class_names)\n root_folder = '/home/zhenganlin/june/CrowdHuman'\n image_folder = osp.join(root_folder, 'images')\n eval_source = osp.join(root_folder, 'crowd_human_test4370_final_unsure_fixempty_fixvis_vboxmerge.odgt')\n train_source = osp.join(root_folder, 'crowd_human_train15000_final_unsure_fixempty_fixvis_vboxmerge.odgt')\n\nclass Config:\n\n usr = getpass.getuser()\n this_model_dir = osp.split(osp.split(osp.realpath(__file__))[0])[-1]\n workspace = osp.split(osp.realpath(__file__))[0]\n output_dir = osp.join(root_dir, 'output', usr, 'cascade.rcnn', this_model_dir)\n model_dir = osp.join(output_dir, 'model_dump')\n eval_dir = osp.join(output_dir, 'eval_dump')\n\n pretrain_weight = '/home/zhenganlin/june/CrowdHuman/resnet50_fbaug_76254_4e14b7d1.pkl'\n\n # ----------data config---------- #\n image_mean = np.array([103.530, 116.280, 123.675])\n image_std = np.array([57.375, 57.120, 58.395])\n train_image_short_size = 800\n train_image_max_size = 1400\n eval_resize = True\n eval_image_short_size = 800\n eval_image_max_size = 1400\n seed_dataprovider = 3\n train_source = Crowd_human.train_source\n eval_source = Crowd_human.eval_source\n train_json, eval_json = train_source, eval_source\n image_folder = Crowd_human.image_folder\n class_names = Crowd_human.class_names\n num_classes = Crowd_human.num_classes\n class_names2id = dict(list(zip(class_names, list(range(num_classes)))))\n gt_boxes_name = 'fbox'\n\n backbone_freeze_at = 2\n rpn_channel = 256\n \n # ----------train config---------- #\n batch_per_gpu = 2\n basic_lr = 1e-3 * 1.25\n momentum = 0.9\n weight_decay = 1e-4\n\n warm_iters = 800\n max_epoch = 35\n lr_decay_rate = 0.1\n lr_decay_sates = [20, 26]\n nr_images_epoch = 15000\n\n log_dump_interval = 1\n\n # ----------test config---------- #\n test_cls_threshold = 0.05\n test_nms_version = 'normal_nms'\n test_max_boxes_per_image = 300 #200\n test_save_type = 'human'\n test_nms = 0.5\n test_vis_threshold = 0.3\n\n # ----------model config---------- #\n batch_filter_box_size = 0\n nr_box_dim = 5\n nr_info_dim = 6\n ignore_label = -1\n max_boxes_of_image = 500\n\n # ----------rois generator config---------- #\n anchor_base_size = 8\n anchor_base_scale = [1]\n anchor_aspect_ratios = [1, 2, 3]\n num_cell_anchors = len(anchor_aspect_ratios)\n anchor_within_border = False\n\n rpn_min_box_size = 4\n rpn_nms_threshold = 0.7\n train_prev_nms_top_n = 12000\n train_post_nms_top_n = 2000\n test_prev_nms_top_n = 6000\n test_post_nms_top_n = 1500\n\n # ----------binding&training config---------- #\n rpn_smooth_l1_beta = 1\n rcnn_smooth_l1_beta = 1\n\n num_sample_anchors = 256\n positive_anchor_ratio = 0.5\n rpn_positive_overlap = 0.7\n rpn_negative_overlap = 0.3\n rpn_bbox_normalize_targets = False\n\n num_rois = 512\n fg_ratio = 0.5\n fg_threshold = 0.5\n bg_threshold_high = 0.5\n bg_threshold_low = 0.0\n rcnn_bbox_normalize_targets = True\n bbox_normalize_means = np.array([0, 0, 0, 0]).astype(np.float32)\n bbox_normalize_stds = np.array([0.1, 0.1, 0.2, 0.2]).astype(np.float32)\n\nconfig = Config()\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Shushman/PlaNet
[ "a27a119f6a165da01cca6da211afb51cd1f92537" ]
[ "memory.py" ]
[ "import numpy as np\nimport torch\nfrom env import postprocess_observation, preprocess_observation_\n\n\nclass ExperienceReplay():\n def __init__(self, size, symbolic_env, observation_size, action_size, bit_depth, device):\n self.device = device\n self.symbolic_env = symbolic_env\n self.size = size\n self.observations = np.empty((size, observation_size) if symbolic_env else (size, 3, 64, 64), dtype=np.float32 if symbolic_env else np.uint8)\n self.actions = np.empty((size, action_size), dtype=np.float32)\n self.rewards = np.empty((size, ), dtype=np.float32) \n self.nonterminals = np.empty((size, 1), dtype=np.float32)\n self.idx = 0\n self.full = False # Tracks if memory has been filled/all slots are valid\n self.steps, self.episodes = 0, 0 # Tracks how much experience has been used in total\n self.bit_depth = bit_depth\n\n def append(self, observation, action, reward, done):\n if self.symbolic_env:\n self.observations[self.idx] = observation.numpy()\n else:\n self.observations[self.idx] = postprocess_observation(observation.numpy(), self.bit_depth) # Decentre and discretise visual observations (to save memory)\n self.actions[self.idx] = action.numpy()\n self.rewards[self.idx] = reward\n self.nonterminals[self.idx] = not done\n self.idx = (self.idx + 1) % self.size\n self.full = self.full or self.idx == 0\n self.steps, self.episodes = self.steps + 1, self.episodes + (1 if done else 0)\n\n # Returns an index for a valid single sequence chunk uniformly sampled from the memory\n def _sample_idx(self, L):\n valid_idx = False\n while not valid_idx:\n idx = np.random.randint(0, self.size if self.full else self.idx - L)\n idxs = np.arange(idx, idx + L) % self.size\n valid_idx = not self.idx in idxs[1:] # Make sure data does not cross the memory index\n return idxs\n\n def _retrieve_batch(self, idxs, n, L):\n vec_idxs = idxs.transpose().reshape(-1) # Unroll indices\n observations = torch.as_tensor(self.observations[vec_idxs].astype(np.float32))\n if not self.symbolic_env:\n preprocess_observation_(observations, self.bit_depth) # Undo discretisation for visual observations\n return observations.reshape(L, n, *observations.shape[1:]), self.actions[vec_idxs].reshape(L, n, -1), self.rewards[vec_idxs].reshape(L, n), self.nonterminals[vec_idxs].reshape(L, n, 1)\n\n # Returns a batch of sequence chunks uniformly sampled from the memory\n def sample(self, n, L):\n batch = self._retrieve_batch(np.asarray([self._sample_idx(L) for _ in range(n)]), n, L)\n return [torch.as_tensor(item).to(device=self.device) for item in batch]\n\n\n # Hierarchical sampling; first sample a point at random from the full buffer\n # Then collect the data within that episode and sample again\n def _sample_idx_episode(self, L):\n valid_idx = False\n while not valid_idx:\n # Sample from full buffer at random\n idx = np.random.randint(0, self.size if self.full else self.idx - L)\n\n # Set off on both sides until you reach terminal on either side\n ep_start = idx\n ep_end = idx\n while ep_start > 0 and self.nonterminals[ep_start - 1] != 0.0:\n ep_start -= 1\n while ep_end < self.size and self.nonterminals[ep_end] != 0.0:\n ep_end += 1\n \n # Now sample from within episode start to episode end\n idx = np.random.randint(ep_start, min(ep_end+1, self.size) - L)\n idxs = np.arange(idx, idx + L)\n valid_idx = not self.idx in idxs[1:] # Make sure data does not cross the memory index\n\n return idxs\n \n # TODO: Ensure that BATCH has chunks all from the same latent configuration\n def sample_episode(self, n, L):\n batch = self._retrieve_batch(np.asarray([self._sample_idx_episode(L) for _ in range(n)]), n, L)\n return [torch.as_tensor(item).to(device=self.device) for item in batch]" ]
[ [ "numpy.arange", "torch.as_tensor", "numpy.empty", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Tramac/MaiHaHi-pytorch
[ "61ddd2b0cdf53fdc17642423f7a4431b4a2512c3" ]
[ "utils/face_alignment/detection/sfd/sfd_detector.py" ]
[ "import os\nimport cv2\nimport torch\nfrom torch.utils.model_zoo import load_url\n\nfrom ..core import FaceDetector\n\nfrom .net_s3fd import s3fd\nfrom .bbox import *\nfrom .detect import *\n\nmodels_urls = {\n 's3fd': 'https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth',\n}\n\n\nclass SFDDetector(FaceDetector):\n def __init__(self, device='cuda', path_to_detector=None, \n verbose=False, filter_threshold=0.5):\n super(SFDDetector, self).__init__(device, verbose)\n\n # Initialise the face detector\n if path_to_detector is None:\n model_weights = load_url(models_urls['s3fd'])\n else:\n model_weights = torch.load(path_to_detector)\n\n self.fiter_threshold = filter_threshold\n self.face_detector = s3fd()\n self.face_detector.load_state_dict(model_weights)\n self.face_detector.to(device)\n self.face_detector.eval()\n\n def _filter_bboxes(self, bboxlist):\n if len(bboxlist) > 0:\n keep = nms(bboxlist, 0.3)\n bboxlist = bboxlist[keep, :]\n bboxlist = [x for x in bboxlist if x[-1] > self.fiter_threshold]\n\n return bboxlist\n\n def detect_from_image(self, tensor_or_path):\n image = self.tensor_or_path_to_ndarray(tensor_or_path)\n\n bboxlist = detect(self.face_detector, image, device=self.device)\n bboxlist = self._filter_bboxes(bboxlist)\n\n return bboxlist\n\n def detect_from_batch(self, images):\n bboxlists = batch_detect(self.face_detector, images, device=self.device)\n keeps = [nms(bboxlists[:, i, :], 0.3) for i in range(bboxlists.shape[1])]\n bboxlists = [bboxlists[keep, i, :] for i, keep in enumerate(keeps)]\n bboxlists = [[x for x in bboxlist if x[-1] > self.fiter_threshold] for bboxlist in bboxlists]\n\n return bboxlists\n\n @property\n def reference_scale(self):\n return 195\n\n @property\n def reference_x_shift(self):\n return 0\n\n @property\n def reference_y_shift(self):\n return 0\n" ]
[ [ "torch.utils.model_zoo.load_url", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alburke/hagelslag
[ "fb94c775232da316893e04104761f483b88f4db5" ]
[ "demos/obj_tracking.py" ]
[ "#!/bin/env python\n\n# coding: utf-8\n\n# Severe Weather Forecasting with Python and Data Science Tools: Interactive Demo\n# David John Gagne, University of Oklahoma and NCAR\n# Introduction\n# Severe weather forecasting has entered an age of unprecedented access to large model and observational datasets with even greater hordes of data in the pipeline. With multiple ensembles of convection-allowing models available and an increasing variety of observations derived from radar, satellite, surface, upper air, and crowd-sourcing, forecasters can easily be overwhelmed with guidance. Without ways to organize, synthesize, and visualize the data in a useful manner for forecasters, the pile of new models and observations will languish unused and will not fulfill their full potential. An even worse outcome would be to take the human forecasters completely out of the loop and trust the models, which is a way fraught with peril. Data science tools offer ways to synthesize essential information from many disparate data sources while also quantifying uncertainty. When forecasters use the tools properly, they can identify potential hazards and the associated spatial and time uncertainties more quickly by using the output of the tools to help target their domain knowledge.\n# This module demonstrates how data science tools from the image processing and machine learning families can be used to create a forecast of severe hail. It aims to teach the advantages, challenges, and limitations of these tools through hands-on interaction.\n# \n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon, PathPatch\nfrom matplotlib.collections import PatchCollection\nfrom datetime import datetime, timedelta\nfrom scipy.ndimage import gaussian_filter, find_objects\nfrom copy import deepcopy\nimport pdb, sys, argparse, os\n\n\n# In[2]:\n\nfrom hagelslag.processing.EnhancedWatershedSegmenter import EnhancedWatershed\nfrom hagelslag.data import ModelOutput\nfrom hagelslag.processing.ObjectMatcher import ObjectMatcher, closest_distance\nfrom hagelslag.processing import STObject\n\nparser = argparse.ArgumentParser(description='object tracker')\nparser.add_argument('-m', '--member', type=str, help='member description (e.g. 1km_pbl7)', default='1km_on_3km_pbl1')\nparser.add_argument('-d', '--date', help='date yyyymmddhh', default='2005011312')\nparser.add_argument('-f', '--field', default='UP_HELI_MAX03', help='field in which to find objects')\nparser.add_argument('-t','--timethresh', type=int, default=3, help='time threshold (hours)')\nparser.add_argument('-v','--verbose', action=\"store_true\", help='print more output. useful for debugging')\nargs = parser.parse_args()\n\nif args.verbose: \n print(args)\n\nodir = '/glade/p/work/ahijevyc/hagelslag/out/'\nmodel_path = \"/glade/scratch/ahijevyc/VSE/\"\nensemble_name = \"NCAR\"\nrun_date = datetime.strptime(args.date,'%Y%m%d%H')\nmember = args.member\nfield = args.field\nstart_date = run_date + timedelta(hours=10) # remember first time is usually all zeros\n\n# Attributes:\n# min_thresh (int): minimum pixel value for pixel to be part of a region\n# data_increment (int): quantization interval. Use 1 if you don't want to quantize\n# max_thresh (int): values greater than maxThresh are treated as the maximum threshold\n# size_threshold_pixels (int): clusters smaller than this threshold are ignored.\n# delta (int): maximum number of data increments the cluster is allowed to range over. Larger d results in clusters over larger scales.\n\n# From ahij's config file.\nif field == \"MAX_UPDRAFT_HELICITY\" or field == \"UP_HELI_MAX03\":\n params = {\"min_thresh\":75, \"step\":5, \"max_thresh\":250, \"max_size\":50, \"delta\":75, \"min_size\":1, \"filter_size\":0}\nif field == \"HAIL2D\":\n params = {\"min_thresh\":0.025, \"step\":0.005, \"max_thresh\":0.1, \"max_size\":150, \"delta\":75, \"min_size\":0, \"filter_size\":1}\nlevels = params['min_thresh'] * np.arange(1,8)\nlevels = np.append(levels, params['min_thresh'] * 15)\nmodel_watershed_params = (params['min_thresh'],params['step'],params['max_thresh'],params[\"max_size\"],params[\"delta\"])\n\nend_date = start_date + timedelta(hours=0)\n\nfrom netCDF4 import Dataset\nmodel_grid = ModelOutput(ensemble_name, \n member, \n run_date, \n field, \n start_date, \n end_date,\n model_path,\n single_step=True)\nmodel_map_file=\"/glade/p/work/ahijevyc/hagelslag/mapfiles/VSE.txt\"\n\nmodel_grid.load_map_info(model_map_file)\nmodel_grid.data = []\n\nd = start_date\ndeltat = timedelta(minutes=60)\n\ndef add_grid(m):\n m.drawstates()\n m.drawcoastlines(linewidth=0.5)\n parallels = np.arange(0.,81.,1.)\n m.drawparallels(parallels,labels=[True,False,False,False],linewidth=0.5)\n meridians = np.arange(0.,351.,2.)\n m.drawmeridians(meridians,labels=[False,False,False,True],linewidth=0.5)\n return m\n \n\nwhile d <= end_date:\n fhr = (d - run_date).total_seconds()/3600\n # list of potential paths to diagnostic files\n dfiles = [model_path+member+run_date.strftime(\"/%Y%m%d%H\")+\"/wrf/wrfout_d01_\"+d.strftime(\"%Y-%m-%d_%H:%M:%S\"),\n model_path+member+run_date.strftime(\"/%Y%m%d%H\")+\"/wrf/diags_d01.\"+d.strftime(\"%Y-%m-%d_%H:%M:%S\")+\".nc\",\n model_path+member+run_date.strftime(\"/%Y%m%d%H\")+\"/post_AGAIN/\"+'fhr_%d'%fhr+\"/WRFTWO\"+'%02d'%fhr+\".nc\",\n model_path+member+run_date.strftime(\"/%Y%m%d%H\")+\"/wrf/vse_d01.\"+d.strftime(\"%Y-%m-%d_%H:%M:%S\")+\".nc\"]\n for dfile in dfiles:\n # see if each path exists\n if not os.path.isfile(dfile):\n continue\n # If it does, see if 'field' is a variable.\n ncf = Dataset(dfile)\n if field in ncf.variables:\n print(dfile)\n model_grid.data.append(ncf.variables[field][0,:,:])\n ncf.close()\n break\n ncf.close()\n d += deltat\n\nprint(model_grid.lon.shape, np.maximum.reduce(model_grid.data).shape) # max across time dimension \nprint(model_grid.data[0].max(), model_grid.data[-1].max(), np.maximum.reduce(model_grid.data).max())\n\nplt.figure(figsize=(10,8))\n\nplt.contourf(model_grid.lon, model_grid.lat,\n np.maximum.reduce(model_grid.data), # max across time dimension \n levels,\n extend=\"max\",\n latlon= True,\n cmap=\"Accent\")\nplt.colorbar(shrink=0.9, fraction=0.1, ticks=levels)\ntitle_info = plt.title(field + \"\\n\"+member+\" {0}-{1}\".format(start_date.strftime(\"%d %b %Y %H:%M\"),\n end_date.strftime(\"%d %b %Y %H:%M\")),\n fontweight=\"bold\", fontsize=14)\ndtstr = \"_\"+member+run_date.strftime(\"_%Y%m%d%H\")\nret = plt.savefig(odir+\"uh_swaths/\"+field+\"_swaths\"+dtstr+\".png\")\n\n\ndef get_forecast_objects(model_grid, ew_params, min_size, gaussian_window):\n ew = EnhancedWatershed(*ew_params)\n model_objects = []\n print(\"Find model objects Hour:\")\n for h in range(int((model_grid.end_date - model_grid.start_date).total_seconds()/deltat.total_seconds())+1):\n print(h)\n hour_labels = ew.size_filter(ew.label(gaussian_filter(model_grid.data[h], gaussian_window)), min_size)\n obj_slices = find_objects(hour_labels)\n num_slices = len(obj_slices)\n model_objects.append([])\n if num_slices > 0:\n fig, ax = plt.subplots()\n t = plt.contourf(model_grid.lon,model_grid.lat,hour_labels,np.arange(0,num_slices+1)+0.5,extend=\"max\",cmap=\"Set1\",latlon=True,title=str(run_date)+\" \"+field+\" \"+str(h))\n ret = plt.savefig(odir+\"enh_watershed_ex/ew{0:02d}.png\".format(h))\n for s, sl in enumerate(obj_slices): \n model_objects[-1].append(STObject(model_grid.data[h][sl],\n #np.where(hour_labels[sl] > 0, 1, 0),\n # For some objects (especially long, diagonal ones), the rectangular\n # slice encompasses part of other objects (i.e. non-zero elements of slice).\n # We don't want them in our mask.\n np.where(hour_labels[sl] == s+1, 1, 0),\n model_grid.x[sl], \n model_grid.y[sl], \n model_grid.i[sl], \n model_grid.j[sl],\n h,\n h,\n dx=model_grid.dx))\n if h > 0:\n dims = model_objects[-1][-1].timesteps[0].shape\n model_objects[-1][-1].estimate_motion(h, model_grid.data[h-1], dims[1], dims[0])\n return model_objects\n\nmodel_objects = get_forecast_objects(model_grid, model_watershed_params, params['min_size'], params['filter_size'])\n\n\n# In[12]:\n\ndef track_forecast_objects(input_model_objects, model_grid, object_matcher):\n model_objects = deepcopy(input_model_objects)\n hours = np.arange(int((model_grid.end_date-model_grid.start_date).total_seconds()/deltat.total_seconds()) + 1)\n print(\"hours = \",hours)\n tracked_model_objects = []\n for h in hours:\n past_time_objs = []\n for obj in tracked_model_objects:\n # Potential trackable objects are identified\n # In other words, objects whose end_time is the previous hour\n if obj.end_time == h - 1:\n past_time_objs.append(obj)\n # If no objects existed in the last time step, then consider objects in current time step all new\n if len(past_time_objs) == 0:\n print(\"time\",h, \" no objects existed in the last time step. consider objects in current time step all new\")\n tracked_model_objects.extend(deepcopy(model_objects[h]))\n # Match from previous time step with current time step\n elif len(past_time_objs) > 0 and len(model_objects[h]) > 0:\n assignments = object_matcher.match_objects(past_time_objs, model_objects[h], h - 1, h)\n print(\"assignments:\", assignments)\n unpaired = range(len(model_objects[h]))\n for pair in assignments:\n past_time_objs[pair[0]].extend(model_objects[h][pair[1]])\n unpaired.remove(pair[1])\n if len(unpaired) > 0:\n for up in unpaired:\n tracked_model_objects.append(model_objects[h][up])\n print(\"Tracked Model Objects: {0:03d} hour {1:2d}\".format(len(tracked_model_objects), h))\n return tracked_model_objects\n\n#object_matcher = ObjectMatcher([shifted_centroid_distance, centroid_distance], \n# np.array([dist_weight, 1-dist_weight]), np.array([max_distance] * 2))\nobject_matcher = ObjectMatcher([closest_distance],np.array([1]),np.array([4*model_grid.dx]))\n\n\n" ]
[ [ "scipy.ndimage.gaussian_filter", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.colorbar", "numpy.append", "scipy.ndimage.find_objects", "numpy.maximum.reduce", "numpy.array", "numpy.where", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
mufernando/pyslim
[ "f9067959dfa412534ff25683fa47daea27347b96" ]
[ "pyslim/slim_tree_sequence.py" ]
[ "import attr\nimport struct\nimport msprime\nimport tskit\nimport kastore\nimport json\nfrom collections import OrderedDict\nimport warnings\nimport numpy as np\n\nfrom .slim_metadata import *\nfrom .provenance import *\nfrom .slim_metadata import _decode_mutation_pre_nucleotides\n\nINDIVIDUAL_ALIVE = 2**16\nINDIVIDUAL_REMEMBERED = 2**17\nINDIVIDUAL_FIRST_GEN = 2**18\n\n# A nucleotide k in mutation metadata actually means\n# something that in reference_sequence is NUCLEOTIDES[k]\nNUCLEOTIDES = ['A', 'C', 'G', 'T']\n\ndef load(path):\n '''\n Load the SLiM-compatible tree sequence found in the .trees file at ``path``.\n\n :param string path: The path to a .trees file.\n '''\n ts = SlimTreeSequence.load(path)\n return ts\n\n\ndef load_tables(tables, **kwargs):\n '''\n See :func:`SlimTreeSequence.load_tables`.\n\n :param TableCollection tables: A set of tables.\n '''\n ts = SlimTreeSequence.load_tables(tables, **kwargs)\n return ts\n\n\ndef annotate_defaults(ts, model_type, slim_generation, reference_sequence=None):\n '''\n Takes a tree sequence (as produced by msprime, for instance), and adds in the\n information necessary for SLiM to use it as an initial state, filling in\n mostly default values. Returns a :class:`SlimTreeSequence`.\n\n :param TreeSequence ts: A :class:`TreeSequence`.\n :param string model_type: SLiM model type: either \"WF\" or \"nonWF\".\n :param int slim_generation: What generation number in SLiM correponds to\n ``time=0`` in the tree sequence.\n '''\n tables = ts.dump_tables()\n annotate_defaults_tables(tables, model_type, slim_generation)\n return SlimTreeSequence.load_tables(tables, \n reference_sequence=reference_sequence)\n\n\ndef annotate_defaults_tables(tables, model_type, slim_generation):\n '''\n Does the work of :func:`annotate_defaults()`, but modifies the tables in place: so,\n takes tables as produced by ``msprime``, and makes them look like the\n tables as output by SLiM. See :func:`annotate_defaults` for details.\n '''\n if (type(slim_generation) is not int) or (slim_generation < 1):\n raise ValueError(\"SLiM generation must be an integer and at least 1.\")\n # set_nodes must come before set_populations\n if model_type == \"WF\":\n default_ages = -1\n elif model_type == \"nonWF\":\n default_ages = 0\n else:\n raise ValueError(\"Model type must be 'WF' or 'nonWF'\")\n _set_nodes_individuals(tables, age=default_ages)\n _set_populations(tables)\n _set_sites_mutations(tables)\n _set_provenance(tables, model_type=model_type, slim_generation=slim_generation)\n\n\nclass SlimTreeSequence(tskit.TreeSequence):\n '''\n This is just like a :class:`tskit.TreeSequence`, with a few more properties\n and methods, notably:\n \n - :meth:`.recapitate`\n \n You should create a :class:`.SlimTreeSequence` using one of\n \n - :meth:`.SlimTreeSequence.load_tables` :meth:`.SlimTreeSequence.load`,\n - :func:`.load`, or :func:`.load_tables`.\n \n :ivar slim_generation: The generation that the SLiM simulation was at upon writing;\n will be read from provenance if not provided.\n :ivar reference_sequence: None, or an string of length equal to the sequence\n length that gives the entire reference sequence for nucleotide models.\n :vartype slim_generation: int\n :vartype reference_sequence: string\n '''\n\n def __init__(self, ts, reference_sequence=None):\n provenance = get_provenance(ts)\n slim_generation = provenance.slim_generation\n if provenance.file_version != \"0.3\":\n warnings.warn(\"This is an v{} SLiM tree sequence.\".format(provenance.file_version) +\n \" When you write this out, \" +\n \"it will be converted to v0.3 (which you should do).\")\n tables = ts.dump_tables()\n if provenance.file_version == \"0.1\" or provenance.file_version == \"0.2\":\n # add empty nucleotide slots to metadata\n mut_bytes = tskit.unpack_bytes(tables.mutations.metadata,\n tables.mutations.metadata_offset)\n mut_metadata = [_decode_mutation_pre_nucleotides(md) \n for md in mut_bytes]\n annotate_mutation_metadata(tables, mut_metadata)\n if provenance.file_version == \"0.1\":\n # shift times\n node_times = tables.nodes.time + slim_generation\n tables.nodes.set_columns(\n flags=tables.nodes.flags,\n time=node_times,\n population=tables.nodes.population,\n individual=tables.nodes.individual,\n metadata=tables.nodes.metadata,\n metadata_offset=tables.nodes.metadata_offset)\n migration_times = tables.migrations.time + slim_generation\n tables.migrations.set_columns(\n left=tables.migrations.left,\n right=tables.migrations.right,\n node=tables.migrations.node,\n source=tables.migrations.source,\n dest=tables.migrations.dest,\n time=migration_times)\n upgrade_slim_provenance(tables)\n ts = tables.tree_sequence()\n provenance = get_provenance(ts)\n assert(provenance.file_version == \"0.3\")\n self._ll_tree_sequence = ts._ll_tree_sequence\n self.slim_generation = slim_generation\n self.reference_sequence = reference_sequence\n # pre-extract individual metadata\n self.individual_locations = ts.tables.individuals.location\n self.individual_locations.shape = (int(len(self.individual_locations)/3), 3)\n self.individual_ages = np.zeros(ts.num_individuals, dtype='int')\n if self.slim_provenance.model_type != \"WF\":\n self.individual_ages = np.fromiter(map(lambda ind: decode_individual(ind.metadata).age, ts.individuals()), dtype='int64')\n\n self.individual_times = np.zeros(ts.num_individuals)\n self.individual_populations = np.repeat(np.int32(-1), ts.num_individuals)\n npops = [len(set(self.node(n).population for n in ind.nodes)) for ind in ts.individuals()]\n ntimes = [len(set(self.node(n).time for n in ind.nodes)) for ind in ts.individuals()]\n if max(npops) > 1:\n raise ValueError(\"Individual has nodes from more than one population.\")\n if max(ntimes) > 1:\n raise ValueError(\"Individual has nodes from more than one time.\")\n has_indiv = (ts.tables.nodes.individual >= 0)\n which_indiv = ts.tables.nodes.individual[has_indiv]\n # if we did not do the sanity check above then an individual with nodes in more than one pop\n # would get the pop of their last node in the list\n self.individual_populations[which_indiv] = ts.tables.nodes.population[has_indiv]\n self.individual_times[which_indiv] = ts.tables.nodes.time[has_indiv]\n\n @classmethod\n def load(cls, path):\n '''\n Load a :class:`SlimTreeSequence` from a .trees file on disk.\n\n :param string path: The path to a .trees file.\n :rtype SlimTreeSequence:\n '''\n ts = tskit.load(path)\n # extract the reference sequence from the kastore\n kas = kastore.load(path)\n if 'reference_sequence/data' in kas:\n int_rs = kas['reference_sequence/data']\n reference_sequence = int_rs.tostring().decode('ascii')\n else:\n reference_sequence = None\n return cls(ts, reference_sequence)\n\n @classmethod\n def load_tables(cls, tables, **kwargs):\n '''\n Creates the :class:`SlimTreeSequence` defined by the tables.\n\n :param TableCollection tables: A set of tables, as produced by SLiM\n or by annotate_defaults().\n :param TableCollection reference_sequence: An optional string of ACGT giving\n the reference sequence.\n :rtype SlimTreeSequence:\n '''\n # a roundabout way to copy the tables\n ts = tables.tree_sequence()\n return cls(ts, **kwargs)\n\n def simplify(self, *args, **kwargs):\n '''\n This is a wrapper for :meth:`tskit.TreeSequence.simplify`.\n The only difference is that this method returns the\n derived class :class:`.SlimTreeSequence`.\n\n :rtype SlimTreeSequence:\n '''\n sts = super(SlimTreeSequence, self).simplify(*args, **kwargs)\n if (type(sts) == tuple):\n ret = (SlimTreeSequence(sts[0]), sts[1])\n ret[0].reference_sequence = self.reference_sequence\n else:\n ret = SlimTreeSequence(sts)\n ret.reference_sequence = self.reference_sequence\n\n return ret\n\n def population(self, id_):\n '''\n Returns the population whose ID is given by `id_`, as documented in\n :meth:`tskit.TreeSequence.population`, but with additional attributes::\n\n slim_id, selfing_fraction, female_cloning_fraction, \n male_cloning_fraction, sex_ratio, \n bounds_x0, bounds_x1, bounds_y0, bounds_y1, bounds_z0, bounds_z1, \n migration_records.\n\n These are all recorded by SLiM in the metadata.\n\n Note that SLiM populations are usually indexed starting from 1,\n but in tskit from zero, so there may be populations (e.g., with id_=0)\n that have no metadata and are not used by SLiM.\n\n :param int id_: The ID of the population (i.e., its index).\n '''\n pop = super(SlimTreeSequence, self).population(id_)\n try:\n pop.metadata = decode_population(pop.metadata)\n except:\n pass\n return pop\n\n def individual(self, id_):\n '''\n Returns the individual whose ID is given by `id_`, as documented in\n :meth:`tskit.TreeSequence.individual`, but with additional attributes::\n\n time, pedigree_id, age, slim_population, sex, slim_flags.\n\n The `time` and `population` properties are extracted from the nodes,\n and an error will be thrown if the individual's nodes derive from\n more than one population or more than one time.\n\n :param int id_: The ID of the individual (i.e., its index).\n '''\n ind = super(SlimTreeSequence, self).individual(id_)\n ind.population = self.individual_populations[id_]\n ind.time = self.individual_times[id_]\n try:\n ind.metadata = decode_individual(ind.metadata)\n except:\n pass\n return ind\n\n def node(self, id_):\n '''\n Returns the node whose ID is given by `id_`, as documented in\n :meth:`tskit.TreeSequence.node`, but with additional attributes::\n\n slim_id, is_null, genome_type.\n\n These are all recorded by SLiM in the metadata.\n\n :param int id_: The ID of the node (i.e., its index).\n '''\n node = super(SlimTreeSequence, self).node(id_)\n try:\n node.metadata = decode_node(node.metadata)\n except:\n pass\n return node\n\n def mutation(self, id_):\n '''\n Returns the mutation whose ID is given by `id_`, as documented in\n :meth:`tskit.TreeSequence.mutation`, but with additional attributes::\n\n mutation_type, selection_coeff, population, slim_time, nucleotide.\n\n These are all recorded by SLiM in the metadata.\n\n :param int id_: The ID of the mutation (i.e., its index).\n '''\n mut = super(SlimTreeSequence, self).mutation(id_)\n try:\n mut.metadata = decode_mutation(mut.metadata)\n except:\n pass\n return mut\n\n def recapitate(self, recombination_rate, recombination_map=None,\n keep_first_generation=False, population_configurations=None, \n **kwargs):\n '''\n Returns a \"recapitated\" tree sequence, by using msprime to run a\n coalescent simulation from the \"top\" of this tree sequence, i.e.,\n allowing any uncoalesced lineages to coalesce.\n\n To allow this process, the first generation of the SLiM simulation has been\n recorded in the tree sequence, but are not currently marked as samples,\n so this process (or, simplify()) will remove any of these that are not needed.\n If you want to keep them, then set ``keep_first_generation`` to True;\n although this will make more work here.\n\n This also means that you must *not* simplify before you recapitate your\n SLiM-produced tree sequence.\n\n Note that ``Ne`` is not set automatically, so defaults to ``1.0``; you probably\n want to set it explicitly. Similarly, migration is not set up\n automatically, so that if there are uncoalesced lineages in more than\n one population, you will need to pass in a migration matrix to allow\n coalescence. In both cases, remember that population IDs in ``tskit`` begin\n with 0, so that if your SLiM simulation has populations ``p1`` and ``p2``,\n then the tree sequence will have three populations (but with no nodes\n assigned to population 0), so that migration rate of 1.0 between ``p1`` and\n ``p2`` needs a migration matrix of::\n\n [[0.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]\n\n :param float recombination_rate: The recombination rate - only a constant\n recombination rate is allowed. This parameter cannot \n be used along with the ``recombination_map`` parameter, as these\n values are encoded within the map.\n :param recombination_map: The map describing the changing rates of \n recombination along the simulated chromosome. This parameter cannot \n be used along with the ``recombination_rate`` parameter, as these\n values are encoded within the map. Defaults to a uniform rate as\n described in the ``recombination_rate`` parameter if not specified.\n :type recombination_map: :class:`.RecombinationMap`\n :param bool keep_first_generation: Whether to keep the individuals (and genomes)\n corresponding to the first SLiM generation in the resulting tree sequence\n :param list population_configurations: See :meth:`msprime.simulate` for\n this argument; if not provided, each population will have zero growth rate\n and the same effective population size.\n :param dict kwargs: Any other arguments to :meth:`msprime.simulate`.\n '''\n if not recombination_map:\n recomb = msprime.RecombinationMap(positions = [0.0, self.sequence_length],\n rates = [recombination_rate, 0.0],\n num_loci = int(self.sequence_length))\n else:\n recomb = recombination_map\n\n if population_configurations is None:\n population_configurations = [msprime.PopulationConfiguration()\n for _ in range(self.num_populations)]\n\n if keep_first_generation:\n ts = self._mark_first_generation()\n else:\n ts = self\n\n recap = msprime.simulate(\n from_ts = ts,\n population_configurations = population_configurations,\n recombination_map = recomb,\n start_time = self.slim_generation,\n **kwargs)\n ts = SlimTreeSequence.load_tables(recap.tables)\n ts.reference_sequence = self.reference_sequence\n return ts\n\n def mutation_at(self, node, position, time=None):\n '''\n Finds the mutation present in the genome of ``node`` at ``position``,\n returning -1 if there is no such mutation recorded in the tree\n sequence. Warning: if ``node`` is not actually in the tree sequence\n (e.g., not ancestral to any samples) at ``position``, then this\n function will return -1, possibly erroneously. If `time` is provided,\n returns the last mutation at ``position`` inherited by ``node`` that\n occurred at or before ``time`` ago (using the `slim_time` attribute of\n mutation metadata to infer this).\n \n :param int node: The index of a node in the tree sequence.\n :param float position: A position along the genome.\n :param int time: The time ago that we want the nucleotide, or None,\n in which case the ``time`` of ``node`` is used.\n\n :returns: Index of the mutation in question, or -1 if none.\n '''\n if position < 0 or position >= self.sequence_length:\n raise ValueError(\"Position {} not valid.\".format(position))\n if node < 0 or node >= self.num_nodes:\n raise ValueError(\"Node {} not valid.\".format(node))\n if time is None:\n time = self.node(node).time\n tree = self.at(position)\n slim_time = self.slim_generation - time\n # Mutation's slim_times are one less than the corresponding node's slim times\n # in WF models, but not in WF models, for some reason.\n if self.slim_provenance.model_type == \"WF\":\n slim_time -= 1.0\n site_pos = self.tables.sites.position\n out = tskit.NULL\n if position in site_pos:\n site_index = np.where(site_pos == position)[0][0]\n site = self.site(site_index)\n mut_nodes = []\n # look for only mutations that occurred before `time`\n # not strictly necessary if time was None\n for mut in site.mutations:\n if len(mut.metadata) == 0:\n raise ValueError(\"All mutations must have SLiM metadata.\")\n if max([u.slim_time for u in mut.metadata]) <= slim_time:\n mut_nodes.append(mut.node)\n n = node\n while n > -1 and n not in mut_nodes:\n n = tree.parent(n)\n if n >= 0:\n # do careful error checking here\n for mut in site.mutations:\n if mut.node == n:\n assert(out == tskit.NULL or out == mut.parent)\n out = mut.id\n return out\n\n def nucleotide_at(self, node, position, time=None):\n '''\n Finds the nucleotide present in the genome of ``node`` at ``position``.\n Warning: if ``node`` is not actually in the tree sequence (e.g., not\n ancestral to any samples) at ``position``, then this function will\n return the reference sequence nucleotide, possibly erroneously. If\n `time` is provided, returns the last nucletide produced by a mutation\n at ``position`` inherited by ``node`` that occurred at or before\n ``time`` ago (using the `slim_time` attribute of mutation metadata\n to infer this).\n \n :param int node: The index of a node in the tree sequence.\n :param float position: A position along the genome.\n :param int time: The time ago that we want the nucleotide, or None,\n in which case the ``time`` of ``node`` is used.\n\n :returns: Index of the nucleotide in ``NUCLEOTIDES`` (0=A, 1=C, 2=G, 3=T).\n '''\n if self.reference_sequence is None:\n raise ValueError(\"This tree sequence has no reference sequence.\")\n mut_id = self.mutation_at(node, position, time)\n if mut_id == tskit.NULL:\n out = NUCLEOTIDES.index(self.reference_sequence[int(position)])\n else:\n mut = self.mutation(mut_id)\n k = np.argmax([u.slim_time for u in mut.metadata])\n out = mut.metadata[k].nucleotide\n return out\n\n @property\n def slim_provenance(self):\n '''\n Extracts model type, slim generation, and remembmered node count from the last\n entry in the provenance table that is tagged with \"program\"=\"SLiM\".\n\n :rtype ProvenanceMetadata:\n '''\n return get_provenance(self)\n\n def _mark_first_generation(self):\n '''\n Mark all 'first generation' individuals' nodes as samples, and return\n the corresponding tree sequence.\n '''\n tables = self.dump_tables()\n first_gen_nodes = ((tables.nodes.individual > 0)\n & ((tables.individuals.flags[tables.nodes.individual]\n & INDIVIDUAL_FIRST_GEN) > 0))\n if sum(first_gen_nodes) == 0:\n warnings.warn(\"Tree sequence does not have the initial generation; \" +\n \" did you simplify it after output from SLiM?\")\n flags = tables.nodes.flags\n flags[first_gen_nodes] = (flags[first_gen_nodes] | tskit.NODE_IS_SAMPLE)\n tables.nodes.set_columns(flags=flags, population=tables.nodes.population,\n individual=tables.nodes.individual, time=tables.nodes.time,\n metadata=tables.nodes.metadata,\n metadata_offset=tables.nodes.metadata_offset)\n ts = load_tables(tables)\n ts.reference_sequence = self.reference_sequence\n return ts\n\n def individuals_alive_at(self, time):\n \"\"\"\n Returns an array giving the IDs of all individuals that are known to be\n alive at the given time ago. This is determined by seeing if their age\n at `time`, determined since the time since they were born (their\n `.time` attribute) is less than or equal to their `age` attribute\n (which will reflect their age at the last time they were Remembered).\n\n :param float time: The time ago.\n \"\"\"\n births = self.individual_times\n ages = self.individual_ages\n alive_bool = np.logical_and(births >= time, births - ages <= time)\n return np.where(alive_bool)[0]\n\n def individual_ages_at(self, time):\n \"\"\"\n Returns the *ages* of each individual at the corresponding time ago,\n which will be `nan` if the individual is either not born yet or dead.\n This is computed as the time ago the individual was born (found by the\n `time` associated with the the individual's nodes) minus the `time`\n argument; while \"death\" is inferred from the individual's `age`,\n recorded in metadata.\n\n The age is the number of complete time steps the individual has lived\n through, so if they were born in time step `time`, then their age\n will be zero.\n\n :param float time: The reference time ago.\n \"\"\"\n ages = np.repeat(np.nan, self.num_individuals)\n alive = self.individuals_alive_at(time)\n ages[alive] = self.individual_times[alive] - time\n return ages\n\n def first_generation_individuals(self):\n \"\"\"\n Returns the IDs of the individuals remembered as part of the first SLiM generation,\n as determined by their flags.\n \"\"\"\n return np.where(self.tables.individuals.flags & INDIVIDUAL_FIRST_GEN > 0)[0]\n\n\ndef _set_nodes_individuals(\n tables, node_ind=None, location=(0, 0, 0), age=0, ind_id=None,\n ind_population=None, ind_sex=INDIVIDUAL_TYPE_HERMAPHRODITE,\n ind_flags=INDIVIDUAL_ALIVE, slim_ind_flags=0, node_id=None,\n node_is_null=False, node_type=GENOME_TYPE_AUTOSOME):\n '''\n Adds to a TableCollection the information relevant to individuals required\n for SLiM to load in a tree sequence, that is found in Node and Individual\n tables. This will replace any existing Individual table, and will replace\n any information already in the individual, metadata, and population columns\n of the Node table.\n\n This is designed to make it easy to assign default values:\n - (node_ind) the 2*j-th and (2*j+1)-st `sample` nodes to individual j\n - (location) individual locations to (0, 0, 0)\n - (age) individual age to 0\n - (ind_id) SLiM individual pedigree IDs to sequential integers starting from 0\n - (ind_population) individual populations to 0\n - (node_id) SLiM genome IDs to sequential integers starting with samples from 0\n - (node_is_null) genomes to be non-null\n - (node_type) genome type to 0 (= autosome)\n - (ind_flags) INDIVIDUAL_ALIVE\n\n If you have other situations, like non-alive \"remembered\" individuals, you\n will need to edit the tables by hand, afterwards.\n '''\n samples = list(filter(lambda j: tables.nodes.flags[j] & tskit.NODE_IS_SAMPLE,\n range(tables.nodes.num_rows)))\n if (len(samples) % 2) != 0:\n raise ValueError(\"There must be an even number of sampled nodes,\"\\\n + \"since organisms are diploid.\")\n\n if node_ind is None:\n node_ind = [tskit.NULL for _ in range(tables.nodes.num_rows)]\n for j, k in enumerate(samples):\n node_ind[j] = int(k/2)\n\n num_individuals = max(node_ind) + 1\n num_nodes = tables.nodes.num_rows\n\n if type(location) is tuple:\n location = [location for _ in range(num_individuals)]\n assert(len(location) == num_individuals)\n\n if type(age) is int or type(age) is float:\n age = [age for _ in range(num_individuals)]\n assert(len(age) == num_individuals)\n\n if ind_id is None:\n ind_id = list(range(num_individuals))\n assert(len(ind_id) == num_individuals)\n\n if type(ind_sex) is int:\n ind_sex = [ind_sex for _ in range(num_individuals)]\n assert(len(ind_sex) == num_individuals)\n\n if type(slim_ind_flags) is int:\n slim_ind_flags = [slim_ind_flags for _ in range(num_individuals)]\n assert(len(slim_ind_flags) == num_individuals)\n\n if type(ind_flags) is int:\n ind_flags = [ind_flags for _ in range(num_individuals)]\n assert(len(ind_flags) == num_individuals)\n\n if node_id is None:\n node_id = [-1 for _ in range(num_nodes)]\n for j, k in enumerate(list(samples)\n + sorted(list(set(range(num_nodes))\n - set(samples)))):\n node_id[k] = j\n assert(len(node_id) == num_nodes)\n\n if type(node_is_null) is bool:\n node_is_null = [node_is_null for _ in range(num_nodes)]\n assert(len(node_is_null) == num_nodes)\n\n if type(node_type) is int:\n node_type = [node_type for _ in range(num_nodes)]\n assert(len(node_type) == tables.nodes.num_rows)\n\n if ind_population is None:\n # set the individual populations based on what's in the nodes\n ind_population = [tskit.NULL for _ in range(num_individuals)]\n for j, u in enumerate(node_ind):\n if u >= 0:\n ind_population[u] = tables.nodes.population[j]\n assert(len(ind_population) == num_individuals)\n\n # check for consistency: every individual has two nodes, and populations agree\n ploidy = [0 for _ in range(num_individuals)]\n for j in samples:\n u = node_ind[j]\n assert(u >= 0)\n ploidy[u] += 1\n if tables.nodes.population[j] != ind_population[u]:\n raise ValueError(\"Inconsistent populations: nodes and individuals do not agree.\")\n\n if any([p != 2 for p in ploidy]):\n raise ValueError(\"Not all individuals have two assigned nodes.\")\n\n tables.nodes.set_columns(flags=tables.nodes.flags, time=tables.nodes.time,\n population=tables.nodes.population, individual=node_ind,\n metadata=tables.nodes.metadata,\n metadata_offset=tables.nodes.metadata_offset)\n\n loc_vec, loc_off = tskit.pack_bytes(location)\n tables.individuals.set_columns(\n flags=ind_flags, location=loc_vec, location_offset=loc_off)\n\n individual_metadata = [IndividualMetadata(*x) for x in\n zip(ind_id, age, ind_population, ind_sex, slim_ind_flags)]\n node_metadata = [None for _ in range(num_nodes)]\n for j in samples:\n node_metadata[j] = NodeMetadata(slim_id=node_id[j], is_null=node_is_null[j],\n genome_type=node_type[j])\n\n annotate_individual_metadata(tables, individual_metadata)\n annotate_node_metadata(tables, node_metadata)\n\n\ndef _set_populations(\n tables, pop_id=None, selfing_fraction=0.0, female_cloning_fraction=0.0,\n male_cloning_fraction=0.0, sex_ratio=0.5, bounds_x0=0.0, bounds_x1=0.0,\n bounds_y0=0.0, bounds_y1=0.0, bounds_z0=0.0, bounds_z1=0.0,\n migration_records=None):\n '''\n Adds to a TableCollection the information about populations required for SLiM\n to load a tree sequence. This will replace anything already in the Population\n table.\n '''\n num_pops = max(tables.nodes.population) + 1\n for md in tskit.unpack_bytes(tables.individuals.metadata,\n tables.individuals.metadata_offset):\n try:\n ind_md = decode_individual(md)\n except:\n raise ValueError(\"Individuals do not have metadata:\"\n + \"need to run set_nodes_individuals() first?\")\n assert(ind_md.population < num_pops)\n if pop_id is None:\n pop_id = list(range(num_pops))\n assert(len(pop_id) == num_pops)\n\n if type(selfing_fraction) is float:\n selfing_fraction = [selfing_fraction for _ in range(num_pops)]\n assert(len(selfing_fraction) == num_pops)\n\n if type(female_cloning_fraction) is float:\n female_cloning_fraction = [female_cloning_fraction for _ in range(num_pops)]\n assert(len(female_cloning_fraction) == num_pops)\n\n if type(male_cloning_fraction) is float:\n male_cloning_fraction = [male_cloning_fraction for _ in range(num_pops)]\n assert(len(male_cloning_fraction) == num_pops)\n\n if type(sex_ratio) is float:\n sex_ratio = [sex_ratio for _ in range(num_pops)]\n assert(len(sex_ratio) == num_pops)\n\n if type(bounds_x0) is float:\n bounds_x0 = [bounds_x0 for _ in range(num_pops)]\n assert(len(bounds_x0) == num_pops)\n\n if type(bounds_x1) is float:\n bounds_x1 = [bounds_x1 for _ in range(num_pops)]\n assert(len(bounds_x1) == num_pops)\n\n if type(bounds_y0) is float:\n bounds_y0 = [bounds_y0 for _ in range(num_pops)]\n assert(len(bounds_y0) == num_pops)\n\n if type(bounds_y1) is float:\n bounds_y1 = [bounds_y1 for _ in range(num_pops)]\n assert(len(bounds_y1) == num_pops)\n\n if type(bounds_z0) is float:\n bounds_z0 = [bounds_z0 for _ in range(num_pops)]\n assert(len(bounds_z0) == num_pops)\n\n if type(bounds_z1) is float:\n bounds_z1 = [bounds_z1 for _ in range(num_pops)]\n assert(len(bounds_z1) == num_pops)\n\n if migration_records is None:\n migration_records = [[] for _ in range(num_pops)]\n assert(len(migration_records) == num_pops)\n for mrl in migration_records:\n for mr in mrl:\n assert(type(mr) is PopulationMigrationMetadata)\n\n population_metadata = [PopulationMetadata(*x) for x in\n zip(pop_id, selfing_fraction, female_cloning_fraction,\n male_cloning_fraction, sex_ratio, bounds_x0,\n bounds_x1, bounds_y0, bounds_y1, bounds_z0, bounds_z1,\n migration_records)]\n annotate_population_metadata(tables, population_metadata)\n\n\ndef _set_sites_mutations(\n tables, mutation_id=None, mutation_type=1, selection_coeff=0.0,\n population=tskit.NULL, slim_time=None):\n '''\n Adds to a TableCollection the information relevant to mutations required\n for SLiM to load in a tree sequence. This means adding to the metadata column\n of the Mutation table, It will also\n - give SLiM IDs to each mutation\n - round Site positions to integer values\n - stack any mutations that end up at the same position as a result\n - replace ancestral states with \"\"\n This will replace any information already in the metadata or derived state\n columns of the Mutation table.\n '''\n num_mutations = tables.mutations.num_rows\n\n if mutation_id is None:\n mutation_id = list(range(num_mutations))\n assert(len(mutation_id) == num_mutations)\n\n if type(mutation_type) is int:\n mutation_type = [mutation_type for _ in range(num_mutations)]\n assert(len(mutation_type) == num_mutations)\n\n if type(selection_coeff) is float:\n selection_coeff = [selection_coeff for _ in range(num_mutations)]\n assert(len(selection_coeff) == num_mutations)\n\n if type(population) is int:\n population = [population for _ in range(num_mutations)]\n assert(len(population) == num_mutations)\n\n if slim_time is None:\n ## This may *not* make sense because we have to round:\n # slim_time = [(-1) * int(tables.nodes.time[u]) for u in tables.mutations.node]\n slim_time = [0 for _ in range(num_mutations)]\n assert(len(slim_time) == num_mutations)\n\n mutation_metadata = [[MutationMetadata(*x)] for x in\n zip(mutation_type, selection_coeff, population, slim_time)]\n annotate_mutation_metadata(tables, mutation_metadata)\n\n############\n# Provenance\n############\n# See provenances.py for the structure of a Provenance entry.\n\n\ndef _set_provenance(tables, model_type, slim_generation):\n '''\n Appends to the provenance table of a :class:`TableCollection` a record containing\n the information that SLiM expects to find there.\n\n :param TableCollection tables: The table collection.\n :param string model_type: The model type: either \"WF\" or \"nonWF\".\n :param int slim_generation: The \"current\" generation in the SLiM simulation.\n '''\n pyslim_dict = make_pyslim_provenance_dict()\n slim_dict = make_slim_provenance_dict(model_type, slim_generation)\n tables.provenances.add_row(json.dumps(pyslim_dict))\n tables.provenances.add_row(json.dumps(slim_dict))\n\n" ]
[ [ "numpy.int32", "numpy.argmax", "numpy.repeat", "numpy.logical_and", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shobrook/topigraph
[ "abd18b48507f76ebac87c7e2241063a4aa5367a6" ]
[ "topigraph/topigraph.py" ]
[ "# Third Party\nfrom nltk import pos_tag\nfrom nltk.tokenize import word_tokenize\nfrom nltk.probability import FreqDist\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom numpy import dot\nfrom numpy.linalg import norm\nfrom youtube_transcript_api import YouTubeTranscriptApi\nfrom wikipedia import search as wikisearch\nfrom graphlou import detect_communities\n\n# Standard Library\nimport heapq\nfrom collections import defaultdict\n\n\ndef fetch_transcript(video_id=\"RD-9Ghvt480\", max_num_lines=None):\n data = YouTubeTranscriptApi.get_transcript(video_id)\n num_lines = 0\n for line in data:\n if max_num_lines and num_lines >= max_num_lines:\n break\n\n num_lines += 1\n yield line[\"text\"]\n\n\ndef preprocess_sentences(transcript):\n stop_words = set(stopwords.words(\"english\"))\n lemmatizer = WordNetLemmatizer()\n for sentence in transcript:\n word_tokens = (w.lower() for w in word_tokenize(sentence))\n word_tokens = (w for w in word_tokens if w not in stop_words)\n word_tokens = ((lemmatizer.lemmatize(w), w) for w in word_tokens)\n\n yield word_tokens\n\n\ndef vectorize_sentences(pp_transcript, max_vec_size=25):\n dataset = []\n word2count = defaultdict(lambda: 0)\n for sentence in pp_transcript:\n sentence_for_dataset = []\n for (lemma, word) in sentence:\n word2count[lemma] += 1\n sentence_for_dataset.append(word)\n\n dataset.append(sentence_for_dataset)\n\n freq_words = heapq.nlargest(max_vec_size, word2count, key=word2count.get)\n sent_vectors = []\n for sentence in dataset:\n sent_vector = [sentence.count(w) for w in freq_words]\n sent_vectors.append(sent_vector)\n\n return sent_vectors, dataset\n\n\ndef create_similarity_matrix(sent_vectors):\n sim_matrix = []\n for i, source in enumerate(sent_vectors):\n row = []\n for j, target in enumerate(sent_vectors):\n # Keeps matrix left-triangular\n if j > i:\n break\n\n # TODO: Use something better than cosine similarity\n numer = dot(source, target)\n denom = norm(source) * norm(target)\n cosine_sim = numer / denom if denom else 0.0\n row.append(cosine_sim)\n\n sim_matrix.append(row)\n\n return sim_matrix\n\n\ndef extract_topic_labels(clusters, node2sent):\n # valid_tags = (\"NN\", \"NNS\", \"NNP\", \"NNPS\")\n valid_tags = (\"NNP\")\n topic_labels = []\n for cluster in clusters:\n words = [word for node in cluster for word in node2sent[node]]\n keywords = [word for word, pos in pos_tag(words) if pos in valid_tags]\n keywords = keywords[:10] # TODO: Rank keywords\n\n if not keywords:\n continue\n\n topic_labels.append(wikisearch(\" \".join(keywords))[0])\n\n return topic_labels\n\n\nif __name__ == \"__main__\":\n print(\"\\tFetching transcript\")\n\n transcript = fetch_transcript(max_num_lines=200)\n\n print(\"\\tPreprocessing transcript\")\n\n pp_transcript = preprocess_sentences(transcript)\n\n print(\"\\tVectorizing sentences in transcript\")\n\n sent_vectors, node2sent = vectorize_sentences(pp_transcript)\n\n print(\"\\tCreating similarity matrix\")\n\n sim_matrix = create_similarity_matrix(sent_vectors)\n\n print(\"\\tClustering sentences with Louvain's algorithm (takes an eternity)\")\n\n clusters = detect_communities(sim_matrix)\n\n print(\"\\tExtracting topic labels\")\n\n topics = extract_topic_labels(clusters, node2sent)\n\n print(topics)\n" ]
[ [ "numpy.dot", "numpy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
sebdenis/pdsim
[ "1219d257d4952f396022f2a41c245765c8728ab0" ]
[ "PDSim/core/core.py" ]
[ "from __future__ import division, absolute_import, print_function\n\nimport math\nfrom math import pi\nfrom timeit import default_timer\nimport inspect\nimport six\n\n##-- Package imports --\nfrom PDSim.flow import flow,flow_models\nfrom .containers import STATE_VARS_TM, CVArrays, ControlVolumeCollection,TubeCollection\nfrom PDSim.flow.flow import FlowPathCollection\nfrom . import integrators\nfrom PDSim.misc.datatypes import arraym, empty_arraym\nimport PDSim.core.callbacks\nfrom PDSim.misc.error_bar import error_ascii_bar\n\n##-- Non-package imports --\nimport numpy as np\n\n# If scipy is available, use its optimization function, otherwise, \n# use our implementation (for packaging purposes)\ntry:\n from scipy.integrate import trapz\nexcept ImportError:\n from PDSim.misc.scipylike import trapz\n\nimport h5py\n\n# An empty class for storage\nclass struct(object):\n pass \n \nclass IntegratorMixin(object):\n \"\"\"\n This class contains the methods that will be merged with one of the system of ODE integrators\n and includes the methods that are specific to PDSim\n \"\"\"\n def __init__(self, sim, x_state):\n self.sim = sim\n self.x_state = x_state\n \n def get_initial_array(self):\n # Get the beginning of the cycle configured\n # Put a copy of the values into the matrices\n xold = self.x_state.copy()\n self.sim._put_to_matrices(xold, 0)\n return xold\n \n def premature_termination(self):\n # Once every 100 steps check if you are supposed to abort\n if self.sim._check_cycle_abort(self.Itheta):\n return 'abort'\n else:\n return False\n \n def pre_step_callback(self):\n # Call the step callback if provided\n if self.sim.callbacks.step_callback is not None:\n self.h = self.sim.callbacks.step_callback(self.t0, self.h, self.Itheta)\n disable = self.sim.callbacks.step_callback.disable_adaptive\n \n # If we don't want to actually do the step, rather just copy the values\n # (for instance at the merging angle for scroll machines), we set\n # stepAccepted to True and move on\n if disable == 'no_integrate':\n self.stepAccepted = True\n # Retrieve the array of values based on the values set in step_callback\n x = self.sim._get_from_matrices(self.Itheta)\n # Updates the state, calculates the volumes, prepares all the things needed for derivatives\n # The crank angle must be evaluated after theta_d in order to ensure that the new volumes are used\n # The newly calculated values are used\n self.sim.core.properties_and_volumes(self.sim.CVs.exists_CV, self.t0+self.h+1e-10, STATE_VARS_TM, x)\n self.xold = x.copy()\n self.xnew = x.copy()\n self.__store_values()\n \n x = self.sim._get_from_matrices(self.Itheta)\n \n if disable != False and x.all_finite():\n self.xold = self.sim._get_from_matrices(self.Itheta)\n \n # The integrator only cares whether disable is true or not, convert to true or false\n if disable != False:\n self.disableAdaptive = True\n else:\n self.disableAdaptive = False\n \n def __store_values(self):\n \"\"\" Private method that stores the values in the internal data structure \"\"\"\n self.sim.t[self.Itheta] = self.t0\n self.sim._put_to_matrices(self.xold, self.Itheta)\n flows = self.sim.Flows.get_deepcopy()\n # If the index we want to fill is beyond the length of FlowStorage, we append it,\n # otherwise, we replace that entry in the list\n if self.Itheta > len(self.sim.FlowStorage)-1:\n self.sim.FlowStorage.append(flows)\n else:\n self.sim.FlowStorage[self.Itheta] = flows\n \n def post_deriv_callback(self):\n self.__store_values()\n \n def post_step_callback(self): pass\n \n def derivs(self, t, x):\n return self.sim.derivs(t, x)\n \n def post_integration(self):\n \"\"\"\n Run this at the end\n \"\"\"\n # Cache the values\n self.sim.derivs(self.t0, self.xold)\n self.__store_values()\n \n V, dV = self.sim.CVs.volumes(self.t0)\n Nexist = self.sim.CVs.Nexist\n if sorted(self.sim.stateVariables) == ['D','T']:\n self.sim.CVs.updateStates('T',self.xnew[0:Nexist],'D',self.xnew[Nexist:2*Nexist])\n elif sorted(self.sim.stateVariables) == ['M','T']:\n self.sim.CVs.updateStates('T',self.xnew[0:Nexist],'D',self.xnew[Nexist:2*Nexist]/V)\n else:\n raise NotImplementedError\n \nclass EulerIntegrator(IntegratorMixin, integrators.AbstractSimpleEulerODEIntegrator):\n \"\"\" \n Mixin class using the functions defined in IntegratorMixin and the generalized simple ODE \n \"\"\"\n def __init__(self, sim, x_state):\n IntegratorMixin.__init__(self, sim, x_state)\n \nclass HeunIntegrator(IntegratorMixin, integrators.AbstractHeunODEIntegrator):\n \"\"\" \n Mixin class using the functions defined in IntegratorMixin and the generalized Heun ODE integrator\n \"\"\"\n def __init__(self, sim, x_state):\n IntegratorMixin.__init__(self, sim, x_state)\n \nclass RK45Integrator(IntegratorMixin, integrators.AbstractRK45ODEIntegrator):\n \"\"\" \n Mixin class using the functions defined in IntegratorMixin and the generalized RK45 integrator\n \"\"\"\n def __init__(self, sim, x_state):\n IntegratorMixin.__init__(self, sim, x_state)\n\nclass PDSimCore(object):\n \"\"\"\n This is the main driver class for the model\n \n This class is not intended to be run on its own. It must be subclassed and extended to provide functions for mass flow, etc. \n \n The basic order of steps that should be followed can be summarized as\n \n #. Instantiate the subclass of PDSimCore\n #. Add each of the control volumes\n #. Add each of the tubes\n #. Add all the flow models between CV and tubes\n #. Add valves (if applicable)\n #. Connect the callbacks for heat transfer, step, etc.\n #. Run the model\n\n \"\"\"\n def __init__(self,stateVariables=None):\n \"\"\"\n Initialization of the PDSimCore\n \n Parameters\n ----------\n stateVariables : mutable object [list or tuple], optional\n list of keys for the state variables to be used. Current options are 'T','D' or 'T','M'. Default state variables are 'T','M'\n \"\"\"\n #Initialize the containers to be empty\n \n #: The Valves container class\n self.Valves = []\n\n #: The :class:`ControlVolumeCollection <PDSim.core.containers.ControlVolumeCollection>` instance\n #: that contains all the control volumes in the machine\n self.CVs = ControlVolumeCollection()\n \n #: The :class:`FlowPathCollection <PDSim.flow.flow.FlowPathCollection>` \n #: instance\n self.Flows = FlowPathCollection()\n \n #: A :class:`list` that contains copies of the \n #: :class:`FlowPathCollection <PDSim.flow.flow.FlowPathCollection>` \n #: at each crank angle\n self.FlowStorage = []\n \n self.Tubes = TubeCollection()\n self.Tlumps = np.zeros((1,1))\n self.steps = []\n self.__hasValves__ = False\n \n # A storage of the initial state vector\n self.xstate_init = None\n \n # A storage of the initial valves vector\n if isinstance(stateVariables,(list,tuple)):\n self.stateVariables=list(stateVariables)\n else:\n self.stateVariables=['T','M']\n self._want_abort = False\n \n # Build a structure to hold all the callbacks\n self.callbacks = PDSim.core.callbacks.CallbackContainer()\n \n # Build a dummy class to hold information from the solvers\n class dummy: pass\n self.solvers = dummy()\n self.solvers.lump_eb_history = []\n self.solvers.hdisc_history = []\n self.solvers.initial_states_history = []\n \n self.verbosity = 0\n \n self.summary = dummy()\n \n def _check(self):\n \"\"\"\n Do some checking before we start the run.\n \n Here we check:\n \n * Inlet state viscosity and conductivity must be greater than zero\n \"\"\"\n \n if self.inlet_state.get_visc() < 0:\n raise ValueError('Your inlet state viscosity is less than zero. Invalid fluid: ' +self.inlet_state.Fluid)\n if self.inlet_state.get_cond() < 0:\n raise ValueError('Your inlet state conductivity is less than zero. Invalid fluid: '+self.inlet_state.Fluid)\n \n def _get_from_matrices(self,i):\n \"\"\"\n Get values back from the matrices and reconstruct the state variable list\n \"\"\"\n if self.__hasLiquid__==True:\n raise NotImplementedError\n else:\n ValList = []\n exists_indices = np.array(self.CVs.exists_indices)\n for s in self.stateVariables:\n if s=='T':\n ValList += self.T[exists_indices,i].tolist()\n elif s=='D':\n ValList += self.rho[exists_indices,i].tolist()\n elif s=='M':\n ValList += self.m[exists_indices,i].tolist()\n else:\n raise KeyError\n \n if self.__hasValves__:\n # Also store the valve values\n ValList += self.xValves[:,i].tolist()\n\n return arraym(ValList)\n \n def _statevars_to_dict(self,x):\n d={}\n for iS,s in enumerate(self.stateVariables):\n x_=(x[iS*self.CVs.Nexist:self.CVs.Nexist*(iS+1)])\n if s=='T':\n d['T']=x_\n elif s=='D':\n d['D']=x_\n elif s=='M':\n d['M']=x_\n return d\n \n def _put_to_matrices(self,x,i):\n \"\"\"\n Take a state variable list and put back in numpy matrices\n \"\"\"\n exists_indices=self.CVs.exists_indices\n Nexist = self.CVs.Nexist\n Ns = len(self.stateVariables)\n assert(len(x) == len(self.Valves)*2+Ns*Nexist)\n if self.__hasLiquid__==True:\n raise NotImplementedError\n# self.T[:,i]=x[0:self.NCV]\n# self.m[:,i]=x[self.NCV:2*self.NCV]\n else: # self.__hasLiquid__==False\n for iS, s in enumerate(self.stateVariables):\n if s=='T':\n self.T[exists_indices, i] = x[iS*self.CVs.Nexist:self.CVs.Nexist*(iS+1)]\n elif s=='D':\n self.rho[exists_indices, i] = x[iS*self.CVs.Nexist:self.CVs.Nexist*(iS+1)]\n elif s=='M':\n self.m[exists_indices, i] = x[iS*self.CVs.Nexist:self.CVs.Nexist*(iS+1)]\n # Left over terms are for the valves\n if self.__hasValves__:\n self.xValves[0:len(self.Valves)*2, i] = arraym(x[Ns*Nexist:len(x)])\n \n # In the first iteration, self.core has not been filled, so do not \n # overwrite with the values in self.core.m and self.core.rho\n if self.core.m[0] > 0.0 :\n self.m[exists_indices, i] = self.core.m\n if self.core.rho[0] > 0.0 :\n self.rho[exists_indices, i] = self.core.rho\n \n self.V[exists_indices, i] = self.core.V\n self.dV[exists_indices, i] = self.core.dV\n self.p[exists_indices, i] = self.core.p\n self.h[exists_indices, i] = self.core.h\n self.Q[exists_indices,i] = self.core.Q\n \n def _postprocess_flows(self):\n \"\"\"\n In this private method, the flows from each of the flow nodes are summed for \n each step of the revolution, and then averaged flow rates are calculated.\n \"\"\"\n \n def sum_flows(key,Flows):\n \"\"\"\n Sum all the terms for a given flow key. \n \n Flows \"into\" the node are positive, flows out of the \n node are negative\n \n Use the code in the Cython module\n \"\"\"\n return flow.sumterms_given_CV(key, Flows)\n \n def collect_keys(Tubes,Flows):\n \"\"\"\n Get all the keys for a given collection of flow elements\n \"\"\"\n keys=[]\n for Tube in Tubes:\n if Tube.key1 not in keys:\n keys.append(Tube.key1)\n if Tube.key2 not in keys:\n keys.append(Tube.key2)\n for Flow in Flows:\n if Flow.key1 not in keys:\n keys.append(Flow.key1)\n if Flow.key2 not in keys:\n keys.append(Flow.key2)\n return keys\n \n # Get all the nodes that can exist for tubes and CVs\n keys=collect_keys(self.Tubes,self.Flows)\n \n # Get the instantaneous net flow through each node\n # and the averaged mass flow rate through each node\n self.FlowsProcessed=struct()\n self.FlowsProcessed.summed_mdot={}\n self.FlowsProcessed.summed_mdoth={}\n self.FlowsProcessed.mean_mdot={}\n self.FlowsProcessed.integrated_mdoth={}\n self.FlowsProcessed.integrated_mdot={}\n self.FlowsProcessed.t=self.t[0:self.Ntheta]\n\n for key in keys:\n # Empty container numpy arrays\n self.FlowsProcessed.summed_mdot[key]=np.zeros((self.Ntheta,))\n self.FlowsProcessed.summed_mdoth[key]=np.zeros((self.Ntheta,))\n \n assert self.Ntheta == len(self.FlowStorage)\n for i in range(self.Ntheta):\n mdot,mdoth=sum_flows(key,self.FlowStorage[i])\n self.FlowsProcessed.summed_mdot[key][i]=mdot\n self.FlowsProcessed.summed_mdoth[key][i]=mdoth\n \n # All the calculations here should be done in the time domain,\n # rather than crank angle. So convert angle to time by dividing \n # by omega, the rotational speed in rad/s.\n trange = self.t[self.Ntheta-1]-self.t[0]\n # integrated_mdoth has units of kJ/rev * f [Hz] --> kJ/s or kW\n self.FlowsProcessed.integrated_mdoth[key]=trapz(self.FlowsProcessed.summed_mdoth[key], \n self.t[0:self.Ntheta]/self.omega)*self.omega/trange\n # integrated_mdot has units of kg/rev * f [Hz] --> kg/s\n self.FlowsProcessed.integrated_mdot[key]=trapz(self.FlowsProcessed.summed_mdot[key], \n self.t[0:self.Ntheta]/self.omega)*self.omega/trange\n self.FlowsProcessed.mean_mdot[key]=np.mean(self.FlowsProcessed.integrated_mdot[key])\n \n # Special-case the tubes. Only one of the nodes can have flow. \n # The other one is invariant because it is quasi-steady.\n for Tube in self.Tubes:\n mdot1 = self.FlowsProcessed.mean_mdot[Tube.key1]\n mdot2 = self.FlowsProcessed.mean_mdot[Tube.key2]\n mdot_i1 = self.FlowsProcessed.integrated_mdot[Tube.key1]\n mdot_i2 = self.FlowsProcessed.integrated_mdot[Tube.key2]\n mdoth_i1 = self.FlowsProcessed.integrated_mdoth[Tube.key1]\n mdoth_i2 = self.FlowsProcessed.integrated_mdoth[Tube.key2]\n #Swap the sign so the sum of the mass flow rates is zero\n self.FlowsProcessed.mean_mdot[Tube.key1] -= mdot2\n self.FlowsProcessed.mean_mdot[Tube.key2] -= mdot1\n self.FlowsProcessed.integrated_mdot[Tube.key1] -= mdot_i2\n self.FlowsProcessed.integrated_mdot[Tube.key2] -= mdot_i1\n self.FlowsProcessed.integrated_mdoth[Tube.key1] -= mdoth_i2\n self.FlowsProcessed.integrated_mdoth[Tube.key2] -= mdoth_i1\n \n #For each tube, update the flow going through it\n #Tube.mdot is always a positive value\n Tube.mdot = max(abs(mdot1), abs(mdot2))\n \n self.mdot = self.FlowsProcessed.mean_mdot[self.key_inlet]\n \n self.FlowsProcessed.collected_data = []\n \n for i, Flow in enumerate(self.Flows):\n \n mdot = np.array([Flows[i].mdot for Flows in self.FlowStorage])\n edot = np.array([Flows[i].edot for Flows in self.FlowStorage])\n \n data = dict(key1 = Flow.key1,\n key2 = Flow.key2,\n fcn = Flow.MdotFcn_str,\n mdot = mdot,\n edot = edot,\n mdot_average = np.trapz(mdot, self.t[0:self.Ntheta])/(self.t[self.Ntheta-1]-self.t[0]),\n Edot_average = np.trapz(edot, self.t[0:self.Ntheta])/(self.t[self.Ntheta-1]-self.t[0]) \n )\n \n self.FlowsProcessed.collected_data.append(data)\n \n def _postprocess_HT(self):\n \"\"\"\n Postprocess the heat transfer terms\n \n Here we\n calculate the mean heat transfer rate over the course of the cycle \n \"\"\"\n self.HTProcessed=struct()\n r = list(range(self.Ntheta))\n \n #Remove all the NAN placeholders and replace them with zero values\n self.Q[np.isnan(self.Q)] = 0.0\n \n #Sum at each step of the revolution\n self.HTProcessed.summed_Q = np.sum(self.Q, axis = 0) #kW\n \n #Get the mean heat transfer rate\n self.HTProcessed.mean_Q = trapz(self.HTProcessed.summed_Q[r], self.t[r])/(self.t[self.Ntheta-1]-self.t[0])\n \n \n def guess_outlet_temp(self, inlet_state, p_outlet, eta_a=0.7):\n \"\"\" \n Function to guess outlet temperature\n \n Using a guess value for the adiabatic efficiency, calculate the guessed\n outlet temperature. In compressor mode, the adiabatic efficiency is defined by\n \n .. math::\n \n \\eta_a = \\\\frac{h_{2s}-h_1}{h_2-h_1}\n \n and in expander mode it is defined by\n \n .. math::\n \n \\eta_a = \\\\frac{h_2-h_1}{h_{2s}-h_1}\n \n This function can also be overloaded by the subclass in order to \n implement a different guess method\n \"\"\"\n \n h1 = inlet_state.h\n out_state = inlet_state.copy()\n out_state.update(dict(S = inlet_state.s, P = p_outlet))\n h2s = out_state.h\n if p_outlet > inlet_state.p:\n # Compressor Mode\n h2 = h1 + (h2s-h1)/eta_a\n else:\n # Expander Mode\n h2 = h1 + (h2s-h1)*eta_a\n out_state.update(dict(H = h2, P = p_outlet))\n return out_state.T\n \n def reset_initial_state(self):\n \"\"\"\n Reset the initial state of the core class, typically after doing a \n preconditioning run\n \"\"\"\n \n for k,CV in zip(self.CVs.keys,self.CVs.CVs):\n if k in self.exists_CV_init:\n CV.exists = True\n else:\n CV.exists = False\n\n #Update the existence of each of the CV\n self.update_existence()\n \n #Only the State variables, not the valves\n self.x_state = self.xstate_init\n #Make a copy\n x = self.xstate_init.copy()\n #Add the values from the valves\n if self.__hasValves__:\n x.extend(empty_arraym(2*len(self.Valves)))\n self._put_to_matrices(x, 0)\n #Reset the temporary variables\n self.xstate_init = None\n self.exists_CV_init = None\n \n def update_existence(self):\n \"\"\"\n Update existence flags for Tubes and control volumes\n \n This function is required to be called when the existence of any control\n volume or tube changes in order to ensure that internal flags are set\n properly\n \"\"\"\n \n # Update the existence flags in all the control volumes\n self.CVs.rebuild_exists()\n \n # Update the array of enthalpies in the tubes\n self.Tubes.update_existence(self.CVs.Nexist)\n \n # Update the existence of each of the flows\n self.Flows.update_existence(self)\n \n # Update the sizes of the internal arrays in self.core \n self.core.update_size(self.CVs.Nexist)\n \n def add_flow(self,FlowPath):\n \"\"\"\n Add a flow path to the model\n \n Parameters\n ----------\n FlowPath : :class:`FlowPath <PDSim.flow.flow.FlowPath>` instance\n An initialized flow path \n \"\"\"\n #Add FlowPath instance to the list of flow paths\n self.Flows.append(FlowPath)\n \n def add_CV(self,CV):\n \"\"\"\n Add a control volume to the model\n \n Parameters\n ----------\n CV : :class:`ControlVolume <PDSim.core.containers.ControlVolume>` instance\n An initialized control volume\n \"\"\"\n \n if CV.key in self.CVs.keys:\n raise KeyError('Sorry but the key for your Control Volume ['+CV.key+'] is already in use')\n \n #Add the CV to the collection\n self.CVs.add(CV)\n self.CVs.rebuild_exists()\n \n def add_tube(self,Tube):\n \"\"\"\n Add a tube to the model.\n \n Parameters\n ----------\n Tube : :class:`Tube <PDSim.core.containers.Tube>` instance\n An initialized tube.\n \"\"\"\n #Add it to the list\n self.Tubes.append(Tube)\n self.Tubes.update()\n \n def add_valve(self,Valve):\n \"\"\"\n Add a valve to the model.\n \n Parameters\n ----------\n Valve : :class:`ValveModel <PDSim.flow.flow_models.ValveModel>` instance\n An initialized valve.\n \"\"\"\n #Add it to the list\n self.Valves.append(Valve)\n self.__hasValves__=True\n \n def pre_run(self, N = 40000):\n \"\"\"\n This function gets called before the run begins. It builds large matrices\n to store values, and does other initialization. \n \"\"\"\n # Build the full numpy arrays for temperature, volume, etc.\n self.t=np.zeros((N,))\n self.T=np.zeros((self.CVs.N,N))\n self.T.fill(np.nan)\n self.p=self.T.copy()\n self.h = self.T.copy()\n self.m = self.T.copy()\n self.V = self.T.copy()\n self.dV = self.T.copy()\n self.rho = self.T.copy()\n self.Q = self.T.copy()\n self.xValves = np.zeros((2*len(self.Valves),N))\n \n # Initialize the core class that contains the arrays and the derivs\n self.core = CVArrays(0)\n \n # Update the existence of all the control volumes\n self.update_existence()\n \n # Set a flag about liquid flooding\n self.__hasLiquid__ = False\n \n def pre_cycle(self, x0 = None):\n \"\"\"\n This runs before the cycle is run but after pre_run has been called\n \n Parameters\n ----------\n x0 : :class:`arraym <PDSim.misc.datatypes.arraym>` instance\n \"\"\"\n self.t.fill(np.nan)\n self.T.fill(np.nan)\n self.p.fill(np.nan)\n self.m.fill(np.nan)\n self.V.fill(np.nan)\n self.dV.fill(np.nan)\n self.rho.fill(np.nan)\n self.Q.fill(np.nan)\n \n self.FlowStorage=[]\n \n #Get the volumes at theta=0\n #Note: needs to occur in this function because V needed to calculate mass a few lines below\n VdV=[CV.V_dV(0.0,**CV.V_dV_kwargs) for CV in self.CVs.exists_CV]\n V,dV = zip(*VdV)\n \n self.t[0]=0\n \n # If x0 is provided, use its values to initialize the chamber states\n if x0 is None:\n # self.CVs.exists_indices is a list of indices of the CV with the same order of entries\n # as the entries in self.CVs.T\n self.T[self.CVs.exists_indices, 0] = self.CVs.T\n self.p[self.CVs.exists_indices, 0] = self.CVs.p\n self.rho[self.CVs.exists_indices, 0] = self.CVs.rho\n self.m[self.CVs.exists_indices, 0] = self.CVs.rho*arraym(V)\n else:\n #x0 is provided, but need to pad it out to include valve values\n x0_ = x0.copy()\n \n # If x0 is longer than the product of the number of state variables \n # and CV in existence, the valve data is already included and must not be \n # added to the array of independent variables\n if self.__hasValves__ and len(x0) == self.CVs.Nexist*len(self.stateVariables):\n #Load up the rest of the array with zeros since the valves start closed and at rest\n x0_.extend(empty_arraym(len(self.Valves)*2))\n self._put_to_matrices(x0_, 0)\n \n # Assume all the valves to be fully closed and stationary at the beginning of cycle\n self.xValves[:,0]=0\n \n self.Tubes_hdict={}\n for Tube in self.Tubes:\n self.Tubes_hdict[Tube.key1]=Tube.State1.get_h()\n self.Tubes_hdict[Tube.key2]=Tube.State2.get_h()\n \n def calc_boundary_work(self):\n \"\"\"\n This method calculates the boundary work rate using a trapezoidal \n integration of\n \n .. math::\n \n \\\\dot W_{pv} = -\\int p\\\\frac{dV}{d\\\\theta}\\\\frac{\\\\omega}{2\\\\pi} d\\\\theta\n \n for all the control volumes and sets the parameter ``self.Wdot_pv`` with \n the result.\n \n The units of the boundary work are kW.\n \"\"\"\n \n def Wdot_one_CV(CVindex):\n \"\"\" calculate the p-v work for one CV \"\"\"\n \n x0_raw = self.t[0:self.Ntheta]\n y0_raw = self.p[CVindex, 0:self.Ntheta]*self.dV[CVindex, 0:self.Ntheta]\n \n # Convert into chunks that are delimited by nan, if any\n isnotnan_indices = np.flatnonzero(~np.isnan(y0_raw))\n breaks = np.flatnonzero(np.diff(isnotnan_indices) > 1)\n\n if len(breaks) != 0:\n chunks = np.split(isnotnan_indices, np.array(breaks)+1)\n else:\n chunks = [isnotnan_indices]\n \n return -sum([trapz(y0_raw[ii], x0_raw[ii]) for ii in chunks])*self.omega/(2*pi)\n \n self.Wdot_pv = 0.0\n for CVindex in range(self.p.shape[0]):\n self.Wdot_pv+=Wdot_one_CV(CVindex)\n \n def post_cycle(self):\n \"\"\"\n This stuff all happens at the end of the cycle. It is a private method \n not meant to be called externally\n \n The following things are done:\n \n #. The boundary work is calculated\n #. The flows are post-processed\n #. The heat transfer is post-processed\n #. The mass flow rate is calculated\n #. The volumetric efficiency is calculated\n #. The adiabatic efficiency is calculated\n #. The isentropic power is calculated\n #. The power input is calculated\n \"\"\"\n \n self.calc_boundary_work()\n self._postprocess_flows()\n self._postprocess_HT()\n \n # Calculate the lumped mass energy balance\n if self.callbacks.lumps_energy_balance_callback is not None:\n self.lumps_resid = self.callbacks.lumps_energy_balance_callback()\n # Convert to an arraym if needed\n if not isinstance(self.lumps_resid, arraym):\n self.lumps_resid = arraym(self.lumps_resid)\n else:\n raise ValueError('lumps_energy_balance_callback cannot be None')\n \n if not hasattr(self,'Qamb'):\n self.Qamb = 0\n \n # The total mass flow rate\n self.mdot = self.FlowsProcessed.mean_mdot[self.key_inlet]\n \n for key, State in six.iteritems(self.Tubes.Nodes):\n if key == self.key_inlet:\n inletState = State\n if key == self.key_outlet:\n outletState = State\n\n try:\n Vdisp = self.Vdisp\n\n except:\n Vdisp = self.Vdisp()\n\n self.eta_v = self.mdot / (self.omega/(2*pi)*Vdisp*inletState.rho)\n\n h1 = inletState.h\n h2 = outletState.h\n s1 = inletState.s\n\n # Can't use intermediate temperature because the state might be two-phase\n # for some conditions and you are better off just calculating the enthalpy\n # directly\n temp = outletState.copy()\n temp.update(dict(P=outletState.p, S=s1))\n h2s = temp.h\n \n if outletState.p > inletState.p:\n # Compressor Mode\n self.eta_a = (h2s-h1)/(h2-h1)\n self.Wdot_i = self.mdot*(h2s-h1)\n else:\n # Expander Mode\n self.eta_a = (h1-h2)/(h1-h2s)\n self.Wdot_i = self.mdot*(h1-h2s) \n \n # self.Qamb is positive if heat is being added to the lumped mass\n self.Wdot = self.mdot*(h2-h1)-self.Qamb\n \n def _check_cycle_abort(self, index, I = 100):\n \"\"\"\n This function will check whether an abort has been requested every \n ``I`` steps of the solver throughout the rotation\n \n Meant for calling by cycle_RK45, cycle_SimpleEuler, cycle_Heun, etc.\n \n Primarily this is useful for use with the GUI, where the GUI can pass\n an abort command to the model\n \n Parameters\n ----------\n index : int\n The index of the step\n I : int, optional\n Check abort at this interval\n \n \"\"\"\n # % is the symbol for modulus in python\n if index % I == 0 and self.Abort():\n self._want_abort = True\n return True\n \n def check_abort(self):\n \"\"\"\n A callback for use with the graphical user interface to force solver to quit\n \n It will check the Scroll.pipe_abort pipe for a ``True`` value, and if it\n finds one, it will set the Scroll._want_abort value to ``True`` which \n will be read by the main execution thread\n \n Once ``self._want_abort`` is ``True``, it will stay latched ``True`` until the \n run is terminated\n \"\"\"\n \n # If you received an abort request, set a flag in the simulation\n if self.pipe_abort.poll() and self.pipe_abort.recv():\n print('received an abort request')\n self._want_abort = True\n \n # If the run has timed out, quit\n if default_timer() - self.start_time > self.timeout:\n print('run timed out')\n self._want_abort = True\n \n return self._want_abort\n \n def precond_solve(self,**kwargs):\n \"\"\"\n This function is deprecated and will be removed in a future version\n \"\"\"\n \n import warnings\n msg = 'precond_solve is deprecated and will be removed in a future version. Please use solve instead'\n warnings.warn(msg, DeprecationWarning)\n self.solve(**kwargs)\n \n def connect_callbacks(self,\n step_callback=None,\n heat_transfer_callback=None,\n lumps_energy_balance_callback=None,\n endcycle_callback=None\n ):\n \"\"\" \n Connect up the callbacks for the simulation\n \n The callbacks must either be unbound methods or methods of a class derived from PDSimCore\n \n No keyword arguments are supported to be passed to the callbacks. The \n callback is probably a bound method of a PDSimCore instance, in which \n case you have access to all the data in the class anyway\n \n Parameters\n ----------\n step_callback : function, or :class:`StepCallback <PDSim.core.callbacks.StepCallback>` subclass\n \n If a function is provided, it must have the call signature::\n \n disable_adaptive,h = step_callback(double t, double h, int i)\n \n where ``h`` is the step size that the adaptive solver wants to use, ``t`` is the current value of the independent variable, and ``i`` is the index in the container variables. The return value ``disableAdaptive`` is a boolean value that describes whether the adaptive method should be turned off for this step ( ``False`` : use the adaptive method), and ``h`` is the step size you want to use. If you don't want to disable the adaptive method and use the given step size, just::\n \n return False,h\n \n in your code.\n \n heat_transfer_callback : function, or :class:`HeatTransferCallback <PDSim.core.callbacks.HeatTransferCallback>` subclass\n \n If a function is provided, the heat_transfer_callback function must have the call signature::\n \n Q = heat_transfer_callback(double t)\n \n It should return an :class:`arraym <PDSim.misc.datatypes.arraym>` instance \n with the same length as the number of CV in existence. \n The entry in the :class:`arraym <PDSim.misc.datatypes.arraym>` is \n positive if the heat transfer is TO the fluid in the CV in order \n to maintain the sign convention that energy (or mass) input is \n positive.\n \n lumps_energy_balance_callback : function, or :class:`LumpsEnergyBalanceCallback <PDSim.core.callbacks.LumpsEnergyBalanceCallback>` subclass\n \n If a function is provided, the lumps_energy_balance_callback \n function must have the call signature::\n \n r = lumps_energy_balance_callback()\n \n It should return an :class:`arraym <PDSim.misc.datatypes.arraym>` \n instance with the same length as the number of lumps. The entry in \n ``r`` is the value of the energy balance. It will be driven to zero \n by the solver\n \n \"\"\"\n \n if step_callback is None:\n #No callback is provided, don't do anything\n pass\n elif isinstance(step_callback, PDSim.core.callbacks.StepCallback):\n #If the cythonized step callback is provided, hold onto it\n self.callbacks.step_callback = step_callback\n #Otherwise, wrap the desired callback if it has the right signature\n else:\n #Check the functional call\n callargs = inspect.getcallargs(step_callback, 0.0, 1e-10, 0)\n \n # Either a non-bound method is provided, or bound method is provided, in which case you get self,t,h,i as the values\n # t is a subclass of float, h is a subclass of float, is a subclass of int, and self is subclass of PDSimCore\n if not all([isinstance(arg,(float,int,PDSimCore)) for arg in callargs.values()]):\n sig_ok = False\n else:\n if len(callargs) in [3,4]:\n sig_ok = True\n else:\n sig_ok = False\n \n if step_callback is not None and sig_ok:\n self.callbacks.step_callback = PDSim.core.callbacks.WrappedStepCallback(self, step_callback)\n else:\n raise ValueError(\"step_callback is not possible to be wrapped - neither a subclass of StepCallback nor acceptable function signature\")\n \n if heat_transfer_callback is None:\n #No callback is provided, don't do anything\n pass\n elif isinstance(heat_transfer_callback, PDSim.core.callbacks.HeatTransferCallback):\n #If the cythonized heat transfer callback is provided, hold a pointer to it\n self.callbacks.heat_transfer_callback = heat_transfer_callback\n else:\n callargs = inspect.getcallargs(heat_transfer_callback, 0.0)\n # Either a non-bound method is provided, or bound method is provided, in which case you get self,t as the values\n # t is a subclass of float, and self is subclass of PDSimCore\n if not all([isinstance(arg,(float,int,PDSimCore)) for arg in callargs.values()]):\n sig_ok = False\n else:\n if len(callargs) in [1,2]:\n sig_ok = True\n else:\n sig_ok = False\n \n #Otherwise, wrap the desired callback if it has the right signature\n if heat_transfer_callback is not None and sig_ok:\n self.callbacks.heat_transfer_callback = PDSim.core.callbacks.WrappedHeatTransferCallback(self, heat_transfer_callback)\n else:\n raise ValueError(\"heat_transfer_callback is not possible to be wrapped - neither a subclass of HeatTransferCallback nor an acceptable function\")\n \n if lumps_energy_balance_callback is None:\n #No callback is provided, don't do anything\n pass\n elif isinstance(lumps_energy_balance_callback, PDSim.core.callbacks.LumpsEnergyBalanceCallback):\n #If the cythonized lump energy balance callback is provided, hold onto it\n self.callbacks.lumps_energy_balance_callback = lumps_energy_balance_callback\n #Otherwise, wrap the desired callback if it has the right signature\n else:\n callargs = inspect.getcallargs(lumps_energy_balance_callback)\n # Either a non-bound method is provided, or bound method is provided, in which case you get self,t as the values\n # t is a subclass of float, and self is subclass of PDSimCore\n sig_ok = len(callargs) == 0 or (len(callargs) == 1 and isinstance(list(callargs.values())[0],PDSimCore))\n \n if lumps_energy_balance_callback is not None and sig_ok: #Do functional introspection here where the ``True`` is\n self.callbacks.lumps_energy_balance_callback = PDSim.core.callbacks.WrappedLumpsEnergyBalanceCallback(self, lumps_energy_balance_callback)\n else:\n raise ValueError(\"lump_energy_balance_callback is not possible to be wrapped - neither a subclass of LumpsEnergyBalanceCallback nor an acceptable function\")\n \n self.callbacks.endcycle_callback = endcycle_callback\n \n def one_cycle(self, \n X, \n cycle_integrator = 'RK45',\n cycle_integrator_options = None):\n \"\"\"\n Only run one cycle\n \n Parameters\n ----------\n cycle_integrator : str\n One of 'RK45','Euler','Heun'\n cycle_integrator_options : dict\n options to be passed to the solver function (RK45, Euler, etc.)\n \"\"\"\n # Make cycle_integrator_options an empty dictionary if not provided\n if cycle_integrator_options is None:\n cycle_integrator_options = {}\n tmin = 0.0\n tmax = 2*math.pi\n else:\n tmin = cycle_integrator_options['tmin']\n tmax = cycle_integrator_options['tmax']\n \n X = arraym(X)\n \n # (1). First, run all the tubes\n for tube in self.Tubes:\n tube.TubeFcn(tube)\n \n # Call update_existence to save the enthalpies for the tubes \n self.update_existence()\n \n try:\n t1 = default_timer()\n # Run the pre-cycle code\n self.pre_cycle()\n\n if cycle_integrator == 'Euler':\n # Default to 7000 steps if not provided\n N = getattr(self,'EulerN', 7000) \n integrator = EulerIntegrator(self, X)\n aborted = integrator.do_integration(N, tmin, tmax)\n elif cycle_integrator == 'Heun':\n # Default to 7000 steps if not provided\n N = getattr(self,'HeunN', 7000)\n integrator = HeunIntegrator(self, X)\n aborted = integrator.do_integration(N, tmin, tmax)\n elif cycle_integrator == 'RK45':\n # Default to tolerance of 1e-8 if not provided\n eps_allowed = getattr(self,'RK45_eps', 1e-8)\n integrator = RK45Integrator(self, X)\n aborted = integrator.do_integration(tmin, tmax, eps_allowed=eps_allowed)\n else:\n raise AttributeError('solver_method should be one of RK45, Euler, or Heun')\n \n if aborted == False:\n integrator.post_integration()\n \n self.Itheta = integrator.Itheta\n self.Ntheta = self.Itheta + 1\n\n # Make sure we got the right number of things\n assert self.Ntheta == len(self.FlowStorage)\n self.post_cycle()\n \n except ValueError:\n # debug_plots(self)\n raise\n \n if aborted is None:\n aborted = False\n \n # Quit if you have aborted in one of the cycle solvers\n if aborted == 'abort':\n return None\n \n t2 = default_timer()\n print('Elapsed time for cycle is {0:g} s'.format(t2-t1))\n \n mdot_out = self.FlowsProcessed.mean_mdot[self.key_outlet]\n mdot_in = self.FlowsProcessed.mean_mdot[self.key_inlet] \n if hasattr(self, 'additional_inlet_keys'):\n for key in self.additional_inlet_keys:\n mdot_in += self.FlowsProcessed.mean_mdot[key]\n if hasattr(self, 'additional_outlet_keys'):\n for key in self.additional_outlet_keys:\n mdot_out += self.FlowsProcessed.mean_mdot[key]\n \n # We need to find the key at the inlet to the outlet tube.\n Tube = self.Tubes[self.key_outlet]\n if Tube.key1 == self.key_outlet:\n key_outtube_inlet = Tube.key2\n elif Tube.key2 == self.key_outlet:\n key_outtube_inlet = Tube.key1\n \n # This is the so-called hd' state at the outlet of the pump set\n self.h_outlet_pump_set = (self.FlowsProcessed.integrated_mdoth[key_outtube_inlet]\n /self.FlowsProcessed.integrated_mdot[key_outtube_inlet])\n \n # It should be equal to the enthalpy of the fluid at the inlet\n # to the outlet tube at the current Td value\n h_outlet_Tube = self.Tubes.Nodes[key_outtube_inlet].h\n # Residual is the difference of these two terms\n # We put it in kW by multiplying by flow rate\n self.resid_Td = 0.1*(h_outlet_Tube - self.h_outlet_pump_set)\n \n def OBJECTIVE_CYCLE(self, Td_Tlumps0, X, epsilon_cycle = 0.003, epsilon_energy_balance = 0.003, cycle_integrator = 'RK45', OneCycle = False, cycle_integrator_options = None, plot_every_cycle = False):\n \"\"\"\n The Objective function for the energy balance solver\n \n Parameters\n ----------\n Td_Tlumps0 : list\n Discharge temperature and lump temperatures\n X : :class:`arraym <PDSim.misc.datatypes.arraym>` instance\n Contains the state variables for all the control volumes in existence, as well as any other integration variables\n epsilon : float\n Convergence criterion applied to all of the solvers (DEPRECATED!)\n epsilon_cycle : float\n Cycle-cycle convergence criterion\n epsilon_energy_balance : float\n Energy balance convergence criterion\n cycle_integrator : string, one of 'RK45','Euler','Heun'\n Which solver is to be used to integrate the steps\n OneCycle : bool\n If ``True``, stop after one cycle\n plot_every_cycle : bool\n If ``True``, make the debug plots at every cycle\n cycle_integrator_options : dict\n Options to be passed to cycle integrator\n \"\"\"\n \n # Consume the first element as the discharge temp \n self.Td = float(Td_Tlumps0.pop(0))\n # The rest are the lumps in order\n self.Tlumps = Td_Tlumps0 \n \n # The first time this function is run, save the state variables\n if self.xstate_init is None:\n self.xstate_init = X\n self.exists_CV_init = self.CVs.exists_keys\n \n i = 0\n while True:\n \n # Actually run the cycle, runs post_cycle at the end,\n # sets the parameter lumps_resid in this class\n # Also sets resid_Td\n self.one_cycle(X, \n cycle_integrator = cycle_integrator,\n cycle_integrator_options = cycle_integrator_options)\n \n if self.Abort():\n return\n \n errors, X = self.callbacks.endcycle_callback()\n error_metric = np.sqrt(np.sum(np.power(errors, 2)))\n \n ### -----------------------------------\n ### The lump temperatures\n ### -----------------------------------\n \n self.solvers.lump_eb_history.append([self.Tlumps, self.lumps_resid])\n \n if len(self.Tlumps) > 1:\n print(\"Running multi-lump analysis\")\n\n if self.OEB_solver == 'MDNR':\n # Use Multi Dim. Newton Raphson step for multi-lump temperatures\n w = 1.0 \n dx = 0.5\n x = np.array(self.Tlumps,dtype=np.float)\n J = np.zeros((len(x),len(x)))\n error = 999\n # If a float is passed in for dx, convert to a numpy-like list the same shape\n # as x\n if isinstance(dx,int) or isinstance(dx,float):\n dx=dx*np.ones_like(x)\n \n r0 = np.array(self.lumps_resid)*1000 \n\n # Build the Jacobian matrix by columns\n for jj in range(len(self.Tlumps)):\n delta = np.zeros_like(x)\n delta[jj] = dx[jj]\n self.Tlumps = self.Tlumps + delta\n ri = self.callbacks.lumps_energy_balance_callback()\n #print('ri:',ri)\n ri = np.array(ri)\n J[:,jj] = (ri-r0)/delta[jj]\n\n v = np.linalg.solve(J,-r0)\n\n # Calculate new Tlumps\n Tnew = x + w*v\n self.Tlumps = Tnew\n\n elif self.OEB_solver == 'Broyden':\n # Use Broyden Method \n raise('Broyden not implemented yet')\n\n else:\n # Use Relaxed Secant Method for single lump temperature\n print(\"Running single-lump analysis\")\n \n if len(self.solvers.lump_eb_history) == 1:\n \n T, EB = self.solvers.lump_eb_history[-1]\n \n # T and EB are one-element lists, get floats\n _T, _EB = T[0], EB[0]\n \n # Use the thermal mass to make the step\n # Here is the logic:\n # Instantaneous energy balance given by\n # dU/dt = m*c*(dT/dt) = sum(Qdot)\n # and if dt = one cycle period (seconds/rev) Deltat = 2*pi/omega\n # DELTAT = sum(Qdot)*Deltat/(m*c)\n thermal_capacitance = 0.49*0.001 # [kJ/K]\n Deltat = (2*np.pi)/self.omega # [s]\n \n # Update the lump temperatures\n Tnew = np.array([_T + _EB*Deltat/thermal_capacitance])\n \n else:\n \n # Get the values from the history\n Tn1, EBn1 = self.solvers.lump_eb_history[-1]\n Tn2, EBn2 = self.solvers.lump_eb_history[-2]\n \n # Convert to numpy arrays\n Tn1, EBn1, Tn2, EBn2 = [np.array(l) for l in [Tn1, EBn1, Tn2, EBn2]]\n \n # Use the relaxed secant method to find the solution \n Tnew = Tn1 - 0.7*EBn1*(Tn1-Tn2)/(EBn1-EBn2)\n\n # Update the lump temperatures \n self.Tlumps = Tnew.tolist()\n \n ### -----------------------------------\n ### The discharge enthalpy\n ### -----------------------------------\n \n # The outlet tube\n outlet_tube = self.Tubes[self.key_outlet]\n \n # Get the keys for the elements of the outlet tube\n if outlet_tube.key1 == self.key_outlet:\n key_outtube_inlet = outlet_tube.key2\n key_outtube_outlet = outlet_tube.key1\n elif outlet_tube.key2 == self.key_outlet:\n key_outtube_inlet = outlet_tube.key1\n \n if error_metric < 0.1*epsilon_cycle and np.max(np.abs(self.lumps_resid)) < epsilon_energy_balance:\n\n # Each time that we get here and we are significantly below the threshold, store the values\n \n # Get the current value for the outlet enthalpy of the machine\n h_outlet = self.Tubes[self.key_outlet].State2.get_h()\n \n # Store the values in the list of values\n self.solvers.hdisc_history.append([h_outlet,self.resid_Td])\n\n if len(self.solvers.hdisc_history) == 1:\n # The first time we get here, perturb the discharge enthalpy\n self.Tubes.Nodes[self.key_outlet].update_ph(self.Tubes.Nodes[self.key_outlet].p, h_outlet + 5)\n else:\n # Get the values from the history\n hdn1, EBn1 = self.solvers.hdisc_history[-1]\n hdn2, EBn2 = self.solvers.hdisc_history[-2]\n \n # Use the relaxed secant method to find the solution \n hdnew = hdn1 - 0.75*EBn1*(hdn1-hdn2)/(EBn1-EBn2)\n \n # Reset the outlet enthalpy of the outlet tube based on our new\n # value for it\n self.Tubes.Nodes[self.key_outlet].update_ph(self.Tubes.Nodes[self.key_outlet].p, hdnew)\n\n print(self.solvers.hdisc_history)\n \n print('New outlet T:', self.Tubes.Nodes[self.key_outlet].T, 'K')\n \n # Store a copy of the initial temperatures of the chambers\n self.solvers.initial_states_history.append(self.T[:,0].copy())\n \n if OneCycle:\n print('Quitting due to OneCycle being set to True')\n return\n \n if plot_every_cycle:\n from PDSim.plot.plots import debug_plots\n debug_plots(self)\n \n if self.Abort():\n print('Quitting because Abort flag hit')\n return\n \n # Reset the flag for the fixed side of the outlet tube\n #outlet_tube.fixed = old_fixed\n \n mdot_out = abs(self.FlowsProcessed.mean_mdot[self.key_outlet])\n mdot_in = abs(self.FlowsProcessed.mean_mdot[self.key_inlet])\n if hasattr(self, 'additional_inlet_keys'):\n for key in self.additional_inlet_keys:\n print('Additional inlet flow:', key, self.FlowsProcessed.mean_mdot[key]*1000, 'g/s')\n mdot_in += self.FlowsProcessed.mean_mdot[key]\n if hasattr(self, 'additional_outlet_keys'):\n for key in self.additional_outlet_keys:\n print('Additional outlet flow:', key, self.FlowsProcessed.mean_mdot[key]*1000, 'g/s')\n mdot_out += self.FlowsProcessed.mean_mdot[key]\n mdot_error = (mdot_out/mdot_in-1)*100\n \n print('===========')\n print('|| # {i:03d} ||'.format(i=i))\n print('===========')\n print(error_ascii_bar(abs(self.lumps_resid[0]), epsilon_energy_balance), 'energy balance kW ', self.lumps_resid, ' Tlumps: ',self.Tlumps,'K')\n print(error_ascii_bar(abs(self.resid_Td), epsilon_energy_balance), 'discharge state', self.resid_Td, 'h_pump_set: ', self.h_outlet_pump_set,'kJ/kg', self.Tubes.Nodes[key_outtube_inlet].h, 'kJ/kg')\n print(error_ascii_bar(error_metric, epsilon_cycle), 'cycle-cycle ', error_metric)\n print(error_ascii_bar(abs(mdot_error), 1), 'mdot [%]', mdot_error, '|| in:', mdot_in*1000, 'g/s || out:', mdot_out*1000, 'g/s ')\n \n # Check all the stopping conditions\n within_tolerance = [\n np.max(np.abs(self.lumps_resid)) < epsilon_energy_balance, \n abs(self.resid_Td) < epsilon_energy_balance, \n np.sqrt(np.sum(np.power(errors, 2))) < epsilon_cycle\n ]\n # Stop if all conditions are met\n if all(within_tolerance):\n break\n \n i += 1\n \n # If the abort function returns true, quit this loop\n if self.Abort():\n print('Quitting OBJECTIVE_CYCLE loop in core.solve')\n return None # Stop\n else:\n if len(self.solvers.hdisc_history) == 0:\n # Store the values in the list of values\n self.solvers.hdisc_history.append([self.Tubes[self.key_outlet].State2.get_h(),self.resid_Td])\n \n def solve(self,\n key_inlet = None,\n key_outlet = None,\n solver_method = 'Euler',\n OneCycle = False,\n Abort = None,\n pipe_abort = None,\n UseNR = False,\n alpha = 0.5,\n plot_every_cycle = False,\n x0 = None,\n reset_initial_state = False,\n timeout = 3600,\n eps_cycle = 0.001,\n eps_energy_balance = 0.01,\n cycle_integrator_options = None,\n max_number_of_steps = 40000,\n **kwargs):\n \"\"\"\n This is the driving function for the PDSim model. It can be extended through the \n use of the callback functions\n \n It is highly recommended to call this function using keyword arguments like::\n \n solve(key_inlet = 'inlet.1', \n key_outlet = 'outlet.1', ....)\n \n Parameters\n ----------\n key_inlet : str\n The key for the flow node that represents the upstream quasi-steady point\n key_outlet : str\n The key for the flow node that represents the upstream quasi-steady point\n solver_method : str\n OneCycle : bool\n If ``True``, stop after just one rotation. Useful primarily for \n debugging purposes\n Abort : function\n A function that may be called to determine whether to stop running. \n If calling Abort() returns ``True``, stop running \n pipe_abort : \n UseNR : bool\n If ``True``, use a multi-dimensional solver to determine the initial state of the state variables for each control volume\n alpha : float\n Use a range of ``(1-alpha)*dx, (1+alpha)*dx`` for line search if needed\n plot_every_cycle : bool\n If ``True``, make the plots after every cycle (primarily for debug purposes)\n x0 : arraym\n The starting values for the solver that modifies the discharge temperature and lump temperatures\n reset_initial_state : bool\n If ``True``, use the stored initial state from the previous call to ``solve`` as the starting value for the thermodynamic values for the control volumes\n timeout : float\n Number of seconds before the run times out\n eps_cycle : float\n Cycle-cycle convergence criterion\n eps_energy_balance : float\n Energy balance convergence criterion\n cycle_integrator_options : dict\n A dictionary of options to be passed to the cycle integrator\n max_number_of_steps : int\n Maximum number of steps allowed per rotation\n \n Notes\n -----\n The callbacks ``step_callback`` and ``endcycle_callback`` and \n ``heat_transfer_callback`` and ``lump_energy_balance_callback`` and \n ``valves_callback`` should now be passed to the connect_callbacks() \n function before running precond_solve() or solve()\n \n \"\"\"\n if any(cb in kwargs for cb in ['step_callback','endcycle_callback','heat_transfer_callback','lump_energy_balance_callback','valves_callback']):\n raise NotImplementedError('callback functions are no longer passed to solve() function, rather they are passed to connect_callbacks() function prior to calling solve()')\n \n # Save copies of the inlet and outlet states at the root of the HDF5 file\n # for ease of retrieval\n self.inlet_state = self.Tubes.Nodes[key_inlet] \n self.outlet_state = self.Tubes.Nodes[key_outlet]\n \n # Carry out some pre-run checks\n self._check()\n \n self.start_time = default_timer()\n self.timeout = timeout\n \n #Connect functions that have been serialized by saving the function name as a string\n self.connect_flow_functions()\n \n #Both inlet and outlet keys must be connected to invariant nodes - \n # that is they must be part of the tubes which are all quasi-steady\n if not key_inlet == None and not key_inlet in self.Tubes.Nodes:\n raise KeyError('key_inlet must be a Tube node')\n if not key_outlet == None and not key_outlet in self.Tubes.Nodes:\n raise KeyError('key_outlet must be a Tube node')\n \n self.key_inlet = key_inlet\n self.key_outlet = key_outlet\n \n t1=default_timer()\n \n # Set up a pipe for accepting a value of True which will abort the run\n # Used from the GUI to kill process from the top-level thread\n self.pipe_abort = pipe_abort\n \n if len(self.CVs) < 1:\n raise ValueError('At least one control volume must be added using the add_CV function')\n \n if len(self.Flows) <= 1:\n raise ValueError('At least two flows must be added using the add_flow function')\n \n # If a function called pre_solve is provided, call it with no input arguments\n if hasattr(self,'pre_solve'):\n self.pre_solve()\n \n # This runs before the model starts at all\n self.pre_run(N = max_number_of_steps)\n \n # Check which method is used to do aborting\n if Abort is None and pipe_abort is not None:\n # Use the pipe_abort pipe to look at the abort pipe to see whether \n # to quit \n self.Abort = self.check_abort\n elif Abort is None and pipe_abort is None:\n #Disable the ability to abort, always don't abort\n self.Abort = lambda : False\n elif Abort is not None and pipe_abort is None:\n self.Abort = Abort\n else:\n raise ValueError('Only one of Abort and pipe_abort may be provided')\n \n # If you want to reset the initial state, use the values that were\n # cached in the xstate_init array\n if reset_initial_state is not None and reset_initial_state:\n self.reset_initial_state()\n self.pre_cycle(self.xstate_init)\n else:\n # (2) Run a cycle with the given values for the temperatures\n self.pre_cycle()\n self.x_state = self._get_from_matrices(0).copy()\n \n if x0 is None:\n x0 = [self.Tubes.Nodes[key_outlet].T, self.Tubes.Nodes[key_outlet].T]\n\n # Actually run the solver\n self.OBJECTIVE_CYCLE(x0, self.x_state, \n cycle_integrator = solver_method, \n OneCycle = OneCycle,\n epsilon_energy_balance = eps_energy_balance,\n epsilon_cycle = eps_cycle,\n cycle_integrator_options = cycle_integrator_options,\n plot_every_cycle = plot_every_cycle\n ) \n \n if not self.Abort() and not OneCycle: \n self.post_solve()\n \n if hasattr(self,'resid_Td'):\n del self.resid_Td\n \n # Save the elapsed time for simulation\n self.elapsed_time = default_timer() - t1\n \n def get_prune_keys(self):\n \"\"\"\n Remove some elements when the simulation finishes that are not \n very useful and/or are very large when stored to file\n \n Returns\n -------\n prune_key_list: list\n A list of HDF5 keys that are to be removed from the HDF5 file.\n \"\"\"\n \n return ['/CVs/CVs',\n '/CVs/Nodes',\n '/CVs/T',\n '/CVs/cp',\n '/CVs/cv',\n '/CVs/dpdT',\n '/CVs/exists_CV',\n '/CVs/exists_indices',\n '/CVs/exists_keys',\n '/CVs/h',\n '/CVs/p',\n '/CVs/rho',\n '/callbacks',\n '/core',\n '/steps',\n '/theta',\n '/Flows'\n ]\n \n def attach_HDF5_annotations(self, fName):\n \"\"\"\n In this function, annotations can be attached to each HDF5 field\n \n Parameters\n ----------\n fName : str\n The file name for the HDF5 file that is to be used\n \"\"\" \n attrs_dict = {\n '/t':'The array of the independent variable in the solution, either time or crank angle [rad or s]',\n '/m':'The NCV x Nt matrix with the mass in each control volume [kg]',\n '/T':'The NCV x Nt matrix with the temperature in each control volume [K]',\n '/V':'The NCV x Nt matrix with the volume in each control volume [m^3]',\n '/dV':'The NCV x Nt matrix with the derivative of volume w.r.t. crank angle in each control volume [m^3/radian]',\n '/h':'The NCV x Nt matrix with the enthalpy in each control volume [kJ/kg]',\n '/p':'The NCV x Nt matrix with the pressure in each control volume [kPa]',\n '/rho':'The NCV x Nt matrix with the density in each control volume [kg/m^3]',\n '/Q':'The NCV x Nt matrix with the heat transfer TO the gas in each control volume [kW]',\n '/xL':'The NCV x Nt matrix with the oil mass fraction in each control volume [-]',\n '/A_shell':'The shell area of the machine [m^2]',\n '/h_shell':'The heat transfer coefficient between the shell and the ambient [kW/m^2/K]',\n '/key_inlet':'The key for the inlet node',\n '/key_outlet':'The key for the outlet node',\n '/elapsed_time':'The elapsed time for the simulation run [s]',\n '/eta_a': 'Adiabatic efficiency [-]',\n '/eta_oi':'Overall isentropic efficiency [-]',\n '/eta_v':'Volumetric efficiency [-]',\n '/Qamb':'Rate of heat transfer from the machine TO the ambient [kW]',\n '/RK45_eps':'Step error tolerance for Runge-Kutta method [varied]',\n '/Tamb':'Ambient temperature [K]',\n '/Wdot_pv':'Mechanical power calculated as the integral of -pdV [kW]',\n '/Wdot_electrical':'Electrical power of the machine [kW]',\n '/Wdot_forces':'Mechanical power calculated from the mechanical analysis [kW]',\n '/mdot':'Mass flow rate [kg/s]',\n '/motor/eta_motor':'Motor efficiency [-]',\n '/motor/losses':'Losses generated in the motor [kW]',\n '/motor/suction_fraction':'Fraction of the motor losses that are added to the suction gas [-]',\n '/motor/type':'The model used to simulate the motor',\n '/omega':'Rotational speed [rad/s]',\n '/run_index':'A unique identifier for runs in a batch'\n }\n \n hf = h5py.File(fName,'a')\n \n for k, v in attrs_dict.items():\n dataset = hf.get(k)\n if dataset is None:\n print('bad key',k)\n else:\n dataset.attrs['note'] = v\n hf.close()\n \n def post_solve(self):\n \"\"\"\n Do some post-processing to calculate flow rates, efficiencies, etc. \n \"\"\" \n\n #Resize all the matrices to keep only the real data\n print('Ntheta is', self.Ntheta)\n self.t = self.t[ 0:self.Ntheta]\n self.T = self.T[:,0:self.Ntheta]\n self.p = self.p[:,0:self.Ntheta]\n self.Q = self.Q[:,0:self.Ntheta]\n self.m = self.m[:,0:self.Ntheta]\n self.rho = self.rho[:,0:self.Ntheta]\n self.V = self.V[:,0:self.Ntheta]\n self.dV = self.dV[:,0:self.Ntheta]\n self.h = self.h[:,0:self.Ntheta]\n self.xValves = self.xValves[:,0:self.Ntheta]\n \n print('mdot*(h2-h1),P-v,Qamb', self.Wdot, self.Wdot_pv, self.Qamb)\n print('Mass flow rate is',self.mdot*1000,'g/s')\n print('Volumetric efficiency is',self.eta_v*100,'%')\n print('Adiabatic efficiency is',self.eta_a*100,'%')\n \n # Restructure the history for easier writing to file and more clear description of what the things are\n hdisc_history = list(zip(*self.solvers.hdisc_history))\n self.solvers.hdisc_history = dict(hd = np.array(hdisc_history[0]), hd_error = np.array(hdisc_history[1]))\n lump_eb_history = list(zip(*self.solvers.lump_eb_history))\n self.solvers.lump_eb_history = dict(Tlumps = np.array(lump_eb_history[0]), lump_eb_error = np.array(lump_eb_history[1]))\n self.solvers.initial_states_history = np.array(zip(*self.solvers.initial_states_history)).T\n\n \n def derivs(self, theta, x):\n \"\"\"\n Evaluate the derivatives of the state variables\n \n derivs() is an internal function that should (probably) not actually be called by\n any user-provided code, but is documented here for completeness.\n \n Parameters\n ----------\n theta : float\n The value of the independent variable\n x : :class:`arraym <PDSim.misc.datatypes.arraym>` instance\n The array of the independent variables (state variables plus valve parameters)\n \n Returns\n -------\n dfdt : :class:`arraym <PDSim.misc.datatypes.arraym>` instance\n \n \"\"\"\n\n # Updates the state, calculates the volumes, prepares all the things needed for derivatives\n self.core.properties_and_volumes(self.CVs.exists_CV, theta, STATE_VARS_TM, x)\n \n # Calculate the flows and sum up all the terms\n self.core.calculate_flows(self.Flows)\n \n # Calculate the heat transfer terms if provided\n if self.callbacks.heat_transfer_callback is not None:\n self.core.Q = arraym(self.callbacks.heat_transfer_callback(theta))\n if not len(self.core.Q) == self.CVs.Nexist:\n raise ValueError('Length of Q is not equal to length of number of CV in existence')\n else:\n self.core.Q = empty_arraym(self.CVs.Nexist)\n \n # Calculate the derivative terms and set the derivative of the state vector\n self.core.calculate_derivs(self.omega, False)\n \n # Add the derivatives for the valves\n if self.__hasValves__:\n # \n offset = len(self.stateVariables)*self.CVs.Nexist\n for i, valve in enumerate(self.Valves):\n if x[offset+2+i*2-1] < -10:\n x[offset+2+i*2-1]=0.0\n if x[offset+i*2] > valve.x_stopper and x[offset+2+i*2-1] > 0.0 :\n x[offset+2+i*2-1]=0.0 \n # Get the values from the input array for this valve\n xvalve = x[offset+i*2:offset+2+i*2]\n # Set the values in the valve class\n valve.set_xv(xvalve)\n # Get the derivatives of position and derivative of velocity \n self.core.property_derivs.extend(valve.derivs(self))\n \n return self.core.property_derivs\n \n def valves_callback(self):\n \"\"\"\n This is the default valves_callback function that builds the list of \n derivatives of position and velocity with respect to the crank angle\n \n It returns a :class:`list` instance with the valve return values in order \n \"\"\"\n #Run each valve model in turn to calculate the derivatives of the valve parameters\n # for each valve\n f=[]\n for Valve in self.Valves:\n f+=Valve.derivs(self)\n return f\n \n def IsentropicNozzleFM(self,FlowPath,A,**kwargs):\n \"\"\"\n A generic isentropic nozzle flow model wrapper\n \n Parameters\n ----------\n FlowPath : :class:`FlowPath <PDSim.flow.flow.FlowPath>` instance\n A fully-instantiated flow path model\n A : float\n throat area for isentropic nozzle model [:math:`m^2`]\n \n Returns\n -------\n mdot : float\n The mass flow through the flow path [kg/s]\n \"\"\"\n\n try:\n mdot = flow_models.IsentropicNozzle(A,\n FlowPath.State_up,\n FlowPath.State_down)\n return mdot\n except ZeroDivisionError:\n return 0.0\n \n def IsentropicNozzleFMSafe(self,FlowPath,A,DP_floor,**kwargs):\n \"\"\"\n A generic isentropic nozzle flow model wrapper with the added consideration\n that if the pressure drop is below the floor value, there is no flow.\n This was added to handle the case of the injection line where there is\n no flow out of the injection which greatly increases the numerical \n stiffness \n \n Parameters\n ----------\n FlowPath : :class:`FlowPath <PDSim.flow.flow.FlowPath>`\n A fully-instantiated flow path model\n A : float\n throat area for isentropic nozzle model [:math:`m^2`]\n DP_floor: float\n The minimum pressure drop [kPa]\n \n Returns\n -------\n mdot : float\n The mass flow through the flow path [kg/s]\n \"\"\"\n\n try:\n if FlowPath.State_up.p-FlowPath.State_down.p > DP_floor:\n mdot = flow_models.IsentropicNozzle(A,\n FlowPath.State_up,\n FlowPath.State_down)\n return mdot\n else:\n return 0.0\n except ZeroDivisionError:\n return 0.0\n\n def step_callback(self,t,h,i):\n \"\"\" The default step_callback which does nothing\n \n Parameters\n ----------\n t : float\n The current value of the independent variable\n h : float\n The current step size\n i : int\n The current index\n \"\"\"\n return False,h\n \n def endcycle_callback(self, eps_wrap_allowed=0.0001):\n \"\"\"\n This function can be called at the end of the cycle if so desired.\n Its primary use is to determine whether the cycle has converged for a \n given set of discharge temperatures and lump temperatures.\n \n Parameters\n ----------\n eps_wrap_allowed : float\n Maximum error allowed, in absolute value\n \n Returns \n -------\n redo : bool\n ``True`` if cycle should be run again with updated inputs, ``False`` otherwise.\n A return value of ``True`` means that convergence of the cycle has been achieved\n \"\"\"\n assert self.Ntheta - 1 == self.Itheta\n #old and new CV keys\n LHS,RHS=[],[]\n errorT,error_rho,error_mass,newT,new_rho,new_mass,oldT,old_rho,old_mass={},{},{},{},{},{},{},{},{}\n \n for key in self.CVs.exists_keys:\n # Get the 'becomes' field. If a list, parse each fork of list. If a single key convert \n # into a list so you can use the same code below \n if not isinstance(self.CVs[key].becomes, list):\n becomes = [self.CVs[key].becomes]\n else:\n becomes = self.CVs[key].becomes\n \n Iold = self.CVs.index(key)\n\n for newkey in becomes:\n # If newkey is 'none', the control volume will die at the end\n # of the cycle, so just keep going\n if newkey == 'none': continue\n Inew = self.CVs.index(newkey)\n newCV = self.CVs[newkey]\n # There can't be any overlap between keys\n if newkey in newT:\n raise KeyError('newkey [' + newkey + '] is already in newT; becomes keys overlap but should not')\n #What the state variables were at the start of the rotation\n oldT[newkey]=self.T[Inew, 0]\n old_rho[newkey]=self.rho[Inew, 0]\n #What they are at the end of the rotation\n newT[newkey]=self.T[Iold,self.Itheta]\n new_rho[newkey]=self.rho[Iold,self.Itheta]\n errorT[newkey]=(oldT[newkey]-newT[newkey])/newT[newkey]\n error_rho[newkey]=(old_rho[newkey]-new_rho[newkey])/new_rho[newkey]\n #Update the list of keys for setting the exist flags\n LHS.append(key)\n RHS.append(newkey)\n \n error_T_list = [errorT[key] for key in self.CVs.keys if key in newT]\n error_rho_list = [error_rho[key] for key in self.CVs.keys if key in new_rho]\n \n new_T_list = [newT[key] for key in self.CVs.keys if key in newT]\n new_rho_list = [new_rho[key] for key in self.CVs.keys if key in new_rho]\n \n #Reset the exist flags for the CV - this should handle all the possibilities\n #Turn off the LHS CV\n for key in LHS:\n self.CVs[key].exists=False\n #Turn on the RHS CV\n for key in RHS:\n self.CVs[key].exists=True\n \n self.update_existence()\n\n # Error values are based on density and temperature independent of \n # selection of state variables \n error_list = []\n for var in ['T','D']:\n if var == 'T':\n error_list += error_T_list\n elif var == 'D':\n error_list += error_rho_list\n elif var == 'M':\n error_list += error_mass_list\n else:\n raise KeyError\n \n # Calculate the volumes at the beginning of the next rotation\n self.core.just_volumes(self.CVs.exists_CV, 0)\n V = {key:V for key,V in zip(self.CVs.exists_keys,self.core.V)}\n new_mass_list = [new_rho[key]*V[key] for key in self.CVs.exists_keys]\n \n new_list = []\n for var in self.stateVariables:\n if var == 'T':\n new_list += new_T_list\n elif var == 'D':\n new_list += new_rho_list\n elif var == 'M':\n new_list += new_mass_list\n else:\n raise KeyError\n\n # Add values for valves\n for valve in self.Valves:\n new_list += list(valve.get_xv())\n \n return arraym(error_list), arraym(new_list)\n \n def connect_flow_functions(self):\n \"\"\"\n Reconnect function pointers\n \n For pickling purposes, it can sometimes be useful to just give the name\n of the function relative to the PDSimCore (or derived class). If the \n function is just a string, reconnect it to the function in the PDSimCore \n instance \n \"\"\"\n for Flow in self.Flows:\n if hasattr(Flow.MdotFcn, 'Function'):\n if isinstance(Flow.MdotFcn.Function, six.string_types):\n if hasattr(self,Flow.MdotFcn.Function):\n Flow.MdotFcn.Function = getattr(self, Flow.MdotFcn.Function)\n else: \n raise AttributeError('The name of the function ['+Flow.MdotFcn.Function+']is not found in the PDSimCore derived class instance')\n \nif __name__=='__main__':\n PC = PDSimCore()\n PC.attach_HDF5_annotations('runa.h5')\n print('This is the base class that is inherited by other compressor types. Running this file doesn\\'t do anything')\n" ]
[ [ "numpy.linalg.solve", "numpy.ones_like", "numpy.abs", "numpy.power", "numpy.isnan", "numpy.mean", "numpy.diff", "numpy.zeros_like", "numpy.trapz", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
djape24394/gmphd_filter
[ "877c4fcd99a587e08a30ce1b845242288c78dfb1" ]
[ "gmphd.py" ]
[ "import numpy as np\r\nimport numpy.linalg as lin\r\nfrom typing import List, Dict, Any\r\n\r\n\r\ndef multivariate_gaussian(x: np.ndarray, m: np.ndarray, P: np.ndarray) -> float:\r\n \"\"\"\r\n Multivatiate Gaussian Distribution\r\n\r\n :param x: vector\r\n :param m: distribution mean vector\r\n :param P: Covariance matrix\r\n :return: probability density function at x\r\n \"\"\"\r\n first_part = 1 / (((2 * np.pi) ** (x.size / 2.0)) * (lin.det(P) ** 0.5))\r\n second_part = -0.5 * (x - m) @ lin.inv(P) @ (x - m)\r\n return first_part * np.exp(second_part)\r\n\r\n\r\ndef multivariate_gaussian_predefined_det_and_inv(x: np.ndarray, m: np.ndarray, detP: np.float64,\r\n invP: np.ndarray) -> float:\r\n \"\"\"\r\n Multivariate Gaussian Distribution with provided determinant and inverse of the Gaussian mixture.\r\n Useful in case when we already have precalculted determinant and inverse of the covariance matrix.\r\n :param x: vector\r\n :param m: distribution mean\r\n :param detP: determinant of the covariance matrix\r\n :param invP: inverse of the covariance matrix\r\n :return: probability density function at x\r\n \"\"\"\r\n first_part = 1 / (((2 * np.pi) ** (x.size / 2.0)) * (detP ** 0.5))\r\n second_part = -0.5 * (x - m) @ invP @ (x - m)\r\n return first_part * np.exp(second_part)\r\n\r\n\r\ndef clutter_intensity_function(z: np.ndarray, lc: int, surveillance_region: np.ndarray):\r\n \"\"\"\r\n Clutter intensity function, with the uniform distribution through the surveillance region, pg. 8\r\n in \"Bayesian Multiple Target Filtering Using Random Finite Sets\" by Vo, Vo, Clark.\r\n :param z:\r\n :param lc: average number of false detections per time step\r\n :param surveillance_region: np.ndarray of shape (number_dimensions, 2) giving the range(min and max) for each\r\n dimension\r\n \"\"\"\r\n if surveillance_region[0][0] <= z[0] <= surveillance_region[0][1] and surveillance_region[1][0] <= z[1] <= \\\r\n surveillance_region[1][1]:\r\n # example in two dimensions: lc/((xmax - xmin)*(ymax-ymin))\r\n return lc / ((surveillance_region[0][1] - surveillance_region[0][0]) * (\r\n surveillance_region[1][1] - surveillance_region[1][0]))\r\n else:\r\n return 0.0\r\n\r\n\r\nclass GaussianMixture:\r\n def __init__(self, w: List[np.float64], m: List[np.ndarray], P: List[np.ndarray]):\r\n \"\"\"\r\n The Gaussian mixture class\r\n\r\n :param w: list of scalar weights\r\n :param m: list of np.ndarray means\r\n :param P: list of np.ndarray covariance matrices\r\n\r\n Note that constructor creates detP and invP variables which can be used instead of P list, for covariance matrix\r\n determinant and inverse. These lists cen be initialized with assign_determinant_and_inverse function, and\r\n it is useful in case we already have precalculated determinant and inverse earlier.\r\n \"\"\"\r\n self.w = w\r\n self.m = m\r\n self.P = P\r\n self.detP = None\r\n self.invP = None\r\n\r\n def set_covariance_determinant_and_inverse_list(self, detP: List[np.float64], invP: List[np.ndarray]):\r\n \"\"\"\r\n For each Gaussian component, provide the determinant and the covariance inverse\r\n :param detP: list of determinants for each Gaussian component in the mixture\r\n :param invP: list of covariance inverses for each Gaussian component in the mixture\r\n \"\"\"\r\n self.detP = detP\r\n self.invP = invP\r\n\r\n def mixture_value(self, x: np.ndarray):\r\n \"\"\"\r\n Gaussian Mixture function for the given vector x\r\n \"\"\"\r\n sum = 0\r\n if self.detP is None:\r\n for i in range(len(self.w)):\r\n sum += self.w[i] * multivariate_gaussian(x, self.m[i], self.P[i])\r\n else:\r\n for i in range(len(self.w)):\r\n sum += self.w[i] * multivariate_gaussian_predefined_det_and_inv(x, self.m[i], self.detP[i],\r\n self.invP[i])\r\n return sum\r\n\r\n def mixture_single_component_value(self, x: np.ndarray, i: int) -> float:\r\n \"\"\"\r\n Single Gaussian Mixture component value for the given vector\r\n :param x: vector\r\n :param i: index of the component\r\n :returns: probability density function at x, multiplied with the component weght at the index i\r\n \"\"\"\r\n if self.detP is None:\r\n return self.w[i] * multivariate_gaussian(x, self.m[i], self.P[i])\r\n else:\r\n return self.w[i] * multivariate_gaussian_predefined_det_and_inv(x, self.m[i], self.detP[i], self.invP[i])\r\n\r\n def mixture_component_values_list(self, x: np.ndarray) -> List[float]:\r\n \"\"\"\r\n Sometimes it is useful to have value of each component multiplied with its weight\r\n :param x: vector\r\n :return: List[np.float64]:\r\n List of components values at x, multiplied with their weight.\r\n \"\"\"\r\n val = []\r\n if self.detP is None:\r\n for i in range(len(self.w)):\r\n val.append(self.w[i] * multivariate_gaussian(x, self.m[i], self.P[i]))\r\n else:\r\n for i in range(len(self.w)):\r\n val.append(\r\n self.w[i] * multivariate_gaussian_predefined_det_and_inv(x, self.m[i], self.detP[i], self.invP[i]))\r\n return val\r\n\r\n def copy(self):\r\n w = self.w.copy()\r\n m = []\r\n P = []\r\n for m1 in self.m:\r\n m.append(m1.copy())\r\n for P1 in self.P:\r\n P.append(P1.copy())\r\n return GaussianMixture(w, m, P)\r\n\r\n\r\ndef get_matrices_inverses(P_list: List[np.ndarray]) -> List[np.ndarray]:\r\n inverse_P_list = []\r\n for P in P_list:\r\n inverse_P_list.append(lin.inv(P))\r\n return inverse_P_list\r\n\r\n\r\ndef get_matrices_determinants(P_list: List[np.ndarray]) -> List[float]:\r\n \"\"\"\r\n :param P_list: list of covariance matrices\r\n :return:\r\n \"\"\"\r\n detP = []\r\n for P in P_list:\r\n detP.append(lin.det(P))\r\n return detP\r\n\r\n\r\ndef thinning_and_displacement(v: GaussianMixture, p, F: np.ndarray, Q: np.ndarray):\r\n \"\"\"\r\n For the given Gaussian mixture v, perform thinning with probability P and displacement with N(x; F @ x_prev, Q)\r\n See https://ieeexplore.ieee.org/document/7202905 for details\r\n \"\"\"\r\n w = []\r\n m = []\r\n P = []\r\n for weight in v.w:\r\n w.append(weight * p)\r\n for mean in v.m:\r\n m.append(F @ mean)\r\n for cov_matrix in v.P:\r\n P.append(Q + F @ cov_matrix @ F.T)\r\n return GaussianMixture(w, m, P)\r\n\r\n\r\nclass GmphdFilter:\r\n def __init__(self, model: Dict[str, Any]):\r\n \"\"\"\r\n The Gaussian Mixture Probability Hypothesis Density filter implementation.\r\n \"The Gaussian mixture probability hypothesis density filter\" by Vo and Ma.\r\n\r\n https://ieeexplore.ieee.org/document/1710358\r\n\r\n We assume linear transition and measurement model in the\r\n following form\r\n x[k] = Fx[k-1] + w[k-1]\r\n z[k] = Hx[k] + v[k]\r\n Inputs:\r\n\r\n - model: dictionary which contains the following elements(keys are strings):\r\n\r\n F: state transition matrix\r\n\r\n H: measurement matrix\r\n\r\n Q: process noise covariance matrix(of variable w[k]).\r\n\r\n R: measurement noise covariance matrix(of variable v[k]).\r\n\r\n p_d: probability of target detection\r\n\r\n p_s: probability of target survival\r\n\r\n Spawning model, see pg. 5. of the paper. It's a Gaussian Mixture conditioned on state\r\n\r\n F_spawn: d_spawn: Q_spawn: w_spawn: lists of ndarray objects with the same length, see pg. 5\r\n\r\n clutt_int_fun: reference to clutter intensity function, gets only one argument, which is the current measure\r\n\r\n T: U: Jmax: Pruning parameters, see pg. 7.\r\n\r\n birth_GM: The Gaussian Mixture of the birth intensity\r\n \"\"\"\r\n # to do: dtype, copy, improve performance\r\n self.p_s = model['p_s']\r\n self.F = model['F']\r\n self.Q = model['Q']\r\n self.w_spawn = model['w_spawn']\r\n self.F_spawn = model['F_spawn']\r\n self.d_spawn = model['d_spawn']\r\n self.Q_spawn = model['Q_spawn']\r\n self.birth_GM = model['birth_GM']\r\n self.p_d = model['p_d']\r\n self.H = model['H']\r\n self.R = model['R']\r\n self.clutter_density_func = model['clutt_int_fun']\r\n self.T = model['T']\r\n self.U = model['U']\r\n self.Jmax = model['Jmax']\r\n\r\n def spawn_mixture(self, v: GaussianMixture) -> GaussianMixture:\r\n \"\"\"\r\n Spawning targets in prediction step\r\n \"\"\"\r\n w = []\r\n m = []\r\n P = []\r\n for i, w_v in enumerate(v.w):\r\n for j, w_spawn in enumerate(self.w_spawn):\r\n w.append(w_v * w_spawn)\r\n m.append(self.F_spawn[j] @ v.m[i] + self.d_spawn[j])\r\n P.append(self.Q_spawn[j] + self.F_spawn[j] @ v.P[i] @ self.F_spawn[j].T)\r\n return GaussianMixture(w, m, P)\r\n\r\n def prediction(self, v: GaussianMixture) -> GaussianMixture:\r\n \"\"\"\r\n Prediction step of the GMPHD filter\r\n Inputs:\r\n - v: Gaussian mixture of the previous step\r\n \"\"\"\r\n # v_pred = v_s + v_spawn + v_new_born\r\n birth_copy = self.birth_GM.copy()\r\n # targets that survived v_s:\r\n v_s = thinning_and_displacement(v, self.p_s, self.F, self.Q)\r\n # spawning targets\r\n v_spawn = self.spawn_mixture(v)\r\n # final phd of prediction\r\n return GaussianMixture(v_s.w + v_spawn.w + birth_copy.w, v_s.m + v_spawn.m + birth_copy.m,\r\n v_s.P + v_spawn.P + birth_copy.P)\r\n\r\n def correction(self, v: GaussianMixture, Z: List[np.ndarray]) -> GaussianMixture:\r\n \"\"\"\r\n Correction step of the GMPHD filter\r\n Inputs:\r\n - v: Gaussian mixture obtained from the prediction step\r\n - Z: Measurement set, containing set of observations\r\n \"\"\"\r\n v_residual = thinning_and_displacement(v, self.p_d, self.H, self.R)\r\n detP = get_matrices_determinants(v_residual.P)\r\n invP = get_matrices_inverses(v_residual.P)\r\n v_residual.set_covariance_determinant_and_inverse_list(detP, invP)\r\n\r\n K = []\r\n P_kk = []\r\n for i in range(len(v_residual.w)):\r\n k = v.P[i] @ self.H.T @ invP[i]\r\n K.append(k)\r\n P_kk.append(v.P[i] - k @ self.H @ v.P[i])\r\n\r\n v_copy = v.copy()\r\n w = (np.array(v_copy.w) * (1 - self.p_d)).tolist()\r\n m = v_copy.m\r\n P = v_copy.P\r\n\r\n for z in Z:\r\n values = v_residual.mixture_component_values_list(z)\r\n normalization_factor = np.sum(values) + self.clutter_density_func(z)\r\n for i in range(len(v_residual.w)):\r\n w.append(values[i] / normalization_factor)\r\n m.append(v.m[i] + K[i] @ (z - v_residual.m[i]))\r\n P.append(P_kk[i].copy())\r\n\r\n return GaussianMixture(w, m, P)\r\n\r\n def pruning(self, v: GaussianMixture) -> GaussianMixture:\r\n \"\"\"\r\n See https://ieeexplore.ieee.org/document/7202905 for details\r\n \"\"\"\r\n I = (np.array(v.w) > self.T).nonzero()[0]\r\n w = [v.w[i] for i in I]\r\n m = [v.m[i] for i in I]\r\n P = [v.P[i] for i in I]\r\n v = GaussianMixture(w, m, P)\r\n I = (np.array(v.w) > self.T).nonzero()[0].tolist()\r\n invP = get_matrices_inverses(v.P)\r\n vw = np.array(v.w)\r\n vm = np.array(v.m)\r\n w = []\r\n m = []\r\n P = []\r\n while len(I) > 0:\r\n j = I[0]\r\n for i in I:\r\n if vw[i] > vw[j]:\r\n j = i\r\n L = []\r\n for i in I:\r\n if (vm[i] - vm[j]) @ invP[i] @ (vm[i] - vm[j]) <= self.U:\r\n L.append(i)\r\n w_new = np.sum(vw[L])\r\n m_new = np.sum((vw[L] * vm[L].T).T, axis=0) / w_new\r\n P_new = np.zeros((m_new.shape[0], m_new.shape[0]))\r\n for i in L:\r\n P_new += vw[i] * (v.P[i] + np.outer(m_new - vm[i], m_new - vm[i]))\r\n P_new /= w_new\r\n w.append(w_new)\r\n m.append(m_new)\r\n P.append(P_new)\r\n I = [i for i in I if i not in L]\r\n\r\n if len(w) > self.Jmax:\r\n L = np.array(w).argsort()[-self.Jmax:]\r\n w = [w[i] for i in L]\r\n m = [m[i] for i in L]\r\n P = [P[i] for i in L]\r\n\r\n return GaussianMixture(w, m, P)\r\n\r\n def state_estimation(self, v: GaussianMixture) -> List[np.ndarray]:\r\n X = []\r\n for i in range(len(v.w)):\r\n if v.w[i] >= 0.5:\r\n for j in range(int(np.round(v.w[i]))):\r\n X.append(v.m[i])\r\n return X\r\n\r\n def filter_data(self, Z: List[List[np.ndarray]]) -> List[List[np.ndarray]]:\r\n \"\"\"\r\n Given the list of collections of measurements for each time step, perform filtering and return the\r\n estimated sets of tracks for each step.\r\n\r\n :param Z: list of observations(measurements) for each time step\r\n :return X:\r\n list of estimated track sets for each time step\r\n \"\"\"\r\n X = []\r\n v = GaussianMixture([], [], [])\r\n for z in Z:\r\n v = self.prediction(v)\r\n v = self.correction(v, z)\r\n v = self.pruning(v)\r\n x = self.state_estimation(v)\r\n X.append(x)\r\n return X\r\n" ]
[ [ "numpy.linalg.inv", "numpy.linalg.det", "numpy.round", "numpy.exp", "numpy.outer", "numpy.array", "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tusharip/augmentations
[ "426122ce032e4ee5309fb6fd756f659353352632" ]
[ "codes/audio/utils.py" ]
[ "import torch\nimport torchaudio\nimport torchaudio.functional as F\nimport sounddevice as sd\nfrom IPython.display import Audio, display\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef plot_waveform(waveform, sample_rate, title=\"Waveform\", xlim=None, ylim=None):\n waveform = waveform.numpy()\n\n num_channels, num_frames = waveform.shape\n time_axis = torch.arange(0, num_frames) / sample_rate\n\n ax1 = plt.subplot(111)\n ax1.plot(time_axis, waveform[0], linewidth=1)\n ax1.set_xlabel('time')\n plt.show()\n\n\n\n\ndef play_audio(waveform, sr):\n waveform = waveform.squeeze().numpy()\n sd.play(waveform, sr)\n sd.wait()\n\n\n\n\ndef plot_specgram(waveform, sample_rate, title=\"Spectrogram\", xlim=None):\n waveform = waveform.numpy()\n\n num_channels, num_frames = waveform.shape\n time_axis = torch.arange(0, num_frames) / sample_rate\n\n figure, axes = plt.subplots(num_channels, 1)\n if num_channels == 1:\n axes = [axes]\n for c in range(num_channels):\n axes[c].specgram(waveform[c], Fs=sample_rate)\n if num_channels > 1:\n axes[c].set_ylabel(f'Channel {c+1}')\n if xlim:\n axes[c].set_xlim(xlim)\n figure.suptitle(title)\n plt.show()\n\nif __name__ ==\"__main__\":\n hamming(1, 50000)" ]
[ [ "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots", "matplotlib.pyplot.show", "torch.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vijay4313/proxemo
[ "7b09828c3b63b01617824c3b27a059584eb11ca4" ]
[ "pose_tracking/human_tracking_3D.py" ]
[ "#!/usr/bin/env python\n# Title :loader.py\n# Author :Venkatraman Narayanan, Bala Murali Manoghar, Vishnu Shashank Dorbala, Aniket Bera, Dinesh Manocha\n# Copyright :\"Copyright 2020, Proxemo project\"\n# Version :1.0\n# License :\"MIT\"\n# Maintainer :Venkatraman Narayanan, Bala Murali Manoghar\n# Email :[email protected], [email protected]\n# ==============================================================================\nimport cv2\nimport numpy as np\n\nfrom pose_tracking.real_sense_wrapper import Real_Sense_Camera\nfrom pose_tracking.cubemos_wrapper import Cubemos_Tacker\n\n\nclass Skel_Temporal():\n \"\"\"Skeleton gait generator class.\"\"\"\n\n def __init__(self, skel_id, do_not_ignore_false_limbs=True):\n \"\"\"Constructor\n\n Args:\n skel_id (int): Skeleton ID\n do_not_ignore_false_limbs (bool, optional): Ignore false limbs?. Defaults to True.\n \"\"\"\n self.id = skel_id\n self.skel_temporal = []\n self.do_not_ignore_false_limbs = do_not_ignore_false_limbs\n\n def add(self, skel_ti):\n \"\"\"Add skeleton to tracking.\n\n Args:\n skel_ti (np.array): skeleton co-ordinates\n \"\"\"\n if not np.any(skel_ti == -1) or self.ignore_false_limbs:\n if len(self.skel_temporal) > 75:\n self.skel_temporal.pop(0)\n self.skel_temporal.append(skel_ti)\n\n def __eq__(self, other):\n \"\"\"Skeleton compare\n\n Args:\n other (obj): Skeleton Object\n\n Returns:\n [bool]: Same/different skeleton\n \"\"\"\n try:\n if self.id == other.id:\n return True\n else:\n return False\n except:\n if self.id == other:\n return True\n else:\n return False\n\n def get_embedding(self):\n \"\"\"Convert Temporal gait cycle to Image sequence.\n\n Returns:\n [np.array]: Gait cycle embedded as image\n \"\"\"\n skel_temporal_np = np.array(self.skel_temporal)\n # make root as (0, 0, 0)\n # even if number of frames is less than 75 it will be resized to 244*244\n skel_temporal_np = skel_temporal_np - \\\n np.expand_dims(skel_temporal_np[:, 0, :], axis=1)\n skel_temporal_img = cv2.resize(skel_temporal_np, (244, 244))\n return skel_temporal_img\n\n\nclass Skel_Tracker():\n \"\"\"Skeleton Tracking Class.\"\"\"\n\n def __init__(self, do_not_ignore_false_limbs=True):\n \"\"\"Constructor.\n\n Args:\n do_not_ignore_false_limbs (bool, optional): Ignore false limbs?. Defaults to True.\n \"\"\"\n self.skel_tracks = []\n self.img_embeddings = []\n self.do_not_ignore_false_limbs = do_not_ignore_false_limbs\n\n def update(self, skel_nps, skel_ids):\n \"\"\"Add skeleton pose to sequence.\n\n Args:\n skel_nps (np.array): Skeleton co-ordinates\n skel_ids (list): Skeleton IDs\n \"\"\"\n # add skeleton corresponding to id\n for skel_np, skel_id in zip(skel_nps, skel_ids):\n try:\n # ID already present - update\n ndx = self.skel_tracks.index(skel_id)\n skel_temporal = self.skel_tracks[ndx]\n skel_temporal.add(skel_np)\n except ValueError:\n # new human - add\n skel_temporal = Skel_Temporal(\n skel_id, self.do_not_ignore_false_limbs)\n skel_temporal.add(skel_np)\n self.skel_tracks.append(skel_temporal)\n # delete obselete human ids\n skel_ids = np.asarray(skel_ids)\n ndx_to_delete = []\n for ndx, skel_temporal in enumerate(self.skel_tracks):\n if not any(skel_ids == skel_temporal.id):\n # tracked id is not present in current frame\n ndx_to_delete.append(ndx)\n for ndx, value in enumerate(ndx_to_delete):\n # considering ndx_to_delete will be sorted in ascending order\n # while poping elements one by one, the index has to be decreased\n # by number of elements already deleted\n self.skel_tracks.pop(value - ndx)\n\n def get_embedding(self):\n \"\"\"Generate image embedding for entire gait sequence.\n\n Returns:\n [list]: image embeddings, skeleton IDs\n \"\"\"\n self.img_embeddings = []\n ids = []\n for skel_track in self.skel_tracks:\n self.img_embeddings.append(skel_track.get_embedding())\n ids.append(skel_track.id)\n self.img_embeddings = np.asarray(self.img_embeddings)\n return self.img_embeddings, ids\n\n def display_embedding(self):\n \"\"\"View image embeddings.\"\"\"\n imgs = self.img_embeddings[0] # np.empty((244,244,3))\n print(self.img_embeddings.shape)\n for img in self.img_embeddings[1:]:\n print(\"--\")\n imgs = np.hstack((imgs, img))\n print(imgs.shape)\n cv2.imshow(\"embeddings\", imgs.astype(np.uint8))\n\n\nclass Track_Human_Pose():\n \"\"\"Main gait tracking loop.\"\"\"\n\n def __init__(self, display=True, verbose=True):\n \"\"\"Constructor.\n\n Args:\n display (bool, optional): Show skeleton detections?. Defaults to True.\n verbose (bool, optional): Generate verbose log?. Defaults to True.\n \"\"\"\n self.verbose = verbose\n self.display = display\n\n self.camera = Real_Sense_Camera(5, 3)\n self.cubemos = Cubemos_Tacker(self.camera.intrinsics)\n\n self.skel_tracker = Skel_Tracker()\n\n def get_pose(self):\n \"\"\"Get human skeletons.\"\"\"\n # capture\n self.camera.capture()\n # get skeletons\n self.cubemos.track_skeletons(self.camera.color_image,\n self.camera.depth_image_align)\n self.cubemos.render_skeletons(self.camera.color_image)\n if self.display:\n # Stack both images horizontally\n images = np.hstack((self.camera.color_image,\n self.camera.depth_colormap))\n images = np.hstack((images, self.camera.color_image))\n\n # Show images\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', images)\n\n def track_pose(self):\n \"\"\"Track skeleton gaits.\"\"\"\n self.skel_tracker.update(self.cubemos.skel3d_np,\n self.cubemos.skel_ids)\n\n def cleanup(self):\n \"\"\"Cleanup workspace setup.\"\"\"\n self.camera.cleanup()\n\n\nif __name__ == \"__main__\":\n track_pose = Track_Human_Pose(display=True)\n while True:\n track_pose.get_pose()\n if track_pose.display:\n key = cv2.waitKey(1) & 0xFF\n # press the 'q' key to stop the video stream\n if key == ord(\"q\"):\n break\n" ]
[ [ "numpy.hstack", "numpy.expand_dims", "numpy.asarray", "numpy.any", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
XavierCarrera/robust-integration
[ "a0b0236d2095dd999eab3b95440a62183e94dfc9" ]
[ "cross_val.py" ]
[ "import pandas as pd\nimport numpy as np\n\nfrom sklearn.tree import DecisionTreeRegressor\n\nfrom sklearn.model_selection import (\n cross_val_score, KFold\n)\n\nif __name__ == \"__main__\":\n\n dataset = pd.read_csv('./data/felicidad.csv')\n\n X = dataset.drop(['country', 'score'], axis=1)\n y = dataset['score']\n\n model = DecisionTreeRegressor()\n score = cross_val_score(model, X,y, cv= 3, scoring='neg_mean_squared_error')\n print(np.abs(np.mean(score)))\n\n kf = KFold(n_splits=3, shuffle=True, random_state=42)\n for train, test in kf.split(dataset):\n print(train)\n print(test)" ]
[ [ "pandas.read_csv", "sklearn.tree.DecisionTreeRegressor", "sklearn.model_selection.cross_val_score", "sklearn.model_selection.KFold", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
TheJacksonLaboratory/imc-preprocessor
[ "1f11c6c40472affb086e4e66c5fe6a6694d6a451" ]
[ "imcpp/processing.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom .mcd import MCD\n\nimport numpy as np\nfrom argparse import Namespace\nfrom scipy.signal import convolve2d\nfrom skimage.morphology import white_tophat\nfrom skimage.morphology import square, disk, diamond\nfrom skimage.exposure import equalize_hist, equalize_adapthist\n\nfrom .logger import logger\nfrom .spillover import align_spillmat, load_spillmat\n\n\ndef cross(n):\n s = 2 * n + 1\n sel = np.zeros((s, s), dtype=int)\n sel[:, n] = 1\n sel[n, :] = 1\n return sel\n\n\nselems = Namespace(square=square, disk=disk, cross=cross)\n\n\ndef conway(im, selem=disk(1), threshold=None):\n if threshold is None:\n threshold = selem.sum() // 2 + 1\n\n b = im.astype(bool).astype(int)\n m = convolve2d(b, selem, mode=\"same\")\n im_ = im.copy()\n im_[m < threshold] = 0\n return im_\n\n\ndef tophat(im, selem=square(2)):\n b = im.copy().astype(bool).astype(int)\n m = b - white_tophat(b, selem=selem)\n im_ = im.copy()\n im_[~m.astype(bool)] = 0\n return im_\n\n\ndef compensate(img_stack, spillmat):\n swapped = False\n if img_stack.shape[0] == spillmat.shape[0]:\n img_stack = np.moveaxis(img_stack, 0, 2)\n swapped = True\n comp_ = img_stack @ np.linalg.inv(spillmat.T)\n comp_ = np.round(np.clip(comp_, 0, comp_.max())).astype(np.uint16)\n if swapped:\n comp_ = np.moveaxis(comp_, 2, 0)\n return comp_\n\n\ndef equalize(img_stack, adaptive=False):\n L = img_stack.shape[0]\n\n logger.debug(\"5th and 95th percentile before equalization\")\n logger.debug(\n np.column_stack(\n (\n np.arange(1, L + 1),\n np.percentile(img_stack, 5, axis=(1, 2)),\n np.percentile(img_stack, 95, axis=(1, 2)),\n )\n )\n )\n\n if adaptive:\n equalized = np.array(\n [\n equalize_adapthist(img_stack[k, ...], nbins=2 ** 16, clip_limit=0.4)\n for k in range(L)\n ]\n )\n else:\n equalized = np.array(\n [equalize_hist(img_stack[k, ...], nbins=2 ** 16) for k in range(L)]\n )\n return equalized\n\n\npixel_removal_functions = {\"conway\": conway, \"tophat\": tophat}\n\n\ndef report_maxima(mcd, ac_options):\n ac_id = ac_options.acquisition_id\n for ch_opts in ac_options.channels:\n ch_id = ch_opts.ch_id\n label = ch_opts.label\n metal = ch_opts.metal\n ch = mcd.get_data(ac_id, ch_int=ch_id)\n m = np.max(ch)\n if m > 1e5:\n logger.warn(f\"Channel {acid}/{ch_id}:{label}:{metal} maximum value is {m}\")\n else:\n logger.debug(f\"Channel {ch_id}:{label}:{metal} maximum value: {m}\")\n\n\ndef run_compensation(mcd, options):\n logger.info(\"Running compensation\")\n logger.debug(\n \"Note that all channels of the aquisition to be utilized during the \"\n \"compensation calculation but only those specified in the config \"\n \"file will be saved.\"\n )\n if options.spillover_matrix_file:\n logger.info(f\"Using provided spillover matrix {options.spillover_matrix_file}\")\n spillmat_raw = load_spillmat(options.spillover_matrix_file)\n\n for ac_options in options.acquisitions:\n report_maxima(mcd, ac_options)\n ac_id = ac_options.acquisition_id\n logger.debug(f\". compensating acquisition {ac_id}.\")\n spillmat = align_spillmat(spillmat_raw, mcd.channel_metals[ac_id])\n uncomp = mcd.get_data(ac_id)\n comp = compensate(uncomp, spillmat.values)\n mcd.set_data(comp, ac_id)\n\n if options.compensate_output_type:\n logger.info(\"Saving compensation results.\")\n acquisitions = options.export_acquisitions()\n mcd.save(\n acquisitions,\n options.compensate_output_type,\n prefix=options.output_prefix,\n suffix=options.compensate_output_suffix,\n )\n\n logger.info(\"Compensation complete.\")\n\n\ndef run_pixel_removal(mcd, options):\n logger.info(\"Running pixel removal\")\n\n method = options.pixel_removal_method\n if method not in pixel_removal_functions:\n logger.error(\n f\"Pixel removal function [{method}] is not in \"\n f\"allowed methods [{list(pixel_removal_functions.keys())}].\"\n )\n logger.warn(\"Proceeding without pixel removal!\")\n return\n method_func = pixel_removal_functions[method]\n\n global_threshold, global_selem = None, None\n if options.global_pixel_removal_neighbors is not None:\n global_threshold = options.global_pixel_removal_neighbors\n logger.debug(f\"Will use global pixel removal threshold of {global_threshold}\")\n if options.global_pixel_removal_selem is not None:\n global_selem = options.global_pixel_removal_selem\n logger.debug(\"Will use global pixel removal selem\")\n\n for ac_options in options.acquisitions:\n report_maxima(mcd, ac_options)\n ac_id = ac_options.acquisition_id\n\n for ch_opts in ac_options.channels:\n ch_id = ch_opts.ch_id\n clean = mcd.get_data(ac_id, ch_int=ch_id)\n\n logger.debug(f\". cleaning acquisition/channel {ac_id}/{ch_opts.metal}.\")\n selem = (\n global_selem if global_selem is not None else ch_opts.pixel_removal_selem\n )\n params = dict(selem=selem)\n if method == \"conway\":\n params[\"threshold\"] = (\n global_threshold\n if global_threshold is not None\n else ch_opts.pixel_removal_neighbors\n )\n\n for k in range(ch_opts.pixel_removal_iterations):\n logger.debug(\n f\".. iteration {k} using method '{method}' with parameters \"\n f\"'threshold={params['threshold']}'\"\n )\n clean = method_func(clean, **params)\n\n mcd.set_data(clean, ac_id, ch_int=ch_id)\n\n if options.pixel_removal_output_type:\n logger.info(\"Saving pixel removal results.\")\n acquisitions = options.export_acquisitions()\n mcd.save(\n acquisitions,\n options.pixel_removal_output_type,\n prefix=options.output_prefix,\n suffix=options.pixel_removal_output_suffix,\n )\n\n logger.info(\"Pixel removal complete.\")\n\n\ndef run_equalization(mcd, options):\n logger.info(\"Running equalization\")\n\n for ac_options in options.acquisitions:\n report_maxima(mcd, ac_options)\n ac_id = ac_options.acquisition_id\n logger.debug(f\". equalizing acquisition {ac_id}.\")\n unequalized = mcd.get_data(ac_id)\n equalized = equalize(unequalized, adaptive=False)\n mcd.set_data(equalized, ac_id)\n\n if options.equalization_output_type:\n logger.info(\"Saving equalization results.\")\n acquisitions = options.export_acquisitions()\n mcd.save(\n acquisitions,\n options.equalization_output_type,\n prefix=options.output_prefix,\n suffix=options.equalization_output_suffix,\n )\n\n logger.info(\"Equalization complete.\")\n\n\ndef process(options):\n mcd = MCD(options.mcdpath)\n mcd.load_mcd()\n\n if options.do_compensate:\n run_compensation(mcd, options)\n\n if options.do_pixel_removal:\n run_pixel_removal(mcd, options)\n\n if options.do_equalization:\n run_equalization(mcd, options)\n" ]
[ [ "numpy.linalg.inv", "numpy.arange", "scipy.signal.convolve2d", "numpy.percentile", "numpy.max", "numpy.moveaxis", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
ashadhaz/pml
[ "8d9261c8885a82d795c89de23f53ff7d05ef9495" ]
[ "pml/utils/distance_utils.py" ]
[ "# Copyright (C) 2012, 2013 David Rusk\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy \n# of this software and associated documentation files (the \"Software\"), to \n# deal in the Software without restriction, including without limitation the \n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or \n# sell copies of the Software, and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in \n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING \n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS \n# IN THE SOFTWARE.\n\"\"\"\nAlgorithms for calculating distances between vectors in feature (n) space.\n\n@author: drusk\n\"\"\"\n\nimport numpy as np\n\ndef euclidean(vector1, vector2):\n \"\"\"\n Calculates the Euclidean distance between two vectors in n-space.\n \n Args:\n vector1: \n start point vector\n vector2: \n end point vector\n \n Returns:\n The distance (magnitude) between vector1 and vector2.\n \"\"\"\n return np.sqrt(np.power(np.asarray(vector1) - np.asarray(vector2), 2).sum())\n\ndef cosine_similarity(vector1, vector2):\n \"\"\"\n Calculates the cosine similarity between two vectors. This is the\n cosine of the angle between them.\n\n Args:\n vector1: array-like\n vector2: array-like\n\n Returns:\n The cosine of the angle between the input vectors.\n -1 means they are complete opposites. 0 means they are independent,\n and 1 means they are very similar.\n \"\"\"\n # Make sure we are working with numpy arrays\n vector1 = np.asarray(vector1)\n vector2 = np.asarray(vector2)\n\n norm = np.linalg.norm\n product_of_magnitudes = norm(vector1) * norm(vector2)\n\n if product_of_magnitudes == 0:\n # TODO find a reference supporting this decision\n return 0.0\n\n return np.dot(vector1, vector2) / product_of_magnitudes\n\ndef cosine_distance(vector1, vector2):\n \"\"\"\n Calculates the cosine distance between two vectors. It is the complement\n of cosine similarity. I.e.:\n\n cosine_distance = 1 - cosine_similarity\n\n Use this instead of cosine similarity when you want small values to mean\n the vectors are similar as in regular distance measurements such as\n Euclidean distance.\n\n NOTE: this is not a proper distance metric as it does not have the\n triangle inequality property.\n\n Args:\n vector1: array-like\n vector2: array-like\n\n Returns:\n Close to 0 for similar vectors and larger values for dissimilar vectors.\n \"\"\"\n return 1 - cosine_similarity(vector1, vector2)\n" ]
[ [ "numpy.asarray", "numpy.dot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nitinkumar388/Coronavirus-Probability-Detector
[ "af054a230160d5f79093bde85558a21374188ce2" ]
[ "myTraining.py" ]
[ "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.linear_model import LogisticRegression\r\nimport pickle\r\n\r\ndef train_test_split(data, ratio):\r\n\r\n np.random.seed(56)\r\n shuffled = np.random.permutation(len(data))\r\n test_set_size = int(len(data)*ratio)\r\n test_indices = shuffled[:test_set_size]\r\n train_indices = shuffled[test_set_size:]\r\n return data.iloc[train_indices], data.iloc[test_indices]\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n df = pd.read_csv(\"C:/Users/91741/Desktop/myproject/data.csv\")\r\n train, test = train_test_split(df, 0.2)\r\n\r\n x_train = train[['fever','bodyPain','age','runnyNose','diffBreath']].to_numpy()\r\n x_test = test[['fever','bodyPain','age','runnyNose','diffBreath']].to_numpy()\r\n\r\n y_train = train[['infectionProb']].to_numpy().reshape(2060,)\r\n y_test = test[['infectionProb']].to_numpy().reshape(515,)\r\n\r\n \r\n clf = LogisticRegression()\r\n clf.fit(x_train, y_train)\r\n\r\n file = open('model.pkl','wb')\r\n pickle.dump(clf,file)\r\n file.close()\r\n" ]
[ [ "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "numpy.random.seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Gleiphir/AinuGAN
[ "963dbca303ca13f053a25c9a4f07eb4eda614091" ]
[ "main_1v3.py" ]
[ "import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n#from torch.optim.lr_scheduler import ExponentialLR\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport torchvision\n\nimport time\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport os\n#os.environ['CUDA_VISIBLE_DEVICES'] = '1,2,3'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', type=int, default=64)\nparser.add_argument('--lr', type=float, default=2e-4)\nparser.add_argument('--loss', type=str, default='hinge')\nparser.add_argument('--checkpoint_dir', type=str, default='CKP_1v3_{}_{}_{}')\nparser.add_argument('--model', type=str, default='std')\n\nparser.add_argument('--d-lrs', type=float,nargs=3)\n\nargs = parser.parse_args()\n\n\nloader = torch.utils.data.DataLoader(\ndatasets.ImageFolder('myDset', transform=transforms.Compose([\n\t\ttransforms.ToTensor(),\n\t\ttransforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])),\n\tbatch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)\n\nZ_dim = 256\n#number of updates to discriminator for every update to generator \ndisc_iters = 1\n\n# discriminator = torch.nn.DataParallel(Discriminator()).cuda() # TODO: try out multi-gpu training\n\nimport model_1 as model\n\n\n# because the spectral normalization module creates parameters that don't require gradients (u and v), we don't want to \n# optimize these using sgd. We only let the optimizer operate on parameters that _do_ require gradients\n# TODO: replace Parameters with buffers, which aren't returned from .parameters() method.\n\n\nif args.d_lrs:\n\td_lrs = [ x* 1e-4 for x in args.d_lrs]\nelse:\n\td_lrs = [2e-4,4e-4,8e-4]\n\nd_wghs = [1.0 for d_lr in d_lrs]\n#d_wghs = [1,1,1]\n\n\nprint(\"Disc lrs: \",d_lrs)\n\n\ndef mdf(it):\n\treturn ''.join([str(t *1e4)+'-' for t in it])\nckp_mdf = args.checkpoint_dir.format(args.model,\"ifuku\",mdf(d_lrs))\n\n\ndiscriminators = [torch.nn.DataParallel(model.Discriminator()).cuda() for lr in d_lrs ]\ngenerator = model.Generator(Z_dim).cuda()\n\n\noptim_discs = [\n\toptim.Adam( filter(lambda p: p.requires_grad, discriminators[i].parameters()),\n\t lr=d_lrs[i], betas=(0.5,0.999)\n\t)\n\t for i in range(len(d_lrs))]\n\noptim_gen = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.5,0.999))\n\n# use an exponentially decaying learning rate\n#scheduler_ds = [optim.lr_scheduler.ExponentialLR(optim_disc, gamma=0.99) for optim_disc in optim_discs ]\n#scheduler_g = optim.lr_scheduler.ExponentialLR(optim_gen, gamma=0.99)\n\ng_iter = 0\n\n\n\ndef train(epoch):\n\tglobal g_iter,start_t,last_t,now_t\n\tfor batch_idx, (data, target) in enumerate(loader):\n\t\tif data.size()[0] != args.batch_size:\n\t\t\tcontinue\n\t\tdata, target = Variable(data.cuda()), Variable(target.cuda())\n\n\t\t# update discriminator\n\t\td_losses = []\n\t\n\t\tfor D_index in range(len(discriminators)):\n\t\t\tfor _ in range(disc_iters):\n\t\t\t\tz = Variable(torch.randn(args.batch_size, Z_dim).cuda())\n\t\t\t\toptim_discs[D_index].zero_grad()\n\t\t\t\t#optim_gen.zero_grad()\n\t\t\t\t\n\t\t\t\tdisc_loss = nn.ReLU()(1.0 - discriminators[D_index](data)).mean() + nn.ReLU()(1.0 + discriminators[D_index](generator(z))).mean()\n\t\t\t\t\n\t\t\t\t\n\t\t\t\td_losses.append(disc_loss.data.item())\n\t\t\t\t\n\t\t\t\tdisc_loss.backward()\n\t\t\t\toptim_discs[D_index].step()\n\t\t\n\t\t\n\t\t\n\t\t\n\t\tz = Variable(torch.randn(args.batch_size, Z_dim).cuda())\n\n\t\t#print(generator(z).size())# update generator\n\t\t#optim_disc.zero_grad()\n\t\toptim_gen.zero_grad()\n\t\t#print(generator(z).size())\n\t\tlosses = []\n\t\t\n\t\tfor discriminator in discriminators:\n\t\t\tlosses.append( -discriminator(generator(z)).mean() )\n\t\tgen_loss =( losses[0] +losses[1]+losses[2] )/3.0\n\t\t\n\t\t\"\"\"\n\t\tgen_loss = torch.cat(losses).mean()# 2 * 32 *... / 4 * 16 * ...\n\t\t\n\t\tgen_loss =gen_loss.mean()\n\t\t\"\"\"\n\t\tgen_loss.backward()\n\t\t\n\t\toptim_gen.step()\n\n\t\tg_iter += 1\n\t\tif batch_idx % 100 == 0:\n\n\t\t\tlast_t = now_t\n\t\t\tnow_t = time.time()\n\t\t\tprint(\"##############################\")\n\t\t\tprint('\\n')\n\t\t\tprint(\"iter : %6d ------- time: %4d of %6d Sec\"%(g_iter,now_t - last_t,now_t - start_t))\n\t\t\tprint('disc loss(avg): %f,gen loss:%f'%(sum(d_losses)/ float(sum(d_wghs )), gen_loss.data.item()))\n\n\t\tif g_iter % 10000 == 0:\n\t\t\t#torch.save(discriminator.state_dict(), os.path.join(args.checkpoint_dir, 'disc_{}'.format(g_iter)))\n\t\t\ttorch.save(generator.state_dict(), os.path.join(ckp_mdf, 'gen_{}'.format(g_iter)))#args.checkpoint_dir, 'gen_{}'.format(g_iter)))\n\t\t#for scheduler_d in scheduler_ds:\n\t\t\t#scheduler_d.step()\n\t\t#scheduler_g.step()\n\n\nfixed_z = Variable(torch.randn(args.batch_size, Z_dim).cuda())\n\ndef evaluate(epoch):\n\n\tsamples = generator(fixed_z).cpu().data.numpy()[:64]\n\n\n\tfig = plt.figure(figsize=(8, 8))\n\tgs = gridspec.GridSpec(8, 8)\n\tgs.update(wspace=0.05, hspace=0.05)\n\n\tfor i, sample in enumerate(samples):\n\t\tax = plt.subplot(gs[i])\n\t\tplt.axis('off')\n\t\tax.set_xticklabels([])\n\t\tax.set_yticklabels([])\n\t\tax.set_aspect('equal')\n\t\tplt.imshow(sample.transpose((1,2,0)) * 0.5 + 0.5)\n\n\tif not os.path.exists('out/'):\n\t\tos.makedirs('out/')\n\n\tplt.savefig('out/{}.png'.format(str(epoch).zfill(3)), bbox_inches='tight')\n\tplt.close(fig)\nif not os.path.exists(ckp_mdf): #args.checkpoint_dir):\n\tos.makedirs(ckp_mdf) #args.checkpoint_dir)#, exist_ok=True)\n\nstart_t = time.time()\nlast_t = start_t\nnow_t = start_t\n\nif __name__ == \"__main__\":\n\tfor epoch in range(10000):\n\t\ttrain(epoch)\n\t\tprint(\"EPOCH # %d\"%epoch)\n\t\tif epoch %100 ==0:\n\t\t\tevaluate(epoch)\n\t\t\tfixed_z = Variable(torch.randn(64, Z_dim).cuda())\n\t\t\tfake_images = generator(fixed_z)\n\t\t\ttorchvision.utils.save_image(fake_images.data,os.path.join(\"./out__\", '{}_fake.png'.format(epoch)),normalize=True,padding=0)\n\n\n" ]
[ [ "matplotlib.use", "torch.randn", "matplotlib.pyplot.subplot", "matplotlib.gridspec.GridSpec", "matplotlib.pyplot.close", "matplotlib.pyplot.axis", "torch.nn.ReLU", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yangggzhang/heterogeneous-sampling
[ "2d65a077f0bee63154c295f1540505132111ac42" ]
[ "src/sampling_measurement/script/measurement_simulation_server.py" ]
[ "#!/usr/bin/env python\nimport itertools\nimport numpy as np\nimport rospkg\nimport rospy\nfrom sampling_msgs.srv import RequestMeasurement, RequestMeasurementResponse\n\nclass MeasurementSimulator(object):\n def __init__(self):\n rospy.init_node('measurement_simulation_node')\n measurement_trial = rospy.get_param(\"~measurement_trial\")\n self.rospack = rospkg.RosPack()\n self.polyfit_order = rospy.get_param('~poly_order', 5)\n self.noise_stdev = rospy.get_param('~noise_stdev', 0.5)\n self.position_x, self.position_y = self.loadposition(measurement_trial)\n self.measurement = self.loadmeasurement(measurement_trial)\n self.polyfit_coef = self.polyfit2d(self.position_x, self.position_y, self.measurement, order=self.polyfit_order)\n self.generateGroundTruth(measurement_trial)\n self.measurement_simulation_server = rospy.Service('measurement_simulation', RequestMeasurement, self.simulatemeasurement)\n rospy.spin()\n\n def polyfit2d(self, x, y, z, order=3):\n ncols = (order + 1)**2\n G = np.zeros((x.size, ncols))\n ij = itertools.product(range(order+1), range(order+1))\n for k, (i,j) in enumerate(ij):\n G[:,k] = x**i * y**j\n m, _, _, _ = np.linalg.lstsq(G, z, rcond=None)\n return m\n\n def polyval2d(self, x, y, m):\n order = int(np.sqrt(len(m))) - 1\n ij = itertools.product(range(order+1), range(order+1))\n z = 0.0\n for a, (i,j) in zip(m, ij):\n z += a * x**i * y**j\n return z\n \n def generateGroundTruth(self, measurement_trail):\n artifical_ground_truth = self.rospack.get_path('sampling_data') + \"/measurement/artificial_\" + measurement_trail + \".txt\"\n data_file = open(artifical_ground_truth, \"w\")\n for x, y in zip(self.position_x, self.position_y):\n measurement = self.polyval2d(x, y, self.polyfit_coef)\n data_file.write(\"%f\\n\" %(measurement))\n data_file.close()\n\n def loadposition(self, measurement_trial):\n position_x = []\n position_y = []\n position_file = self.rospack.get_path('sampling_data') + \"/location/\" + measurement_trial + \".txt\"\n with open(position_file, \"r\") as filestream:\n for line in filestream:\n new_x, new_y = line.split(\",\")\n position_x.append(float(new_x))\n position_y.append(float(new_y))\n return np.array(position_x), np.array(position_y)\n\n def loadmeasurement(self, measurement_trail):\n measurement_file = self.rospack.get_path('sampling_data') + \"/measurement/\" + measurement_trail + \".txt\"\n return np.loadtxt(measurement_file)\n \n def simulatemeasurement(self, req):\n simulated_measurement = self.polyval2d(req.position.x, req.position.y, self.polyfit_coef)\n simulated_measurement += np.random.normal(0, self.noise_stdev)\n return RequestMeasurementResponse(simulated_measurement)\n\nif __name__ == \"__main__\":\n measurement_simulation_server = MeasurementSimulator()\n" ]
[ [ "numpy.linalg.lstsq", "numpy.random.normal", "numpy.array", "numpy.zeros", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
julioadl/mlCodebase
[ "a7a3f9bd9333a040018d7e0865a9933e46d92712" ]
[ "development/ml/models/base.py" ]
[ "from typing import Callable, Dict\nimport pathlib\nfrom boltons.cacheutils import cachedproperty\n\nimport numpy as np\nfrom sklearn import metrics\nfrom sklearn.externals import joblib\nfrom tensorflow.keras.models import Model as KerasModel\nfrom tensorflow.keras.optimizers import RMSprop\n\nfrom datasets.sequence import DatasetSequence\n\nDIRNAME = pathlib.Path('__file__').parents[0].resolve() / 'weights'\n\nclass ModelSKLearn:\n def __init__(self, dataset_cls: type, algorithm_fn: Callable, dataset_args: Dict=None, algorithm_args: Dict=None):\n self.name = f'{self.__class__.__name__}_{dataset_cls.__name__}{algorithm_fn.__name__}'\n\n if dataset_args is None:\n dataset_args = {}\n self.data = dataset_cls(**dataset_args)\n\n if algorithm_args is None:\n algorithm_args = {}\n self.algorithm = algorithm_fn(**algorithm_args)\n #self.algorithm.summary()\n\n self.batch_augment_fn = None\n self.batch_format_fn = None\n\n @property\n def weights_filename(self):\n DIRNAME.mkdir(parents=True, exist_ok=True)\n return str(DIRNAME / f'{self.name}_weights.pkl')\n\n '''\n Functions to be filled out see: https://github.com/gradescope/fsdl-text-recognizer-project/blob/master/lab6_sln/text_recognizer/models/base.py\n '''\n\n def fit(self, dataset, batch_size=None, epochs=None, callbacks=[]):\n #Define fit sequence\n '''\n Fit generator for keras. See line 44 https://github.com/gradescope/fsdl-text-recognizer-project/blob/master/lab6_sln/text_recognizer/models/base.py\n Arguments for fit generator\n train_sequence,\n epochs = epochs,\n callbacks = callbacks,\n validation_data = test_sequence,\n use_multiprocessing = False,\n workers = 1,\n shuffle = True\n '''\n #Updated for sklearn\n train_sequence = DatasetSequence(dataset.x_train, dataset.y_train, batch_size)\n\n self.algorithm.fit(\n train_sequence.x,\n train_sequence.y\n )\n\n def evaluate(self, x, y):\n #Define evaluate sequence\n '''\n For predict for Keras see line 56 in https://github.com/gradescope/fsdl-text-recognizer-project/blob/master/lab6_sln/text_recognizer/models/base.py\n '''\n sequence = DatasetSequence(x, y, batch_size=12)\n preds = self.algorithm.predict(sequence.x)\n report = metrics.classification_report(sequence.y, preds)\n return report\n\n def loss(self):\n #Return loss\n return 'Loss type'\n\n def optimizer(self):\n #Return optimizer\n return 'optimizer'\n\n def metrics(self):\n return ['accuracy']\n\n def save_model(self):\n joblib.dump(self.algorithm, self.weights_filename)\n\n def load_model(self):\n self.algorithm = joblib.load(self.weights_filename)\n\nclass ModelTf:\n \"\"\"Base class, to be subclassed by predictors for specific type of data.\"\"\"\n def __init__(self, dataset_cls: type, algorithm_fn: Callable, dataset_args: Dict=None, algorithm_args: Dict=None):\n self.name = f'{self.__class__.__name__}_{dataset_cls.__name__}_{algorithm_fn.__name__}'\n\n if dataset_args is None:\n dataset_args = {}\n self.data = dataset_cls(**dataset_args)\n\n if algorithm_args is None:\n algorithm_args = {}\n self.algorithm = algorithm_fn(self.data.input_shape, self.data.output_shape, **algorithm_args)\n self.algorithm.summary()\n\n self.batch_augment_fn = None\n self.batch_format_fn = None\n\n @property\n def weights_filename(self):\n DIRNAME.mkdir(parents=True, exist_ok=True)\n return str(DIRNAME / f'{self.name}_weights.h5')\n\n def fit(self, dataset, batch_size=32, epochs=10, callbacks=[]):\n self.algorithm.compile(loss=self.loss(), optimizer=self.optimizer(), metrics=self.metrics())\n\n train_sequence = DatasetSequence(dataset.x_train, dataset.y_train, batch_size, augment_fn=self.batch_augment_fn, format_fn=self.batch_format_fn)\n test_sequence = DatasetSequence(dataset.x_test, dataset.y_test, batch_size, augment_fn=self.batch_augment_fn, format_fn=self.batch_format_fn)\n\n self.algorithm.fit_generator(\n train_sequence,\n epochs=epochs,\n callbacks=callbacks,\n validation_data=test_sequence,\n use_multiprocessing=True,\n workers=1,\n shuffle=True\n )\n\n def evaluate(self, x, y):\n sequence = DatasetSequence(x, y, batch_size=16) # Use a small batch size to use less memory\n preds = self.algorithm.predict_generator(sequence)\n return np.mean(np.argmax(preds, -1) == np.argmax(y, -1))\n\n def loss(self):\n return 'categorical_crossentropy'\n\n def optimizer(self):\n return RMSprop()\n\n def metrics(self):\n return ['accuracy']\n\n def load_weights(self):\n self.algorithm.load_weights(self.weights_filename)\n\n def save_weights(self):\n self.algorithm.save_weights(self.weights_filename)\n" ]
[ [ "sklearn.externals.joblib.dump", "tensorflow.keras.optimizers.RMSprop", "numpy.argmax", "sklearn.externals.joblib.load", "sklearn.metrics.classification_report" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] } ]
SooluThomas/qiskit-terra
[ "25b47af83f14afb3441d7b2c1bd31bda93e3549d" ]
[ "qiskit/quantum_info/operators/symplectic/base_pauli.py" ]
[ "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2020\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\"\"\"\nOptimized list of Pauli operators\n\"\"\"\n# pylint: disable=invalid-name, abstract-method\n\nimport copy\nimport numpy as np\n\nfrom qiskit.exceptions import QiskitError\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit.barrier import Barrier\nfrom qiskit.quantum_info.operators.base_operator import BaseOperator\nfrom qiskit.quantum_info.operators.mixins import AdjointMixin, MultiplyMixin\n\n\nclass BasePauli(BaseOperator, AdjointMixin, MultiplyMixin):\n r\"\"\"Symplectic representation of a list of N-qubit Paulis.\n\n Base class for Pauli and PauliList.\n \"\"\"\n\n def __init__(self, z, x, phase):\n \"\"\"Initialize the BasePauli.\n\n This is an array of M N-qubit Paulis defined as\n P = (-i)^phase Z^z X^x.\n\n Args:\n z (np.ndarray): input z matrix.\n x (np.ndarray): input x matrix.\n phase (np.ndarray): input phase vector.\n \"\"\"\n self._z = z\n self._x = x\n self._phase = phase\n self._num_paulis, num_qubits = self._z.shape\n super().__init__(num_qubits=num_qubits)\n\n def copy(self):\n \"\"\"Make a deep copy of current operator.\"\"\"\n # Deepcopy has terrible performance on objects with Numpy arrays\n # attributes so we make a shallow copy and then manually copy the\n # Numpy arrays to efficiently mimic a deepcopy\n ret = copy.copy(self)\n ret._z = self._z.copy()\n ret._x = self._x.copy()\n ret._phase = self._phase.copy()\n return ret\n\n # ---------------------------------------------------------------------\n # BaseOperator methods\n # ---------------------------------------------------------------------\n\n def tensor(self, other):\n return self._tensor(self, other)\n\n def expand(self, other):\n return self._tensor(other, self)\n\n @classmethod\n def _tensor(cls, a, b):\n z = np.hstack([a._stack(b._z, a._num_paulis), a._z])\n x = np.hstack([a._stack(b._x, a._num_paulis), a._x])\n phase = np.mod(a._phase + b._phase, 4)\n return BasePauli(z, x, phase)\n\n # pylint: disable=arguments-differ\n def compose(self, other, qargs=None, front=False, inplace=False):\n \"\"\"Return the composition of Paulis.\n\n Args:\n a ({cls}): an operator object.\n b ({cls}): an operator object.\n qargs (list or None): Optional, qubits to apply dot product\n on (default: None).\n inplace (bool): If True update in-place (default: False).\n\n Returns:\n {cls}: The operator a.compose(b)\n\n Raises:\n QiskitError: if number of qubits of other does not match qargs.\n \"\"\".format(cls=type(self).__name__)\n # Validation\n if qargs is None and other.num_qubits != self.num_qubits:\n raise QiskitError(\n \"other {} must be on the same number of qubits.\".format(\n type(self).__name__))\n\n if qargs and other.num_qubits != len(qargs):\n raise QiskitError(\n \"Number of qubits of the other {} does not match qargs.\".format(\n type(self).__name__))\n\n if other._num_paulis not in [1, self._num_paulis]:\n raise QiskitError(\"Incompatible BasePaulis. Second list must \"\n \"either have 1 or the same number of Paulis.\")\n\n # Compute phase shift\n if qargs is not None:\n x1, z1 = self._x[:, qargs], self._z[:, qargs]\n else:\n x1, z1 = self._x, self._z\n x2, z2 = other._x, other._z\n\n # Get phase shift\n phase = self._phase + other._phase\n if front:\n phase += 2 * np.sum(np.logical_and(x1, z2), axis=1)\n else:\n phase += 2 * np.sum(np.logical_and(z1, x2), axis=1)\n\n # Update Pauli\n x = np.logical_xor(x1, x2)\n z = np.logical_xor(z1, z2)\n\n if qargs is None:\n if not inplace:\n return BasePauli(z, x, phase)\n # Inplace update\n self._x = x\n self._z = z\n self._phase = phase\n return self\n\n # Qargs update\n ret = self if inplace else self.copy()\n ret._x[:, qargs] = x\n ret._z[:, qargs] = z\n ret._phase = np.mod(phase, 4)\n return ret\n\n def _multiply(self, other):\n \"\"\"Return the {cls} other * self.\n\n Args:\n other (complex): a complex number in ``[1, -1j, -1, 1j]``.\n\n Returns:\n {cls}: the {cls} other * self.\n\n Raises:\n QiskitError: if the phase is not in the set ``[1, -1j, -1, 1j]``.\n \"\"\".format(cls=type(self).__name__)\n if isinstance(other, (np.ndarray, list, tuple)):\n phase = np.array([self._phase_from_complex(phase) for phase in other])\n else:\n phase = self._phase_from_complex(other)\n return BasePauli(self._z, self._x, np.mod(self._phase + phase, 4))\n\n def conjugate(self):\n \"\"\"Return the conjugate of each Pauli in the list.\"\"\"\n complex_phase = np.mod(self._phase, 2)\n if np.all(complex_phase == 0):\n return self\n return BasePauli(self._z, self._x, np.mod(self._phase + 2 * complex_phase, 4))\n\n def transpose(self):\n \"\"\"Return the transpose of each Pauli in the list.\"\"\"\n # Transpose sets Y -> -Y. This has effect on changing the phase\n parity_y = self._count_y() % 2\n if np.all(parity_y == 0):\n return self\n return BasePauli(self._z, self._x, np.mod(self._phase + 2 * parity_y, 4))\n\n def commutes(self, other, qargs=None):\n \"\"\"Return True if Pauli that commutes with other.\n\n Args:\n other (BasePauli): another BasePauli operator.\n qargs (list): qubits to apply dot product on (default: None).\n\n Returns:\n np.array: Boolean array of True if Pauli's commute, False if\n they anti-commute.\n\n Raises:\n QiskitError: if number of qubits of other does not match qargs.\n \"\"\"\n if qargs is not None and len(qargs) != other.num_qubits:\n raise QiskitError(\n \"Number of qubits of other Pauli does not match number of \"\n \"qargs ({} != {}).\".format(other.num_qubits, len(qargs)))\n if qargs is None and self.num_qubits != other.num_qubits:\n raise QiskitError(\n \"Number of qubits of other Pauli does not match the current \"\n \"Pauli ({} != {}).\".format(other.num_qubits, self.num_qubits))\n if qargs is not None:\n inds = list(qargs)\n x1, z1 = self._x[:, inds], self._z[:, inds]\n else:\n x1, z1 = self._x, self._z\n a_dot_b = np.mod(np.sum(np.logical_and(x1, other._z), axis=1), 2)\n b_dot_a = np.mod(np.sum(np.logical_and(z1, other._x), axis=1), 2)\n return a_dot_b == b_dot_a\n\n def evolve(self, other, qargs=None):\n r\"\"\"Heisenberg picture evolution of a Pauli by a Clifford.\n\n This returns the Pauli :math:`P^\\prime = C^\\dagger.P.C`.\n\n Args:\n other (BasePauli or QuantumCircuit): The Clifford circuit to evolve by.\n qargs (list): a list of qubits to apply the Clifford to.\n\n Returns:\n BasePauli: the Pauli :math:`C^\\dagger.P.C`.\n\n Raises:\n QiskitError: if the Clifford number of qubits and qargs don't match.\n \"\"\"\n # Check dimension\n if qargs is not None and len(qargs) != other.num_qubits:\n raise QiskitError(\n \"Incorrect number of qubits for Clifford circuit ({} != {}).\".format(\n other.num_qubits, len(qargs)))\n if qargs is None and self.num_qubits != other.num_qubits:\n raise QiskitError(\n \"Incorrect number of qubits for Clifford circuit ({} != {}).\".format(\n other.num_qubits, self.num_qubits))\n\n # Evolve via Pauli\n if isinstance(other, BasePauli):\n ret = self.compose(other.adjoint(), qargs=qargs)\n ret = ret.compose(other, front=True, qargs=qargs)\n return ret\n\n # Otherwise evolve by the inverse circuit to compute C^dg.P.C\n return self.copy()._append_circuit(other.inverse(), qargs=qargs)\n\n # ---------------------------------------------------------------------\n # Helper Methods\n # ---------------------------------------------------------------------\n\n def __imul__(self, other):\n return self.compose(other, front=True, inplace=True)\n\n def __neg__(self):\n ret = copy.copy(self)\n ret._phase = np.mod(self._phase + 2, 4)\n return ret\n\n def _count_y(self):\n \"\"\"Count the number of I Pauli's\"\"\"\n return np.sum(np.logical_and(self._x, self._z), axis=1)\n\n @staticmethod\n def _stack(array, size):\n \"\"\"Stack array.\"\"\"\n if size == 1:\n return array\n return np.vstack(size * [array])\n\n @staticmethod\n def _phase_from_complex(coeff):\n \"\"\"Return the phase from a label\"\"\"\n if np.isclose(coeff, 1):\n return 0\n if np.isclose(coeff, -1j):\n return 1\n if np.isclose(coeff, -1):\n return 2\n if np.isclose(coeff, 1j):\n return 3\n raise QiskitError(\"Pauli can only be multiplied by 1, -1j, -1, 1j.\")\n\n @staticmethod\n def _from_array(z, x, phase=0):\n \"\"\"Convert array data to BasePauli data.\"\"\"\n if isinstance(z, np.ndarray) and z.dtype == bool:\n base_z = z\n else:\n base_z = np.asarray(z, dtype=bool)\n if base_z.ndim == 1:\n base_z = base_z.reshape((1, base_z.size))\n elif base_z.ndim != 2:\n raise QiskitError(\"Invalid Pauli z vector shape.\")\n\n if isinstance(x, np.ndarray) and x.dtype == bool:\n base_x = x\n else:\n base_x = np.asarray(x, dtype=bool)\n if base_x.ndim == 1:\n base_x = base_x.reshape((1, base_x.size))\n elif base_x.ndim != 2:\n raise QiskitError(\"Invalid Pauli x vector shape.\")\n\n if base_z.shape != base_x.shape:\n raise QiskitError(\"z and x vectors are different size.\")\n\n # Convert group phase convention to internal ZX-phase conversion.\n base_phase = np.mod(np.sum(np.logical_and(base_x, base_z),\n axis=1, dtype=int) + phase, 4)\n return base_z, base_x, base_phase\n\n @staticmethod\n def _to_matrix(z, x, phase=0, group_phase=False, sparse=False):\n \"\"\"Return the matrix matrix from symplectic representation.\n\n The Pauli is defined as :math:`P = (-i)^{phase + z.x} * Z^z.x^x`\n where ``array = [x, z]``.\n\n Args:\n z (array): The symplectic representation z vector.\n x (array): The symplectic representation x vector.\n phase (int): Pauli phase.\n group_phase (bool): Optional. If True use group-phase convention\n instead of BasePauli ZX-phase convention.\n (default: False).\n sparse (bool): Optional. Of True return a sparse CSR matrix,\n otherwise return a dense Numpy array\n (default: False).\n\n Returns:\n array: if sparse=False.\n csr_matrix: if sparse=True.\n \"\"\"\n num_qubits = z.size\n\n # Convert to zx_phase\n if group_phase:\n phase += np.sum(x & z)\n phase %= 4\n\n dim = 2**num_qubits\n twos_array = 1 << np.arange(num_qubits)\n x_indices = np.asarray(x).dot(twos_array)\n z_indices = np.asarray(z).dot(twos_array)\n\n indptr = np.arange(dim + 1, dtype=np.uint)\n indices = indptr ^ x_indices\n if phase:\n coeff = (-1j)**phase\n else:\n coeff = 1\n data = np.array([coeff * (-1) ** (bin(i).count('1') % 2)\n for i in z_indices & indptr])\n if sparse:\n # Return sparse matrix\n from scipy.sparse import csr_matrix\n return csr_matrix((data, indices, indptr), shape=(dim, dim),\n dtype=complex)\n\n # Build dense matrix using csr format\n mat = np.zeros((dim, dim), dtype=complex)\n for i in range(dim):\n mat[i][indices[indptr[i]:indptr[i + 1]]] = data[indptr[i]:indptr[i + 1]]\n return mat\n\n @staticmethod\n def _to_label(z, x, phase, group_phase=False,\n full_group=True, return_phase=False):\n \"\"\"Return the label string for a Pauli.\n\n Args:\n z (array): The symplectic representation z vector.\n x (array): The symplectic representation x vector.\n phase (int): Pauli phase.\n group_phase (bool): Optional. If True use group-phase convention\n instead of BasePauli ZX-phase convention.\n (default: False).\n full_group (bool): If True return the Pauli label from the full Pauli group\n including complex coefficient from [1, -1, 1j, -1j]. If\n False return the unsigned Pauli label with coefficient 1\n (default: True).\n return_phase (bool): If True return the adjusted phase for the coefficient\n of the returned Pauli label. This can be used even if\n ``full_group=False``.\n\n Returns:\n str: the Pauli label from the full Pauli group (if ``full_group=True``) or\n from the unsigned Pauli group (if ``full_group=False``).\n Tuple[str, int]: if ``return_phase=True`` returns a tuple of the Pauli\n label (from either the full or unsigned Pauli group) and\n the phase ``q`` for the coefficient :math:`(-i)^(q + x.z)`\n for the label from the full Pauli group.\n \"\"\"\n num_qubits = z.size\n coeff_labels = {0: '', 1: '-i', 2: '-', 3: 'i'}\n label = ''\n for i in range(num_qubits):\n if not z[num_qubits - 1 - i]:\n if not x[num_qubits - 1 - i]:\n label += 'I'\n else:\n label += 'X'\n elif not x[num_qubits - 1 - i]:\n label += 'Z'\n else:\n label += 'Y'\n if not group_phase:\n phase -= 1\n phase %= 4\n if phase and full_group:\n label = coeff_labels[phase] + label\n if return_phase:\n return label, phase\n return label\n\n def _append_circuit(self, circuit, qargs=None):\n \"\"\"Update BasePauli inplace by applying a Clifford circuit.\n\n Args:\n circuit (QuantumCircuit or Instruction): the gate or composite gate to apply.\n qargs (list or None): The qubits to apply gate to.\n\n Returns:\n BasePauli: the updated Pauli.\n\n Raises:\n QiskitError: if input gate cannot be decomposed into Clifford gates.\n \"\"\"\n if isinstance(circuit, Barrier):\n return self\n\n if qargs is None:\n qargs = list(range(self.num_qubits))\n\n if isinstance(circuit, QuantumCircuit):\n gate = circuit.to_instruction()\n else:\n gate = circuit\n\n # Basis Clifford Gates\n basis_1q = {\n 'i': _evolve_i,\n 'id': _evolve_i,\n 'iden': _evolve_i,\n 'x': _evolve_x,\n 'y': _evolve_y,\n 'z': _evolve_z,\n 'h': _evolve_h,\n 's': _evolve_s,\n 'sdg': _evolve_sdg,\n 'sinv': _evolve_sdg\n }\n basis_2q = {\n 'cx': _evolve_cx,\n 'cz': _evolve_cz,\n 'cy': _evolve_cy,\n 'swap': _evolve_swap\n }\n\n # Non-Clifford gates\n non_clifford = ['t', 'tdg', 'ccx', 'ccz']\n\n if isinstance(gate, str):\n # Check if gate is a valid Clifford basis gate string\n if gate not in basis_1q and gate not in basis_2q:\n raise QiskitError(\n \"Invalid Clifford gate name string {}\".format(gate))\n name = gate\n else:\n # Assume gate is an Instruction\n name = gate.name\n\n # Apply gate if it is a Clifford basis gate\n if name in non_clifford:\n raise QiskitError(\n \"Cannot update Pauli with non-Clifford gate {}\".format(name))\n if name in basis_1q:\n if len(qargs) != 1:\n raise QiskitError(\"Invalid qubits for 1-qubit gate.\")\n return basis_1q[name](self, qargs[0])\n if name in basis_2q:\n if len(qargs) != 2:\n raise QiskitError(\"Invalid qubits for 2-qubit gate.\")\n return basis_2q[name](self, qargs[0], qargs[1])\n\n # If not a Clifford basis gate we try to unroll the gate and\n # raise an exception if unrolling reaches a non-Clifford gate.\n if gate.definition is None:\n raise QiskitError('Cannot apply Instruction: {}'.format(gate.name))\n if not isinstance(gate.definition, QuantumCircuit):\n raise QiskitError(\n '{} instruction definition is {}; expected QuantumCircuit'.format(\n gate.name, type(gate.definition)))\n for instr, qregs, cregs in gate.definition:\n if cregs:\n raise QiskitError(\n 'Cannot apply Instruction with classical registers: {}'.format(\n instr.name))\n # Get the integer position of the flat register\n new_qubits = [qargs[tup.index] for tup in qregs]\n self._append_circuit(instr, new_qubits)\n\n # Since the individual gate evolution functions don't take mod\n # of phase we update it at the end\n self._phase %= 4\n return self\n\n\n# ---------------------------------------------------------------------\n# Evolution by Clifford gates\n# ---------------------------------------------------------------------\n\ndef _evolve_h(base_pauli, qubit):\n \"\"\"Update P -> H.P.H\"\"\"\n x = base_pauli._x[:, qubit].copy()\n z = base_pauli._z[:, qubit].copy()\n base_pauli._x[:, qubit] = z\n base_pauli._z[:, qubit] = x\n base_pauli._phase += 2 * np.logical_and(x, z).T\n return base_pauli\n\n\ndef _evolve_s(base_pauli, qubit):\n \"\"\"Update P -> S.P.Sdg\"\"\"\n x = base_pauli._x[:, qubit]\n base_pauli._z[:, qubit] ^= x\n base_pauli._phase += x.T\n return base_pauli\n\n\ndef _evolve_sdg(base_pauli, qubit):\n \"\"\"Update P -> Sdg.P.S\"\"\"\n x = base_pauli._x[:, qubit]\n base_pauli._z[:, qubit] ^= x\n base_pauli._phase -= x.T\n return base_pauli\n\n\n# pylint: disable=unused-argument\ndef _evolve_i(base_pauli, qubit):\n \"\"\"Update P -> P\"\"\"\n return base_pauli\n\n\ndef _evolve_x(base_pauli, qubit):\n \"\"\"Update P -> X.P.X\"\"\"\n base_pauli._phase += 2 * base_pauli._z[:, qubit].T\n return base_pauli\n\n\ndef _evolve_y(base_pauli, qubit):\n \"\"\"Update P -> Y.P.Y\"\"\"\n base_pauli._phase += 2 * base_pauli._x[:, qubit].T + 2 * base_pauli._z[:, qubit].T\n return base_pauli\n\n\ndef _evolve_z(base_pauli, qubit):\n \"\"\"Update P -> Z.P.Z\"\"\"\n base_pauli._phase += 2 * base_pauli._x[:, qubit].T\n return base_pauli\n\n\ndef _evolve_cx(base_pauli, qctrl, qtrgt):\n \"\"\"Update P -> CX.P.CX\"\"\"\n base_pauli._x[:, qtrgt] ^= base_pauli._x[:, qctrl]\n base_pauli._z[:, qctrl] ^= base_pauli._z[:, qtrgt]\n return base_pauli\n\n\ndef _evolve_cz(base_pauli, q1, q2):\n \"\"\"Update P -> CZ.P.CZ\"\"\"\n x1 = base_pauli._x[:, q1]\n x2 = base_pauli._x[:, q2]\n base_pauli._z[:, q1] ^= x1\n base_pauli._z[:, q2] ^= x2\n base_pauli._phase += 2 * np.logical_and(x1, x2).T\n return base_pauli\n\n\ndef _evolve_cy(base_pauli, qctrl, qtrgt):\n \"\"\"Update P -> CY.P.CY\"\"\"\n x1 = base_pauli._x[:, qctrl]\n x2 = base_pauli._x[:, qtrgt]\n z2 = base_pauli._z[:, qtrgt]\n base_pauli._x[:, qtrgt] ^= x1\n base_pauli._z[:, qtrgt] ^= x1\n base_pauli._z[:, qctrl] ^= np.logical_xor(x2, z2)\n base_pauli._phase += x1 + 2 * np.logical_and(x1, x2).T\n return base_pauli\n\n\ndef _evolve_swap(base_pauli, q1, q2):\n \"\"\"Update P -> SWAP.P.SWAP\"\"\"\n x1 = base_pauli._x[:, q1]\n z1 = base_pauli._z[:, q1]\n base_pauli._x[:, q1] = base_pauli._x[:, q2]\n base_pauli._z[:, q1] = base_pauli._z[:, q2]\n base_pauli._x[:, q2] = x1\n base_pauli._z[:, q2] = z1\n return base_pauli\n" ]
[ [ "numpy.logical_xor", "numpy.asarray", "numpy.arange", "scipy.sparse.csr_matrix", "numpy.all", "numpy.mod", "numpy.logical_and", "numpy.zeros", "numpy.sum", "numpy.vstack", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
iesl/expLinkage
[ "4d46683a3eb86b4a40425acf08b608ab44f5006b" ]
[ "src/trainer/train_vect_data.py" ]
[ "\"\"\"\nCopyright (C) 2019 University of Massachusetts Amherst.\nThis file is part of \"expLinkage\"\nhttp://github.com/iesl/expLinkage\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport argparse, time, sys, os\nfrom pathlib import Path\nimport torch\n\nfrom models.mahalabonis import MahalanobisDist, GenLinkMahalanobis\n\nfrom utils.Config import Config\nfrom utils.basic_utils import create_logger\nfrom eval.finalEval import run_final_eval\n\nfrom trainer.VectDataTrainer import VectDataTrainer\n\ndef trainExpLinkOnly(trainer):\n\tif trainer.config.trainObj == \"linkage_auto\":\n\t\ttrainer.logger.info(\"Not training linkageAlpha separately because if trainObj is linakge_auto then it must be trained already...\")\n\telif trainer.config.modelType == \"maha\":\n\t\t\n\t\tassert isinstance(trainer.model, MahalanobisDist)\n\t\t\n\t\tnew_model = GenLinkMahalanobis(trainer.config)\n\t\tnew_model.seqModel[0].weight.requires_grad = False\n\t\tnew_model.seqModel[0].weight.data = trainer.model.seqModel[0].weight.data\n\t\tnew_model.seqModel[0].weight.requires_grad = True\n\t\ttrainer.model = new_model\n\t\tif trainer.config.useGPU:\n\t\t\ttrainer.logger.info(\"Shifting model to cuda because GPUs are available!\")\n\t\t\ttrainer.model = trainer.model.cuda()\n\t\t\n\t\ttrainer.config.trainAlpha = True\n\t\ttrainer.config.trainModel = False\n\t\ttrainer.resetOptimizer()\n\t\t\n\t\tif \"linkage_auto\" not in trainer.config.inferenceMethods:\n\t\t\ttrainer.config.inferenceMethods += [\"linkage_auto\"]\n\t\tif \"linkage_auto@t\" not in trainer.config.inferenceMethods:\n\t\t\ttrainer.config.inferenceMethods += [\"linkage_auto@t\"]\n\t\t\n\t\torigCSVFile = \"{}/origTraining/results.csv\"\n\t\tfileCheck = Path(origCSVFile.format(trainer.config.resultDir))\n\t\tif not fileCheck.is_file():\n\t\t\tprint(\"File does not exist:{}\".format(origCSVFile))\n\t\t\tcommand = \"cd {} && mkdir -p origTraining && cp *.csv origTraining/ && cp *.png origTraining/\".format(trainer.config.resultDir)\n\t\t\tos.system(command)\n\t\t\n\t\ttrainer.config.trainObj = \"linkage_auto\"\n\t\ttrainer.logger.info(\"Training alpha parameter of expLink ...\\n\\n\\n\")\n\t\ttrainer.logger.info(trainer.model)\n\t\t\n\t\tt1 = time.time()\n\t\tsuccess = trainer.train()\n\t\tif success is not None and (not success):\n\t\t\ttry:\n\t\t\t\ttrainer.config.inferenceMethods.remove(\"linkage_auto@t\")\n\t\t\t\ttrainer.config.inferenceMethods.remove(\"linkage_auto\")\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\n\t\n\n\t\ttrainer.printModelWeights()\n\t\ttrainer.logger.info(\"Training alpha parameter of expLink linkage ends...in time={:.3f}\".format(time.time() - t1))\n\t\ttrainer.logger.info(\"Saving model...\")\n\t\t\n\t\ttrainer.config.bestModel = os.path.join(trainer.config.resultDir, \"model_alpha.torch\")\n\t\ttorch.save(trainer.model, trainer.config.bestModel )\n\t\ttrainer.config.save_config(trainer.config.resultDir, \"config_expLink.json\")\n\t\ttrainer.logger.info(\"Saved model...\")\n\t\t\n\telse:\n\t\ttrainer.logger.info(\"Not training linkageAlpha separately because if modelType is not Mahalanobis distance matrix... \")\n\ndef runMain(config):\n\tcommand = sys.argv\n\tstart = time.time()\n\tassert isinstance(config,Config)\n\tif config.mode == \"train\":\n\t\ttrainer = VectDataTrainer(config)\n\t\ttrainer.printModelWeights()\n\t\t\n\t\tt1 = time.time()\n\t\ttrainer.train()\n\t\t\n\t\ttrainer.logger.info(\"Training ends...in time={:.3f}\".format(time.time() - t1))\n\t\ttrainer.printModelWeights()\n\t\ttrainer.logger.info(\"Saving model...\")\n\t\t\n\t\ttrainer.config.bestModel = os.path.join(trainer.config.resultDir, \"model.torch\")\n\t\ttorch.save(trainer.model, trainer.config.bestModel )\n\t\ttrainer.config.save_config(trainer.config.resultDir)\n\t\ttrainer.logger.info(\"Saved model...\")\n\t\t\n\t\t################### Train alpha parameter for softLink ##########################\n\t\n\t\tif config.trainExpLink:\n\t\t\ttrainExpLinkOnly(trainer)\n\t\t#################################################################################\n\t\n\telif config.mode == \"trainExpLink\":\n\t\ttrainer = VectDataTrainer(config)\n\t\t\n\t\t# Load model and reset optimizer to have parameters of the loaded model\n\t\ttrainer.loadModel()\n\t\t\n\t\t# Update output directory\n\t\ttrainer.config.resultDir = trainer.config.resultDir + args.newDirSuffix\n\t\tPath(trainer.config.resultDir).mkdir(parents=True, exist_ok=True) # Create resultDir directory if not already present\n\t\t\n\t\t# Update logger object\n\t\ttrainer.logger = create_logger(config=config, logFile=\"logFile_trainExpLink.txt\", currLogger=trainer.logger)\n\t\t\n\t\ttrainer.logger.info(trainer)\n\t\ttrainer.logger.info(command)\n\t\ttrainExpLinkOnly(trainer)\n\t\t\n\telif config.mode == \"test\":\n\t\ttrainer = VectDataTrainer(config)\n\t\t\n\t\t# Load model and reset optimizer to have parameters of the loaded model\n\t\ttrainer.loadModel()\n\t\t\n\t\t# Update output directory\n\t\ttrainer.config.resultDir = trainer.config.resultDir + args.newDirSuffix\n\t\tPath(trainer.config.resultDir).mkdir(parents=True, exist_ok=True) # Create resultDir directory if not already present\n\t\t\n\t\t# Update logger object\n\t\ttrainer.logger = create_logger(config=config, logFile=\"logFile_retest.txt\", currLogger=trainer.logger)\n\t\t\n\t\ttrainer.logger.info(command)\n\t\ttrainer.logger.info(trainer)\n\t\n\telse:\n\t\traise Exception(\"Invalid mode = {}. Choose one from: test, train\".format(config.mode))\n\t\n\t\n\trun_final_eval(trainer)\n\t# trainer.performFinalEvaluation()\n\ttrainer.logger.info(\"\\n\\n\\n\\n\")\n\t\n\ttrainer.logger.info(trainer)\n\ttrainer.logger.info(command)\n\tend = time.time()\n\ttrainer.logger.info(\" Total time taken = {:.4f} = {:.4f} min = {:.4f} hours\".format(end - start, (end - start)/60, (end - start)/3600))\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser( description='Supervised clustering training for data in R^n')\n\tparser.add_argument('--config', type=str, help=\"Config file\")\n\n\t################################## OPTIONAL ARGUMENTS TO OVERWRITE CONFIG FILE ARGS###################################################\n\ttemp_config = Config()\n\tfor config_arg in temp_config.__dict__:\n\t\tdef_val = temp_config.__getattribute__(config_arg)\n\t\targ_type = type(def_val) if def_val is not None else str\n\t\tparser.add_argument('--{}'.format(config_arg), type=arg_type, default=None, help='If not specified then value from config file will be used')\n\t#########################################################################################################\n\n\targs = parser.parse_args()\n\t\n\tassert args.config is not None\n\tconfig = Config(args.config)\n\tfor config_arg in temp_config.__dict__:\n\t\tdef_val = getattr(args, config_arg)\n\t\tif def_val is not None:\n\t\t\told_val = config.__dict__[config_arg]\n\t\t\tconfig.__dict__.update({config_arg:def_val})\n\t\t\tnew_val =config.__dict__[config_arg]\n\t\t\tprint(\"Updating Config.{} from {} to {} using arg_val={}\".format(config_arg, old_val, new_val, def_val))\n\t\n\t# Update result directory if there are any parameters passed through command line that are different from those in config file\n\tif args.resultDir is None:\n\t\tconfig.updateResultDir(\"auto\")\n\telse:\n\t\tconfig.updateResultDir(args.resultDir)\n\t\n\tPath(config.resultDir).mkdir(parents=True, exist_ok=True) # Create resultDir directory if not already present\n\tconfig.useGPU \t\t= config.cuda and torch.cuda.is_available()\n\tconfig.updateRandomSeeds(config.seed)\n\tconfig.save_config(config.resultDir, \"orig_config.json\")\n\trunMain(config)\n\t\n\t\n\n\n" ]
[ [ "torch.cuda.is_available", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Prabhdeep1999/Caffe-Model-to-Keras-.h5-conversion
[ "54f1d76fb54dbdfe8d3ac378b4b53c2d1fbd78d5" ]
[ "caffe_weight_converter.py" ]
[ "'''\nA tool to convert `.caffemodel` weights to Keras-compatible HDF5 files or to export them to a simpler Python dictionary structure for further processing.\nCopyright (C) 2018 Pierluigi Ferrari\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\nYou should have received a copy of the GNU General Public License\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\n'''\n\nimport os\nos.environ['GLOG_minloglevel'] = '2' # Prevents Caffe from printing sooo much stuff to the console.\nimport caffe\nos.environ['GLOG_minloglevel'] = '0'\nimport numpy as np\nimport warnings\nimport argparse\ntry:\n import pickle\nexcept ImportError:\n warnings.warn(\"'pickle' module is missing. You can export the weights to an HDF5 file only.\")\ntry:\n import h5py\nexcept ImportError:\n warning.warn(\"'h5py' module is missing. You can export the weights to a pickle file only.\")\n\ndef convert_caffemodel_to_keras(output_filename,\n prototxt_filename,\n caffemodel_filename,\n include_layers_without_weights=False,\n include_unknown_layer_types=True,\n keras_backend='tf',\n verbose=True):\n '''\n Converts Caffe weights from the `.caffemodel` format to an HDF5 format that is\n compatible with Keras 2.x with TensorFlow backend. The Theano and CNTK backends\n are currently not supported.\n Note that this converter converts the weights only, not the model definition.\n The most painfree way to use this weight converter is to leave the\n `include_layers_without_weights` option deactivated and load the weights into\n an appropriate Keras model by setting `by_name = True` in the `Model.load_weights()`\n method.\n This converter can handle all layer types, but it is not guaranteed to perform\n the conversion correctly for unknown layer types. What this means concretely\n is that the converter can always extract the weights from a Caffe model and\n put them into the Keras-compatible HDF5 format regardless of the layer type,\n but some layer types may need processing on top of that that the converter\n cannot perform for layers it doesn't know. Two potential issues come to mind\n for unsupported layer types:\n 1) For layers that have multiple weight tensors, the converter will save\n the weight tensors in the order they have in the Caffe model. If this happens\n not to be the same order in which Keras saves the weights for that same\n layer type, then we obviously have a problem. For supported layer types\n it is ensured that the order is correct, but for a given unknown layer\n type this may or may not be the case.\n 2) If the weights of a layer type need to be processed in a certain way,\n the converter is not going to know about this for unknown layer types.\n For example, the axes of the kernels of convolutional layers need to be\n transposed between Caffe and Keras with TensorFlow backend. Similar processing\n might be necessary for the weights of other (unknown) layer types to work\n correctly, so be aware of that.\n Of course any layer types that do not have trainable weights (such as Reshape,\n ReLU, Split, Concat, Permute, Flatten, Pooling etc.) won't cause any trouble\n because the converter does not care about them. The possible issues described\n above might occur only with unknown layer types that do have trainable weights.\n The currently supported (i.e. known) Caffe layer types that do have trainable\n weights are:\n - BatchNorm (i.e. BatchNorm layer followed by subsequent Scale layer)\n - Convolution\n - Deconvolution\n - InnerProduct\n If your model contains batch normalization layers, make sure that the names of\n the batch normalization layers in the Keras model are the same as the names of the\n corresponding 'BatchNorm' layers in the Caffe model, not the 'Scale' layers.\n Arguments:\n output_filename (str): The filename (full path, but excluding the file extension)\n under which to save the HDF5 file with the converted weights.\n prototxt_filename (str): The filename (full path including file extension)\n of the `.prototxt` file that defines the Caffe model.\n caffemodel_filename (str): The filename (full path including file extension)\n of the `.caffemodel` file that contains the weights for the Caffe model.\n include_layers_without_weights (bool, optional): If `False`, layers without\n weights (e.g. Input, Reshape, or ReLU layers) will be skipped by the\n converter. This means that the HDF5 output file will only contain those\n layers of a model that have any weights. This is the recommended usage\n of this converter, but if you really must include all layers\n in the output file, then set this option to `True`.\n Note: If `False`, then you should load the weights into the Keras model\n `by_name = True`, since not all layers are present in the HDF5 file.\n include_unknown_layer_types (bool, optional): If `True`, weights from unknown layer\n types will be included, even though it is not guaranteed that they will be\n converted correctly. It is recommended that you keep this option\n activated, see if the converted weights work correctly, and only deactivate\n this option in case they don't.\n keras_backend (str, optional): For which Keras backend to convert the weights.\n Currently only the TensorFlow backend is supported, but you can simply\n follow the procedure [here](https://github.com/keras-team/keras/wiki/Converting-convolution-kernels-from-Theano-to-TensorFlow-and-vice-versa)\n to convert the resulting TensorFlow backend weights to Theano backend\n weights.\n verbose (bool, optional): If `True`, prints out the conversion status for\n every layer as well as some stats when the conversion is complete.\n Returns:\n None.\n '''\n if keras_backend != 'tf':\n raise ValueError(\"Only the TensorFlow backend is supported at the moment.\")\n\n # Create a list of the Caffe model weights as Numpy arrays stored in dictionaries.\n # The reason why we use dictionaries is that we don't only store the weights themselves,\n # but also other information like the layer name, layer type, tops, and bottoms (tops = outputs,\n # bottoms = inputs for the non-Caffe people) for each layer.\n caffe_weights_list = convert_caffemodel_to_dict(prototxt_filename,\n caffemodel_filename,\n out_path=None,\n verbose=False)\n\n # Create the HDF5 file in which to save the extracted weights.\n out_name = '{}.h5'.format(output_filename)\n out = h5py.File(out_name, 'w')\n\n # Save the layer names in this list.\n layer_names = []\n\n # These counters are just to be able to show some statistics upon completion of the conversion.\n counter_unknown = 0\n counter_no_weights = 0\n\n iterator = iter(range(len(caffe_weights_list)))\n\n for i in iterator:\n layer = caffe_weights_list[i]\n layer_name = layer['name']\n layer_type = layer['type']\n if (len(layer['weights']) > 0) or include_layers_without_weights: # Check whether this is a layer that contains weights.\n if layer_type in {'Convolution', 'Deconvolution', 'InnerProduct'}: # If this is a convolution layer or fully connected layer...\n # Get the kernel and transpose it.\n kernel = layer['weights'][0]\n if layer_type in {'Convolution', 'Deconvolution'}:\n # Caffe kernel order for Convolution: `(out_channels, in_channels, filter_height, filter_width)`\n # TensorFlow kernel order for Convolution: `(filter_height, filter_width, in_channels, out_channels)`\n # Caffe kernel order for Deconvolution: `(in_channels, out_channels, filter_height, filter_width)`\n # TensorFlow kernel order for Convolution Transpose: `(filter_height, filter_width, out_channels, in_channels)`\n # That is, the transposition order is the same for both layer types.\n kernel = np.transpose(kernel, (2, 3, 1, 0))\n if layer_type == 'InnerProduct':\n # Transpose the kernel from Caffe's `(out_channels, in_channels)` format\n # to TensorFlow's `(in_channels, out_channels)` format.\n kernel = np.transpose(kernel, (1, 0))\n # Set the name for the kernel.\n weight_names = ['kernel']\n # If this layer has a bias (which does not necessarily have to be the case), add it, too.\n if (len(layer['weights']) > 1):\n bias = layer['weights'][1]\n weight_names.append('bias')\n # Compose the extended weight names with layer name prefix.\n extended_weight_names = np.array(['{}/{}:0'.format(layer_name, weight_names[k]).encode() for k in range(len(weight_names))])\n # Create a group (i.e. folder) named after this layer.\n group = out.create_group(layer_name)\n # Create a weight names attribute for this group, which is just a list of the names of the weights\n # that this layer is expected to have in the Keras model.\n group.attrs.create(name='weight_names', data=extended_weight_names)\n # Create a subgroup (i.e. subfolder) in which to save the weights of this layer.\n subgroup = group.create_group(layer_name)\n # Create the actual weights datasets.\n subgroup.create_dataset(name='{}:0'.format(weight_names[0]), data=kernel)\n if (len(layer['weights']) > 1):\n subgroup.create_dataset(name='{}:0'.format(weight_names[1]), data=bias)\n # One last thing left to do: Append this layer's name to the global list of layer names.\n layer_names.append(layer_name.encode())\n if verbose:\n print(\"Converted weights for layer '{}' of type '{}'\".format(layer_name, layer_type))\n elif layer['type'] == 'BatchNorm': # If this is a batch normalization layer...\n # Caffe has a batch normalization layer, but it doesn't apply a scaling factor or bias\n # after normalizing. Instead, the 'BatchNorm' layer must be followed by a 'Scale' layer\n # in order to implement batch normalization the way you are used to. This means we\n # need to grab the weights from both this 'BatchNorm' layer and also from the subsequent\n # 'Scale' layer and put them together.\n # Gather all weights (expected: mean, variance, gamma, and beta) in this list.\n weights = []\n weight_names = []\n # Get the weights of this layer (the 'BatchNorm' layer).\n mean = layer['weights'][0]\n variance = layer['weights'][1]\n # If the subsequent layer is a 'Scale' layer, grab its weights, too.\n next_layer = caffe_weights_list[i + 1]\n if next_layer['type'] == 'Scale':\n gamma = next_layer['weights'][0]\n weights.append(gamma)\n weight_names.append('gamma')\n if (len(next_layer['weights']) == 1):\n warnings.warn(\"This 'Scale' layer follows a 'BatchNorm' layer and is expected to have a bias, but it doesn't. Make sure to set `center = False` in the respective Keras batch normalization layer.\")\n else:\n beta = next_layer['weights'][1]\n weights.append(beta)\n weight_names.append('beta')\n # Increment the iterator by one since we need to skip the subsequent 'Scale' layer after we're done here.\n next(iterator)\n else:\n warnings.warn(\"No 'Scale' layer after 'BatchNorm' layer. Make sure to set `scale = False` and `center = False` in the respective Keras batch normalization layer.\")\n weights.append(mean)\n weights.append(variance)\n weight_names.append('moving_mean') # It doesn't have to be a moving mean, but that's what Keras calls this parameter.\n weight_names.append('moving_variance') # It doesn't have to be a moving variance, but that's what Keras calls this parameter.\n # Compose the extended weight names with layer name prefix.\n extended_weight_names = np.array(['{}/{}:0'.format(layer_name, weight_names[k]).encode() for k in range(len(weight_names))])\n # Create a group (i.e. folder) named after this layer.\n group = out.create_group(layer_name)\n # Create a weight names attribute for this group, which is just a list of the names of the weights\n # that this layer is expected to have in the Keras model.\n group.attrs.create(name='weight_names', data=extended_weight_names)\n # Create a subgroup (i.e. subfolder) in which to save the weights of this layer.\n subgroup = group.create_group(layer_name)\n # Create the actual weights datasets.\n for j in range(len(weights)):\n subgroup.create_dataset(name='{}:0'.format(weight_names[j]), data=weights[j])\n # One last thing left to do: Append this layer's name to the global list of layer names.\n layer_names.append(layer_name.encode())\n if verbose:\n print(\"Converted weights for layer '{}' of type '{}'\".format(layer_name, layer_type))\n elif (len(layer['weights']) > 0) and include_unknown_layer_types: # For all other (unsupported) layer types...\n # Set the weight names for this layer type.\n weight_names = ['weights_{}'.format(i) for i in range(len(layer['weights']))]\n # Compose the extended weight names with layer name prefix.\n extended_weight_names = np.array(['{}/{}:0'.format(layer_name, weight_names[k]).encode() for k in range(len(weight_names))])\n # Create a group (i.e. folder) named after this layer.\n group = out.create_group(layer_name)\n # Create a weight names attribute for this group, which is just a list of the names of the weights\n # that this layer is expected to have in the Keras model.\n group.attrs.create(name='weight_names', data=extended_weight_names)\n # Create a subgroup (i.e. subfolder) in which to save the weights of this layer.\n subgroup = group.create_group(layer_name)\n # Create the actual weights datasets.\n for j in range(len(layer['weights'])):\n subgroup.create_dataset(name='{}:0'.format(weight_names[j]), data=layer['weights'][j])\n # One last thing left to do: Append this layer's name to the global list of layer names.\n layer_names.append(layer_name.encode())\n if verbose:\n print(\"Converted weights for layer '{}' of unknown type '{}'\".format(layer_name, layer_type))\n counter_unknown += 1\n elif (len(layer['weights']) == 0):\n # Create a group (i.e. folder) named after this layer.\n group = out.create_group(layer_name)\n # Create a weight names attribute for this group, which is just a list of the names of the weights\n # that this layer is expected to have in the Keras model.\n group.attrs.create(name='weight_names', data=np.array([]))\n # Create a subgroup (i.e. subfolder) in which to save the weights of this layer.\n subgroup = group.create_group(layer_name)\n # One last thing left to do: Append this layer's name to the global list of layer names.\n layer_names.append(layer_name.encode())\n if verbose:\n print(\"Processed layer '{}' of type '{}' which doesn't have any weights\".format(layer_name, layer_type))\n counter_no_weights += 1\n elif verbose:\n print(\"Skipped layer '{}' of unknown type '{}'\".format(layer_name, layer_type))\n elif verbose:\n print(\"Skipped layer '{}' of type '{}' because it doesn't have any weights\".format(layer_name, layer_type))\n # Create the global attributes of this HDF5 file.\n out.attrs.create(name='layer_names', data=np.array(layer_names))\n out.attrs.create(name='backend', data=b'tensorflow')\n # Setting the Keras version is actually important since Keras uses this number to determine\n # whether and how it will convert the loaded weights. Since we're preparing the weights\n # in a way that is compatible with Keras version 2, we'll inform Keras about this by\n # setting the version accordingly.\n out.attrs.create(name='keras_version', data=b'2.0.8')\n # We're done, close the output file.\n out.close()\n print(\"Weight conversion complete.\")\n if verbose:\n print(\"{} \\t layers were processed, out of which:\".format(len(layer_names)))\n print(\"{} \\t were of an unknown layer type\".format(counter_unknown))\n print(\"{} \\t did not have any weights\".format(counter_no_weights))\n print('File saved as {}'.format(out_name))\n\ndef convert_caffemodel_to_dict(prototxt_filename,\n caffemodel_filename,\n out_path=None,\n verbose=False):\n '''\n Extracts the weights from a Caffe model into a simple structure of\n Python lists, dictionaries and Numpy arrays.\n Arguments:\n prototxt_filename (str): The full path to the `.prototxt` file that defines\n the Caffe model.\n caffemodel_filename (str): The full path to the `.caffemodel` file that\n contains the weights for this Caffe model.\n out_path (str, optional): The filename (full path, but excluding the file extension)\n under which to save a pickled file with the extracted weights. If `None`,\n then the extracted weights will not be saved to disk.\n verbose (bool, optional): If `True`, prints out the processing status for\n every layer.\n Returns:\n A list of dictionaries. Each dictionary contains the data for one layer of the\n model. The data contained in each dictionary can be accessed by the following keys:\n 'name': The name of the layer.\n 'type': The type of the layer, e.g. 'Convolution'.\n 'weights': The weights of the layer as a list of Numpy arrays.\n 'bottoms': The names and shapes of all inputs into the layer.\n 'tops': The names and shapes of all outputs from the layer.\n In case a layer has no weights, that layer's weights list will be empty.\n '''\n # Load the Caffe net and weights.\n net = caffe.Net(prototxt_filename, 1, weights=caffemodel_filename)\n # Store the weights and other information for each layer in this list.\n layer_list = []\n for li in range(len(net.layers)): # For each layer in the net...\n # ...store the weights and other relevant information in this dictionary.\n layer = {}\n # Store the layer name.\n layer['name'] = net._layer_names[li]\n # Store the layer type.\n layer['type'] = net.layers[li].type\n # Store the layer weights. In case the layer has no weights, this list will be empty.\n layer['weights'] = [net.layers[li].blobs[bi].data[...]\n for bi in range(len(net.layers[li].blobs))]\n # Store the names and shapes of each input to this layer (aka \"bottom\").\n layer['bottoms'] = [(net._blob_names[bi], net.blobs[net._blob_names[bi]].data.shape)\n for bi in list(net._bottom_ids(li))]\n # Store the names and shapes of each output of this layer (aka \"top\").\n layer['tops'] = [(net._blob_names[bi], net.blobs[net._blob_names[bi]].data.shape)\n for bi in list(net._top_ids(li))]\n layer_list.append(layer)\n if verbose:\n print(\"Processed layer '{}' of type '{}'\".format(layer['name'], layer['type']))\n\n # Free the occupied resources.\n del net\n\n if verbose:\n print(\"Weight extraction complete. Processed {} layers.\".format(len(layer_list)))\n\n if not (out_path is None):\n out_name = '{}.pkl'.format(out_path)\n with open(out_name, 'wb') as f:\n pickle.dump(layer_list, f, protocol=pickle.HIGHEST_PROTOCOL)\n print('File saved as {}.'.format(out_name))\n\n return layer_list\n\ndef main(argv):\n if argv.format == 'hdf5':\n convert_caffemodel_to_keras(output_filename=argv.out_file,\n prototxt_filename=argv.prototxt,\n caffemodel_filename=argv.caffemodel,\n include_layers_without_weights=argv.include_non_weight,\n include_unknown_layer_types=not(argv.skip_unknown),\n keras_backend=argv.backend,\n verbose=argv.verbose)\n elif argv.format == 'pickle':\n _ = convert_caffemodel_to_dict(prototxt_filename=argv.prototxt,\n caffemodel_filename=argv.caffemodel,\n out_path=argv.out_file,\n verbose=argv.verbose)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=('Converts `.caffemodel` weights to either of '\n '(1) Keras-compatible HDF5 format or '\n '(2) a more general Python list of dictionaries suitable for further processing.'))\n parser.add_argument('out_file', action='store', type=str, help='The output filename as the full path, but excluding the file extension.')\n parser.add_argument('prototxt', action='store', type=str, help='The filename (full path including file extension) of the `.prototxt` file that defines the Caffe model. ')\n parser.add_argument('caffemodel', action='store', type=str, help='The filename (full path including file extension) of the `.caffemodel` file that contains the weights for the Caffe model.')\n parser.add_argument('-f', '--format', action='store', type=str, default='hdf5', choices={'hdf5', 'pickle'}, help=\"To which format to export the weights. Choices are {%(choices)s}, and the default is %(default)s. \"\n \"If the HDF5 format is selected, the converted weights will be compatible with Keras 2.x. \"\n \"If the Pickle format is selected, the weights will be exported to a more general Python list of \"\n \"dictionaries that contain the weights as Numpy arrays, along with other information such as \"\n \"layer names and types. This format may be useful if you want to process the weights further \"\n \"after exporting them.\")\n parser.add_argument('-n', '--include_non_weight', action='store_true', default=False, help=\"This option is only relevant if the output format is HDF5. Include layers that have no weights \"\n \"(e.g. Input, Reshape, or ReLU layers) in the converted weights file. \"\n \"The recommended usage for HDF5 conversion is not to use this option and to load the weights into \"\n \"the Keras model using `Model.load_weights()` with `by_name = True`.\")\n parser.add_argument('-u', '--skip_unknown', action='store_true', default=False, help=\"This option is only relevant if the output format is HDF5. Skip layer types that are unknown to the \"\n \"converter. It is recommended to try using the converter without this option first, then check whether the \"\n \"converted weights work correctly or not, and only use this option in case they don't.\")\n parser.add_argument('-b', '--backend', action='store', type=str, default='tf', choices={'tf'}, help=\"This option is only relevant if the output format is HDF5. For which Keras backend to convert the weights. \"\n \"At the moment the only choice is 'tf' for the TensorFlow backend. %(default)s is also the default value.\")\n parser.add_argument('-v', '--verbose', action='store_true', default=False, help=\"Prints out the conversion status for every layer.\")\n\n args = parser.parse_args()\n\n main(args)" ]
[ [ "numpy.array", "numpy.transpose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
evandrocapo/Keras
[ "5e2f73254e38d99ea1027b07c8e887be0d71fcac" ]
[ "keras_ai#2.py" ]
[ "import numpy as np\nfrom tensorflow import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nx = np.array([[0.1],[0.2],[0.3]])\ny = np.array([[0.2],[0.4],[0.6]])\n\nmodel = Sequential() # modelo\n\nmodel.add(Dense(3, input_dim=1)) # adicinou uma layer\nmodel.add(Dense(1))\n\nmodel.compile(optimizer='sgd', loss='mse', metrics=['acc'])\n\nmodel.fit(x, y, epochs=8000)\n\nwhile True:\n\n i = input('Digite um numero: ')\n t = float(i)\n t = np.asmatrix(t)\n result = model.predict(t)\n\n print(i, ' previsto => ', result[0])" ]
[ [ "numpy.asmatrix", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
davidcsterratt/snl
[ "5c086fac092209c5efd2e4b882bbcb197e2facf8" ]
[ "simulators/markov_jump_processes.py" ]
[ "from __future__ import division\n\nimport numpy as np\nimport util.math\n\n\nclass SimTooLongException(Exception):\n \"\"\"\n Exception to be thrown when a simulation runs for too long.\n \"\"\"\n\n def __init__(self, max_n_steps):\n self.max_n_steps = max_n_steps\n\n def __str__(self):\n return 'Simulation exceeded the maximum of {} steps.'.format(self.max_n_steps)\n\n\nclass MarkovJumpProcess:\n \"\"\"\n Implements a generic Markov Jump Process. It's an abstract class and must be implemented by a subclass.\n \"\"\"\n\n def __init__(self, init, params):\n \"\"\"\n :param init: initial state\n :param params: parameters\n \"\"\"\n\n self.state = None\n self.params = None\n self.time = None\n self.reset(init, params)\n\n def reset(self, init, params):\n \"\"\"\n Resets the simulator.\n :param init: initial state\n :param params: parameters\n \"\"\"\n\n self.state = np.asarray(init, dtype=float)\n self.params = np.asarray(params, dtype=float)\n self.time = 0.0\n\n def _calc_propensities(self):\n raise NotImplementedError('This is an abstract method and should be implemented in a subclass.')\n\n def _do_reaction(self, reaction):\n raise NotImplementedError('This is an abstract method and should be implemented in a subclass.')\n\n def sim_steps(self, num_steps, include_init_state=True, rng=np.random):\n \"\"\"\n Runs the simulator for a given number of steps.\n :param num_steps: number of steps\n :param include_init_state: if True, include the initial state in the output\n :param rng: random number generator to use\n :return: times, states\n \"\"\"\n\n times = [self.time]\n states = [self.state.copy()]\n\n for _ in xrange(num_steps):\n\n rates = self.params * self._calc_propensities()\n total_rate = rates.sum()\n\n if total_rate == 0:\n self.time = float('inf')\n break\n\n self.time += rng.exponential(scale=1./total_rate)\n\n reaction = util.math.discrete_sample(rates / total_rate, rng=rng)\n self._do_reaction(reaction)\n\n times.append(self.time)\n states.append(self.state.copy())\n\n if not include_init_state:\n times, states = times[1:], states[1:]\n\n return np.array(times), np.array(states)\n\n def sim_time(self, dt, duration, include_init_state=True, max_n_steps=float('inf'), rng=np.random):\n \"\"\"\n Runs the simulator for a given amount of time.\n :param dt: time step\n :param duration: total amount of time\n :param include_init_state: if True, include the initial state in the output\n :param max_n_steps: maximum number of simulator steps allowed. If exceeded, an exception is thrown.\n :param rng: random number generator to use\n :return: states\n \"\"\"\n\n num_rec = int(duration / dt) + 1\n states = np.empty([num_rec, self.state.size], float)\n cur_time = self.time\n n_steps = 0\n\n for i in xrange(num_rec):\n\n while cur_time > self.time:\n\n rates = self.params * self._calc_propensities()\n total_rate = rates.sum()\n\n if total_rate == 0:\n self.time = float('inf')\n break\n\n self.time += rng.exponential(scale=1./total_rate)\n\n reaction = util.math.discrete_sample(rates / total_rate, rng=rng)\n self._do_reaction(reaction)\n\n n_steps += 1\n if n_steps > max_n_steps:\n raise SimTooLongException(max_n_steps)\n\n states[i] = self.state.copy()\n cur_time += dt\n\n return states if include_init_state else states[1:]\n\n\nclass LotkaVolterra(MarkovJumpProcess):\n \"\"\"\n The Lotka-Volterra implementation of the Markov Jump Process.\n \"\"\"\n\n def _calc_propensities(self):\n\n x, y = self.state\n xy = x * y\n return np.array([xy, x, y, xy])\n\n def _do_reaction(self, reaction):\n\n if reaction == 0:\n self.state[0] += 1\n\n elif reaction == 1:\n self.state[0] -= 1\n\n elif reaction == 2:\n self.state[1] += 1\n\n elif reaction == 3:\n self.state[1] -= 1\n\n else:\n raise ValueError('Unknown reaction.')\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FlorentF9/skstab
[ "7f5304d5012f0f1eb468a7670db95db6029097d7" ]
[ "example_modelorderselection.py" ]
[ "import numpy as np\nfrom skstab import ModelOrderSelection\nfrom skstab.datasets import load_dataset\nfrom sklearn.cluster import KMeans\nfrom sklearn.neighbors import KNeighborsClassifier\n\ndataset = 'exemples2_5g'\nX, y = load_dataset(dataset)\nprint('Dataset: {} (true number of clusters: K = {})'.format(dataset, len(np.unique(y))))\n\nalgorithm = KMeans\nkm_kwargs = {'init': 'k-means++', 'n_init': 10}\n\nk_values = list(range(2, 11))\nprint('Evaluated numbers of clusters:', k_values)\n\nstab = ModelOrderSelection(X, algorithm,\n param_name='n_clusters',\n param_values=k_values,\n classifier=KNeighborsClassifier,\n norm_samples=20,\n runs=20,\n algo_kwargs=km_kwargs,\n clf_kwargs={'n_neighbors': 1},\n n_jobs=-1)\n\nscore = stab.score()\nprint('Model order selection scores:\\n', score)\nk_hat = stab.select_param()[0]\nprint('Selected number of clusters: K =', k_hat)\n" ]
[ [ "numpy.unique" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fongchun/ProDy
[ "cd781b105e3f502d581ee184c9009264b6245bcd" ]
[ "prody/proteins/ciffile.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"This module defines functions for parsing `mmCIF files`_.\n\n.. _mmCIF files: http://mmcif.wwpdb.org/docs/tutorials/mechanics/pdbx-mmcif-syntax.html\"\"\"\n\n\nfrom collections import defaultdict\nimport os.path\n\n\nimport numpy as np\n\nfrom prody.atomic import AtomGroup\nfrom prody.atomic import flags\nfrom prody.atomic import ATOMIC_FIELDS\nfrom prody.utilities import openFile\nfrom prody import LOGGER, SETTINGS\n\nfrom .header import getHeaderDict, buildBiomolecules, assignSecstr\nfrom .localpdb import fetchPDB\n\n__all__ = ['parseCIFStream', 'parseCIF',]\n\nclass CIFParseError(Exception):\n pass\n\n_parseCIFdoc = \"\"\"\n :arg title: title of the :class:`.AtomGroup` instance, default is the\n PDB filename or PDB identifier\n :type title: str\n\n :arg chain: chain identifiers for parsing specific chains, e.g.\n ``chain='A'``, ``chain='B'``, ``chain='DE'``, by default all\n chains are parsed\n :type chain: str\n\n :arg subset: a predefined keyword to parse subset of atoms, valid keywords\n are ``'calpha'`` (``'ca'``), ``'backbone'`` (``'bb'``), or **None**\n (read all atoms), e.g. ``subset='bb'``\n :type subset: str\n\n :arg model: model index or None (read all models), e.g. ``model=10``\n :type model: int, list\n\n :arg altloc: if a location indicator is passed, such as ``'A'`` or ``'B'``,\n only indicated alternate locations will be parsed as the single\n coordinate set of the AtomGroup, if *altloc* is set **True** all\n alternate locations will be parsed and each will be appended as a\n distinct coordinate set, default is ``\"A\"``\n :type altloc: str\n \"\"\"\n\n_PDBSubsets = {'ca': 'ca', 'calpha': 'ca', 'bb': 'bb', 'backbone': 'bb'}\n\ndef parseCIF(pdb, **kwargs):\n \"\"\"Returns an :class:`.AtomGroup` and/or dictionary containing header data\n parsed from an mmCIF file. If not found, the mmCIF file will be downloaded\n from the PDB. It will be downloaded in uncompressed format regardless of\n the compressed keyword.\n\n This function extends :func:`.parseCIFStream`.\n\n See :ref:`parsecif` for a detailed usage example.\n\n :arg pdb: a PDB identifier or a filename\n If needed, mmCIF files are downloaded using :func:`.fetchPDB()` function.\n :type pdb: str\n \"\"\"\n title = kwargs.get('title', None)\n if not os.path.isfile(pdb):\n if len(pdb) == 4 and pdb.isalnum():\n if title is None:\n title = pdb\n kwargs['title'] = title\n\n if os.path.isfile(pdb + '.cif'):\n filename = pdb + '.cif'\n elif os.path.isfile(pdb + '.cif.gz'):\n filename = pdb + '.cif.gz'\n else:\n filename = fetchPDB(pdb, report=True, format='cif',compressed=False)\n if filename is None:\n raise IOError('mmCIF file for {0} could not be downloaded.'\n .format(pdb))\n pdb = filename\n else:\n raise IOError('{0} is not a valid filename or a valid PDB '\n 'identifier.'.format(pdb))\n if title is None:\n title, ext = os.path.splitext(os.path.split(pdb)[1])\n if ext == '.gz':\n title, ext = os.path.splitext(title)\n if len(title) == 7 and title.startswith('pdb'):\n title = title[3:]\n kwargs['title'] = title\n cif = openFile(pdb, 'rt')\n result = parseCIFStream(cif, **kwargs)\n cif.close()\n return result\n\ndef parseCIFStream(stream, **kwargs):\n \"\"\"Returns an :class:`.AtomGroup` and/or dictionary containing header data\n parsed from a stream of CIF lines.\n :arg stream: Anything that implements the method ``readlines``\n (e.g. :class:`file`, buffer, stdin)\"\"\"\n\n model = kwargs.get('model')\n subset = kwargs.get('subset')\n chain = kwargs.get('chain')\n altloc = kwargs.get('altloc', 'A')\n\n if model is not None:\n if isinstance(model, int):\n if model < 0:\n raise ValueError('model must be greater than 0')\n else:\n raise TypeError('model must be an integer, {0} is invalid'\n .format(str(model)))\n title_suffix = ''\n if subset:\n try:\n subset = _PDBSubsets[subset.lower()]\n except AttributeError:\n raise TypeError('subset must be a string')\n except KeyError:\n raise ValueError('{0} is not a valid subset'\n .format(repr(subset)))\n title_suffix = '_' + subset\n if chain is not None:\n if not isinstance(chain, str):\n raise TypeError('chain must be a string')\n elif len(chain) == 0:\n raise ValueError('chain must not be an empty string')\n title_suffix = '_' + chain + title_suffix\n\n ag = None\n if 'ag' in kwargs:\n ag = kwargs['ag']\n if not isinstance(ag, AtomGroup):\n raise TypeError('ag must be an AtomGroup instance')\n n_csets = ag.numCoordsets()\n elif model != 0:\n ag = AtomGroup(str(kwargs.get('title', 'Unknown')) + title_suffix)\n n_csets = 0\n\n if model != 0:\n LOGGER.timeit()\n try:\n lines = stream.readlines()\n except AttributeError as err:\n try:\n lines = stream.read().split('\\n')\n except AttributeError:\n raise err\n if not len(lines):\n raise ValueError('empty PDB file or stream')\n ag = _parseCIFLines(ag, lines, model, chain, subset, altloc)\n if ag.numAtoms() > 0:\n LOGGER.report('{0} atoms and {1} coordinate set(s) were '\n 'parsed in %.2fs.'.format(ag.numAtoms(),\n ag.numCoordsets() - n_csets))\n else:\n ag = None\n LOGGER.warn('Atomic data could not be parsed, please '\n 'check the input file.')\n return ag\n\nparseCIFStream.__doc__ += _parseCIFdoc\n\ndef _parseCIFLines(atomgroup, lines, model, chain, subset,\n altloc_torf):\n \"\"\"Returns an AtomGroup. See also :func:`.parsePDBStream()`.\n\n :arg lines: CIF lines\n \"\"\"\n\n if subset is not None:\n if subset == 'ca':\n subset = set(('CA',))\n elif subset in 'bb':\n subset = flags.BACKBONE\n protein_resnames = flags.AMINOACIDS\n\n asize = 0\n i = 0\n models = []\n nModels = 0\n fields = {}\n fieldCounter = -1\n foundModelNumFieldID = False\n foundAtomBlock = False\n doneAtomBlock = False\n while not doneAtomBlock:\n line = lines[i]\n if line[:11] == '_atom_site.':\n fieldCounter += 1\n fields[line.split('.')[1].strip()] = fieldCounter\n\n if line.startswith('ATOM') or line.startswith('HETATM'):\n if not foundAtomBlock:\n foundAtomBlock = True\n start = i\n models.append(line.split()[fields['pdbx_PDB_model_num']])\n if models[asize] != models[asize-1]:\n nModels += 1\n asize += 1\n else:\n if foundAtomBlock:\n doneAtomBlock = True\n i += 1\n stop = i-1\n if nModels == 0: nModels = 1\n\n if model is not None and model != 1:\n for i in range(start, stop):\n if str(models[i]) != model and str(models[i+1]) == model:\n start = i+1\n if str(models[i]) == model and str(models[i+1]) != model:\n stop = i+1\n break\n if not str(model) in models:\n raise CIFParseError('model {0} is not found'.format(model))\n\n addcoords = False\n if atomgroup.numCoordsets() > 0:\n addcoords = True\n \n if isinstance(altloc_torf, str):\n if altloc_torf.strip() != 'A':\n LOGGER.info('Parsing alternate locations {0}.'\n .format(altloc_torf))\n which_altlocs = '.' + ''.join(altloc_torf.split())\n else:\n which_altlocs = '.A'\n altloc_torf = False\n else:\n which_altlocs = '.A'\n altloc_torf = True\n\n coordinates = np.zeros((asize, 3), dtype=float)\n atomnames = np.zeros(asize, dtype=ATOMIC_FIELDS['name'].dtype)\n resnames = np.zeros(asize, dtype=ATOMIC_FIELDS['resname'].dtype)\n resnums = np.zeros(asize, dtype=ATOMIC_FIELDS['resnum'].dtype)\n chainids = np.zeros(asize, dtype=ATOMIC_FIELDS['chain'].dtype)\n hetero = np.zeros(asize, dtype=bool)\n termini = np.zeros(asize, dtype=bool)\n altlocs = np.zeros(asize, dtype=ATOMIC_FIELDS['altloc'].dtype)\n icodes = np.zeros(asize, dtype=ATOMIC_FIELDS['icode'].dtype)\n serials = np.zeros(asize, dtype=ATOMIC_FIELDS['serial'].dtype)\n elements = np.zeros(asize, dtype=ATOMIC_FIELDS['element'].dtype)\n bfactors = np.zeros(asize, dtype=ATOMIC_FIELDS['beta'].dtype)\n occupancies = np.zeros(asize, dtype=ATOMIC_FIELDS['occupancy'].dtype)\n\n n_atoms = atomgroup.numAtoms()\n if n_atoms > 0:\n asize = n_atoms\n\n acount = 0\n for line in lines[start:stop]:\n startswith = line.split()[fields['group_PDB']]\n\n atomname = line.split()[fields['auth_atom_id']]\n resname = line.split()[fields['auth_comp_id']]\n\n if subset is not None:\n if not (atomname in subset and resname in protein_resnames):\n continue\n\n chID = line.split()[fields['auth_asym_id']]\n if chain is not None:\n if not chID in chain:\n continue\n\n alt = line.split()[fields['label_alt_id']]\n if alt not in which_altlocs:\n continue\n\n if model is not None:\n if int(models[acount]) < model:\n continue\n elif int(models[acount]) > model:\n break\n\n coordinates[acount] = [line.split()[fields['Cartn_x']], \\\n line.split()[fields['Cartn_y']], \\\n line.split()[fields['Cartn_z']]]\n atomnames[acount] = atomname\n resnames[acount] = resname\n resnums[acount] = line.split()[fields['auth_seq_id']]\n chainids[acount] = chID\n hetero[acount] = startswith == 'HETATM' # True or False\n if chainids[acount] != chainids[acount-1]: termini[acount] = True\n altlocs[acount] = alt\n icodes[acount] = line.split()[fields['pdbx_PDB_ins_code']]\n if icodes[acount] == '?': icodes[acount] = ''\n serials[acount] = line.split()[fields['id']]\n elements[acount] = line.split()[fields['type_symbol']]\n bfactors[acount] = line.split()[fields['B_iso_or_equiv']]\n occupancies[acount] = line.split()[fields['occupancy']]\n \n acount += 1\n\n if model is not None:\n nModels = 1\n\n modelSize = acount//nModels\n\n if addcoords:\n atomgroup.addCoordset(coordinates[:modelSize])\n else:\n atomgroup._setCoords(coordinates[:modelSize])\n\n atomgroup.setNames(atomnames[:modelSize])\n atomgroup.setResnames(resnames[:modelSize])\n atomgroup.setResnums(resnums[:modelSize])\n atomgroup.setChids(chainids[:modelSize])\n atomgroup.setFlags('hetatm', hetero[:modelSize])\n atomgroup.setFlags('pdbter', termini[:modelSize])\n atomgroup.setAltlocs(altlocs[:modelSize])\n atomgroup.setIcodes(icodes[:modelSize])\n atomgroup.setSerials(serials[:modelSize])\n\n atomgroup.setElements(elements[:modelSize])\n from prody.utilities.misctools import getMasses\n atomgroup.setMasses(getMasses(elements[:modelSize]))\n atomgroup.setBetas(bfactors[:modelSize])\n atomgroup.setOccupancies(occupancies[:modelSize])\n\n for n in range(1,nModels):\n atomgroup.addCoordset(coordinates[n*modelSize:(n+1)*modelSize])\n\n return atomgroup\n\n\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
john-ramsey/darts
[ "8fa7dbfa48577587ba247244b381c48d01153a70" ]
[ "darts/models/forecasting/random_forest.py" ]
[ "\"\"\"\nRandom Forest\n-------------\n\nA forecasting model using a random forest regression. It uses some of the target series' lags, as well as optionally\nsome covariate series' lags in order to obtain a forecast.\n\nSee [1]_ for a reference around random forests.\n\nThe implementations is wrapped around `RandomForestRegressor\n<https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html#sklearn.ensemble.RandomForestRegressor>`_.\n\nReferences\n----------\n.. [1] https://en.wikipedia.org/wiki/Random_forest\n\"\"\"\nfrom darts.logging import get_logger\nfrom typing import Optional, Union, Tuple, List\nfrom darts.models.forecasting.regression_model import RegressionModel\nfrom sklearn.ensemble import RandomForestRegressor\n\nlogger = get_logger(__name__)\n\n\nclass RandomForest(RegressionModel):\n def __init__(self,\n lags: Union[int, list] = None,\n lags_past_covariates: Union[int, List[int]] = None,\n lags_future_covariates: Union[Tuple[int, int], List[int]] = None,\n n_estimators: Optional[int] = 100,\n max_depth: Optional[int] = None,\n **kwargs):\n \"\"\"Random Forest Model\n\n Parameters\n ----------\n lags\n Lagged target values used to predict the next time step. If an integer is given the last `lags` past lags\n are used (from -1 backward). Otherwise a list of integers with lags is required (each lag must be < 0).\n lags_past_covariates\n Number of lagged past_covariates values used to predict the next time step. If an integer is given the last\n `lags_past_covariates` past lags are used (inclusive, starting from lag -1). Otherwise a list of integers\n with lags < 0 is required.\n lags_future_covariates\n Number of lagged future_covariates values used to predict the next time step. If an tuple (past, future) is\n given the last `past` lags in the past are used (inclusive, starting from lag -1) along with the first\n `future` future lags (starting from 0 - the prediction time - up to `future - 1` included). Otherwise a list\n of integers with lags is required.\n n_estimators : int\n The number of trees in the forest.\n max_depth : int\n The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all\n leaves contain less than min_samples_split samples.\n **kwargs\n Additional keyword arguments passed to `sklearn.ensemble.RandomForest`.\n \"\"\"\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n self.kwargs = kwargs\n self.kwargs[\"n_estimators\"] = self.n_estimators\n self.kwargs[\"max_depth\"] = self.max_depth\n\n super().__init__(\n lags=lags,\n lags_past_covariates=lags_past_covariates,\n lags_future_covariates=lags_future_covariates,\n model=RandomForestRegressor(**kwargs)\n )\n\n def __str__(self):\n return (f\"RandomForest(lags={self.lags}, lags_past_covariates={self.lags_past_covariates}, \"\n f\"lags_historical_covariates={self.lags_historical_covariates}, \"\n f\"lags_future_covariates={self.lags_future_covariates}, \"\n f\"n_estimators={self.n_estimators}, max_depth={self.max_depth}\")\n" ]
[ [ "sklearn.ensemble.RandomForestRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jphdotam/EAE-ECG-autoencoder
[ "9e556818f9a6ad80254f86a1ba22c7c0513b06cc" ]
[ "lib/training.py" ]
[ "import os\nimport torch\nfrom collections import deque\n\n\nclass Am:\n \"\"\"Simple average meter which stores progress as a running average\"\"\"\n\n def __init__(self, n_for_running_average=100): # n is in samples not batches\n self.n_for_running_average = n_for_running_average\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.running = deque(maxlen=self.n_for_running_average)\n self.running_average = -1\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.running.extend([val] * n)\n self.count += n\n self.avg = self.sum / self.count\n self.running_average = sum(self.running) / len(self.running)\n\ndef cycle(train_or_test, model, dataloader, epoch, criterion, optimizer, cfg, scheduler=None, writer=None):\n log_freq = cfg['output']['log_freq']\n device = cfg['training']['device']\n meter_loss = Am()\n\n model = model.to(device)\n\n if train_or_test == 'train':\n model.train()\n training = True\n elif train_or_test == 'test':\n model.eval()\n training = False\n else:\n raise ValueError(f\"train_or_test must be 'train', or 'test', not {train_or_test}\")\n\n for i_batch, (x, y_true, _info_dict) in enumerate(dataloader):\n x = x.to(device, non_blocking=True)\n y_true = y_true.to(device, non_blocking=True)\n optimizer.zero_grad()\n\n # Forward pass\n if training:\n y_pred = model(x)\n loss = criterion(y_pred, y_true)\n else:\n with torch.no_grad():\n y_pred = model(x)\n loss = criterion(y_pred, y_true)\n\n # Backward pass\n if training:\n loss.backward()\n optimizer.step()\n if scheduler:\n scheduler.step()\n\n meter_loss.update(loss, x.size(0))\n\n # Loss intra-epoch printing\n if (i_batch+1) % log_freq == 0:\n print(f\"{train_or_test.upper(): >5} [{i_batch+1:04d}/{len(dataloader):04d}] \\t\\tLOSS: {meter_loss.running_average:.5f}\")\n\n if train_or_test == 'train':\n i_iter = ((epoch - 1) * len(dataloader)) + i_batch+1\n writer.add_scalar(f\"LossIter/{train_or_test}\", meter_loss.running_average, i_iter + 1)\n\n loss = float(meter_loss.avg.detach().cpu().numpy())\n\n print(f\"{train_or_test.upper(): >5} Complete!\\t\\t\\tLOSS: {meter_loss.avg:.5f}\")\n\n if writer:\n writer.add_scalar(f\"LossEpoch/{train_or_test}\", loss, epoch)\n\n return loss\n\ndef save_state(state, filename, test_metric, best_metric, cfg, last_save_path, lowest_best=True):\n save = cfg['output']['save']\n save_path = os.path.join(cfg['output']['model_dir'], cfg['experiment_id'], filename)\n if save == 'all':\n torch.save(state, save_path)\n elif (test_metric < best_metric) == lowest_best:\n print(f\"{test_metric:.5f} better than {best_metric:.5f} -> SAVING\")\n if save == 'best': # Delete previous best if using best only; otherwise keep previous best\n if last_save_path:\n try:\n os.remove(last_save_path)\n except FileNotFoundError:\n print(f\"Failed to find {last_save_path}\")\n best_metric = test_metric\n torch.save(state, save_path)\n last_save_path = save_path\n else:\n print(f\"{test_metric:.5g} not improved from {best_metric:.5f}\")\n return best_metric, last_save_path" ]
[ [ "torch.no_grad", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sulfurheron/py-motmetrics
[ "cda11f1a4e5feb6716dd906665bbbe08fce384f5" ]
[ "motmetrics/io.py" ]
[ "# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.\n# https://github.com/cheind/py-motmetrics/\n#\n# MIT License\n# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.\n# See LICENSE file for terms.\n\n\"\"\"Functions for loading data and writing summaries.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom enum import Enum\nimport io\n\nimport numpy as np\nimport pandas as pd\nimport scipy.io\nimport xmltodict\n\n\nclass Format(Enum):\n \"\"\"Enumerates supported file formats.\"\"\"\n\n MOT16 = 'mot16'\n \"\"\"Milan, Anton, et al. \"Mot16: A benchmark for multi-object tracking.\" arXiv preprint arXiv:1603.00831 (2016).\"\"\"\n\n MOT15_2D = 'mot15-2D'\n \"\"\"Leal-Taixe, Laura, et al. \"MOTChallenge 2015: Towards a benchmark for multi-target tracking.\" arXiv preprint arXiv:1504.01942 (2015).\"\"\"\n\n VATIC_TXT = 'vatic-txt'\n \"\"\"Vondrick, Carl, Donald Patterson, and Deva Ramanan. \"Efficiently scaling up crowdsourced video annotation.\" International Journal of Computer Vision 101.1 (2013): 184-204.\n https://github.com/cvondrick/vatic\n \"\"\"\n\n DETRAC_MAT = 'detrac-mat'\n \"\"\"Wen, Longyin et al. \"UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking.\" arXiv preprint arXiv:arXiv:1511.04136 (2016).\n http://detrac-db.rit.albany.edu/download\n \"\"\"\n\n DETRAC_XML = 'detrac-xml'\n \"\"\"Wen, Longyin et al. \"UA-DETRAC: A New Benchmark and Protocol for Multi-Object Detection and Tracking.\" arXiv preprint arXiv:arXiv:1511.04136 (2016).\n http://detrac-db.rit.albany.edu/download\n \"\"\"\n\n\ndef load_motchallenge(fname, **kwargs):\n r\"\"\"Load MOT challenge data.\n\n Params\n ------\n fname : str\n Filename to load data from\n\n Kwargs\n ------\n sep : str\n Allowed field separators, defaults to '\\s+|\\t+|,'\n min_confidence : float\n Rows with confidence less than this threshold are removed.\n Defaults to -1. You should set this to 1 when loading\n ground truth MOTChallenge data, so that invalid rectangles in\n the ground truth are not considered during matching.\n\n Returns\n ------\n df : pandas.DataFrame\n The returned dataframe has the following columns\n 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'\n The dataframe is indexed by ('FrameId', 'Id')\n \"\"\"\n\n sep = kwargs.pop('sep', r'\\s+|\\t+|,')\n min_confidence = kwargs.pop('min_confidence', -1)\n df = pd.read_csv(\n fname,\n sep=sep,\n index_col=[0, 1],\n skipinitialspace=True,\n header=None,\n names=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'],\n engine='python'\n )\n\n # Account for matlab convention.\n df[['X', 'Y']] -= (1, 1)\n\n # Removed trailing column\n del df['unused']\n\n # Remove all rows without sufficient confidence\n return df[df['Confidence'] >= min_confidence]\n\n\ndef load_vatictxt(fname, **kwargs):\n \"\"\"Load Vatic text format.\n\n Loads the vatic CSV text having the following columns per row\n\n 0 Track ID. All rows with the same ID belong to the same path.\n 1 xmin. The top left x-coordinate of the bounding box.\n 2 ymin. The top left y-coordinate of the bounding box.\n 3 xmax. The bottom right x-coordinate of the bounding box.\n 4 ymax. The bottom right y-coordinate of the bounding box.\n 5 frame. The frame that this annotation represents.\n 6 lost. If 1, the annotation is outside of the view screen.\n 7 occluded. If 1, the annotation is occluded.\n 8 generated. If 1, the annotation was automatically interpolated.\n 9 label. The label for this annotation, enclosed in quotation marks.\n 10+ attributes. Each column after this is an attribute set in the current frame\n\n Params\n ------\n fname : str\n Filename to load data from\n\n Returns\n ------\n df : pandas.DataFrame\n The returned dataframe has the following columns\n 'X', 'Y', 'Width', 'Height', 'Lost', 'Occluded', 'Generated', 'ClassId', '<Attr1>', '<Attr2>', ...\n where <Attr1> is placeholder for the actual attribute name capitalized (first letter). The order of attribute\n columns is sorted in attribute name. The dataframe is indexed by ('FrameId', 'Id')\n \"\"\"\n # pylint: disable=too-many-locals\n\n sep = kwargs.pop('sep', ' ')\n\n with io.open(fname) as f:\n # First time going over file, we collect the set of all variable activities\n activities = set()\n for line in f:\n for c in line.rstrip().split(sep)[10:]:\n activities.add(c)\n activitylist = sorted(list(activities))\n\n # Second time we construct artificial binary columns for each activity\n data = []\n f.seek(0)\n for line in f:\n fields = line.rstrip().split()\n attrs = ['0'] * len(activitylist)\n for a in fields[10:]:\n attrs[activitylist.index(a)] = '1'\n fields = fields[:10]\n fields.extend(attrs)\n data.append(' '.join(fields))\n\n strdata = '\\n'.join(data)\n\n dtype = {\n 'Id': np.int64,\n 'X': np.float32,\n 'Y': np.float32,\n 'Width': np.float32,\n 'Height': np.float32,\n 'FrameId': np.int64,\n 'Lost': bool,\n 'Occluded': bool,\n 'Generated': bool,\n 'ClassId': str,\n }\n\n # Remove quotes from activities\n activitylist = [a.replace('\\\"', '').capitalize() for a in activitylist]\n\n # Add dtypes for activities\n for a in activitylist:\n dtype[a] = bool\n\n # Read from CSV\n names = ['Id', 'X', 'Y', 'Width', 'Height', 'FrameId', 'Lost', 'Occluded', 'Generated', 'ClassId']\n names.extend(activitylist)\n df = pd.read_csv(io.StringIO(strdata), names=names, index_col=['FrameId', 'Id'], header=None, sep=' ')\n\n # Correct Width and Height which are actually XMax, Ymax in files.\n w = df['Width'] - df['X']\n h = df['Height'] - df['Y']\n df['Width'] = w\n df['Height'] = h\n\n return df\n\n\ndef load_detrac_mat(fname):\n \"\"\"Loads UA-DETRAC annotations data from mat files\n\n Competition Site: http://detrac-db.rit.albany.edu/download\n\n File contains a nested structure of 2d arrays for indexed by frame id\n and Object ID. Separate arrays for top, left, width and height are given.\n\n Params\n ------\n fname : str\n Filename to load data from\n\n Kwargs\n ------\n Currently none of these arguments used.\n\n Returns\n ------\n df : pandas.DataFrame\n The returned dataframe has the following columns\n 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'\n The dataframe is indexed by ('FrameId', 'Id')\n \"\"\"\n\n matData = scipy.io.loadmat(fname)\n\n frameList = matData['gtInfo'][0][0][4][0]\n leftArray = matData['gtInfo'][0][0][0]\n topArray = matData['gtInfo'][0][0][1]\n widthArray = matData['gtInfo'][0][0][3]\n heightArray = matData['gtInfo'][0][0][2]\n\n parsedGT = []\n for f in frameList:\n ids = [i + 1 for i, v in enumerate(leftArray[f - 1]) if v > 0]\n for i in ids:\n row = []\n row.append(f)\n row.append(i)\n row.append(leftArray[f - 1, i - 1] - widthArray[f - 1, i - 1] / 2)\n row.append(topArray[f - 1, i - 1] - heightArray[f - 1, i - 1])\n row.append(widthArray[f - 1, i - 1])\n row.append(heightArray[f - 1, i - 1])\n row.append(1)\n row.append(-1)\n row.append(-1)\n row.append(-1)\n parsedGT.append(row)\n\n df = pd.DataFrame(parsedGT,\n columns=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'])\n df.set_index(['FrameId', 'Id'], inplace=True)\n\n # Account for matlab convention.\n df[['X', 'Y']] -= (1, 1)\n\n # Removed trailing column\n del df['unused']\n\n return df\n\n\ndef load_detrac_xml(fname):\n \"\"\"Loads UA-DETRAC annotations data from xml files\n\n Competition Site: http://detrac-db.rit.albany.edu/download\n\n Params\n ------\n fname : str\n Filename to load data from\n\n Kwargs\n ------\n Currently none of these arguments used.\n\n Returns\n ------\n df : pandas.DataFrame\n The returned dataframe has the following columns\n 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility'\n The dataframe is indexed by ('FrameId', 'Id')\n \"\"\"\n\n with io.open(fname) as fd:\n doc = xmltodict.parse(fd.read())\n frameList = doc['sequence']['frame']\n\n parsedGT = []\n for f in frameList:\n fid = int(f['@num'])\n targetList = f['target_list']['target']\n if not isinstance(targetList, list):\n targetList = [targetList]\n\n for t in targetList:\n row = []\n row.append(fid)\n row.append(int(t['@id']))\n row.append(float(t['box']['@left']))\n row.append(float(t['box']['@top']))\n row.append(float(t['box']['@width']))\n row.append(float(t['box']['@height']))\n row.append(1)\n row.append(-1)\n row.append(-1)\n row.append(-1)\n parsedGT.append(row)\n\n df = pd.DataFrame(parsedGT,\n columns=['FrameId', 'Id', 'X', 'Y', 'Width', 'Height', 'Confidence', 'ClassId', 'Visibility', 'unused'])\n df.set_index(['FrameId', 'Id'], inplace=True)\n\n # Account for matlab convention.\n df[['X', 'Y']] -= (1, 1)\n\n # Removed trailing column\n del df['unused']\n\n return df\n\n\ndef loadtxt(fname, fmt=Format.MOT15_2D, **kwargs):\n \"\"\"Load data from any known format.\"\"\"\n fmt = Format(fmt)\n\n switcher = {\n Format.MOT16: load_motchallenge,\n Format.MOT15_2D: load_motchallenge,\n Format.VATIC_TXT: load_vatictxt,\n Format.DETRAC_MAT: load_detrac_mat,\n Format.DETRAC_XML: load_detrac_xml\n }\n func = switcher.get(fmt)\n return func(fname, **kwargs)\n\n\ndef render_summary(summary, formatters=None, namemap=None, buf=None):\n \"\"\"Render metrics summary to console friendly tabular output.\n\n Params\n ------\n summary : pd.DataFrame\n Dataframe containing summaries in rows.\n\n Kwargs\n ------\n buf : StringIO-like, optional\n Buffer to write to\n formatters : dict, optional\n Dicionary defining custom formatters for individual metrics.\n I.e `{'mota': '{:.2%}'.format}`. You can get preset formatters\n from MetricsHost.formatters\n namemap : dict, optional\n Dictionary defining new metric names for display. I.e\n `{'num_false_positives': 'FP'}`.\n\n Returns\n -------\n string\n Formatted string\n \"\"\"\n\n if namemap is not None:\n summary = summary.rename(columns=namemap)\n if formatters is not None:\n formatters = {namemap.get(c, c): f for c, f in formatters.items()}\n\n output = summary.to_string(\n buf=buf,\n formatters=formatters,\n )\n\n return output\n\n\nmotchallenge_metric_names = {\n 'idf1': 'IDF1',\n 'idp': 'IDP',\n 'idr': 'IDR',\n 'recall': 'Rcll',\n 'precision': 'Prcn',\n 'num_unique_objects': 'GT',\n 'mostly_tracked': 'MT',\n 'partially_tracked': 'PT',\n 'mostly_lost': 'ML',\n 'num_false_positives': 'FP',\n 'num_misses': 'FN',\n 'num_switches': 'IDs',\n 'num_fragmentations': 'FM',\n 'mota': 'MOTA',\n 'motp': 'MOTP',\n 'num_transfer': 'IDt',\n 'num_ascend': 'IDa',\n 'num_migrate': 'IDm',\n 'D': \"Dist\",\n 'inverseD': \"inverseD\",\n}\n\"\"\"A list mappings for metric names to comply with MOTChallenge.\"\"\"\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
thekylesaurus/yellowbrick
[ "7447db6ccc30aea9f38d063162eab55e485dfa9d" ]
[ "tests/test_features/test_scatter.py" ]
[ "# tests.test_features.test_scatter\n# Test the ScatterViz feature analysis visualizers\n#\n# Author: Nathan Danielsen <[email protected]>\n# Created: Fri Feb 26 19:40:00 2017 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: test_scatter.py [fc94ec4] [email protected] $\n\"\"\"\nTest the ScatterViz feature analysis visualizers\n\"\"\"\n\n##########################################################################\n# Imports\n##########################################################################\n\nimport six\nimport pytest\nimport unittest\nimport numpy as np\nimport matplotlib as mptl\n\nfrom yellowbrick.features.scatter import *\nfrom yellowbrick.exceptions import YellowbrickValueError\nfrom yellowbrick.style import palettes\n\nfrom tests.dataset import DatasetMixin\nfrom tests.base import VisualTestCase\nfrom yellowbrick.exceptions import ImageComparisonFailure\n\ntry:\n import pandas\nexcept ImportError:\n pandas = None\n\n##########################################################################\n# ScatterViz Base Tests\n##########################################################################\n\[email protected]('ignore')\nclass ScatterVizTests(VisualTestCase, DatasetMixin):\n\n # yapf: disable\n X = np.array([\n [2.318, 2.727, 4.260, 7.212, 4.792],\n [2.315, 2.726, 4.295, 7.140, 4.783, ],\n [2.315, 2.724, 4.260, 7.135, 4.779, ],\n [2.110, 3.609, 4.330, 7.985, 5.595, ],\n [2.110, 3.626, 4.330, 8.203, 5.621, ],\n [2.110, 3.620, 4.470, 8.210, 5.612, ]\n ])\n # yapf: enable\n y = np.array([1, 0, 1, 0, 1, 0])\n\n def setUp(self):\n self.occupancy = self.load_data('occupancy')\n super(ScatterVizTests, self).setUp()\n\n def tearDown(self):\n self.occupancy = None\n super(ScatterVizTests, self).tearDown()\n\n def test_init_alias(self):\n features = [\"temperature\", \"relative_humidity\"]\n visualizer = ScatterVisualizer(features=features, markers=['*'])\n self.assertIsNotNone(visualizer.markers)\n\n def test_deprecated(self):\n with pytest.deprecated_call():\n features = [\"temperature\", \"relative_humidity\"]\n ScatterViz(features=features)\n\n @pytest.mark.skipif(six.PY2, reason=\"deprecation warnings filtered in PY2\")\n def test_deprecated_message(self):\n with pytest.warns(DeprecationWarning, match='Will be moved to yellowbrick.contrib in v0.8'):\n features = [\"temperature\", \"relative_humidity\"]\n ScatterViz(features=features)\n\n def test_scatter(self):\n \"\"\"\n Assert no errors occur during scatter visualizer integration\n \"\"\"\n X_two_cols = self.X[:, :2]\n features = [\"temperature\", \"relative_humidity\"]\n visualizer = ScatterViz(features=features)\n visualizer.fit_transform(X_two_cols, self.y)\n\n def test_color_builds(self):\n \"\"\"\n Assert no errors occur during scatter visualizer integration\n \"\"\"\n colors = palettes.PALETTES['pastel']\n X_two_cols = self.X[:, :2]\n features = [\"temperature\", \"relative_humidity\"]\n visualizer = ScatterViz(features=features, color=colors)\n visualizer.fit_transform(X_two_cols, self.y)\n\n def test_scatter_no_features(self):\n \"\"\"\n Assert no errors during scatter visualizer integration - no features\n \"\"\"\n X_two_cols = self.X[:, :2]\n visualizer = ScatterViz()\n visualizer.fit_transform_poof(X_two_cols, self.y)\n self.assertEquals(visualizer.features_, ['Feature One', 'Feature Two'])\n\n def test_scatter_only_two_features_allowed_init(self):\n \"\"\"\n Assert that only two features are allowed for scatter visualizer init\n \"\"\"\n features = [\"temperature\", \"relative_humidity\", \"light\"]\n\n with self.assertRaises(YellowbrickValueError):\n ScatterViz(features=features)\n\n def test_scatter_xy_and_features_raise_error(self):\n \"\"\"\n Assert that x,y and features will raise scatterviz error\n \"\"\"\n features = [\"temperature\", \"relative_humidity\", \"light\"]\n\n with self.assertRaises(YellowbrickValueError):\n ScatterViz(features=features, x='one', y='two')\n\n def test_scatter_xy_changes_to_features(self):\n \"\"\"\n Assert that x,y with no features will not raise scatterviz error\n \"\"\"\n visualizer = ScatterViz(x='one', y='two')\n self.assertEquals(visualizer.features_, ['one', 'two'])\n\n def test_scatter_requires_two_features_in_numpy_matrix(self):\n \"\"\"\n Assert only two features allowed for scatter visualizer if not in init\n \"\"\"\n visualizer = ScatterViz()\n with self.assertRaises(YellowbrickValueError) as context:\n visualizer.fit_transform(self.X, self.y)\n self.assertTrue(\n 'only accepts two features' in str(context.exception))\n\n def test_integrated_scatter(self):\n \"\"\"\n Test scatter on the real, occupancy data set\n \"\"\"\n # Load the data from the fixture\n X = self.occupancy[[\n \"temperature\", \"relative_humidity\", \"light\", \"C02\", \"humidity\"\n ]]\n\n # Convert to numpy arrays\n X = X.copy().view((float, len(X.dtype.names)))\n y = self.occupancy['occupancy'].astype(int)\n\n # Test the visualizer\n features = [\"temperature\", \"relative_humidity\"]\n visualizer = ScatterViz(features=features)\n visualizer.fit_transform_poof(X[:, :2], y)\n\n def test_scatter_quick_method(self):\n \"\"\"\n Test scatter quick method on the real, occupancy data set\n \"\"\"\n # Load the data from the fixture\n X = self.occupancy[[\n \"temperature\", \"relative_humidity\", \"light\", \"C02\", \"humidity\"\n ]]\n\n # Convert to numpy arrays\n X = X.copy().view((float, len(X.dtype.names)))\n y = self.occupancy['occupancy'].astype(int)\n\n # Test the visualizer\n features = [\"temperature\", \"relative_humidity\"]\n ax = scatterviz(X[:, :2], y=y, ax=None, features=features)\n\n # test that is returns a matplotlib obj with axes\n self.assertIsInstance(ax, mptl.axes.Axes)\n\n @unittest.skipUnless(pandas is not None,\n \"Pandas is not installed, could not run test.\")\n def test_integrated_scatter_with_pandas(self):\n \"\"\"\n Test scatterviz on the real, occupancy data set with pandas\n \"\"\"\n # Load the data from the fixture\n X = self.occupancy[[\n \"temperature\", \"relative_humidity\", \"light\", \"C02\", \"humidity\"\n ]]\n y = self.occupancy['occupancy'].astype(int)\n\n # Convert X to a pandas dataframe\n X = pandas.DataFrame(X)\n X.columns = [\n \"temperature\", \"relative_humidity\", \"light\", \"C02\", \"humidity\"\n ]\n\n # Test the visualizer\n features = [\"temperature\", \"relative_humidity\"]\n visualizer = ScatterViz(features=features)\n visualizer.fit_transform_poof(X, y)\n\n def test_integrated_scatter_numpy_named_arrays(self):\n \"\"\"\n Test scatterviz on numpy named arrays\n \"\"\"\n dt = np.dtype({\n 'names': ['one', 'two', 'three', 'four', \"five\"],\n 'formats': [\n np.float64,\n np.float64,\n np.float64,\n np.float64,\n np.float64,\n ]\n })\n\n X_named = self.X.astype(dt, casting='unsafe')\n visualizer = ScatterViz(features=['one', 'two'])\n visualizer.fit_transform_poof(X_named, self.y)\n self.assertEquals(visualizer.features_, ['one', 'two'])\n\n\n def test_integrated_scatter_numpy_arrays_no_names(self):\n \"\"\"\n Test scaterviz on regular numpy arrays\n \"\"\"\n visualizer = ScatterViz(features=[1, 2])\n visualizer.fit_transform_poof(self.X, self.y)\n self.assertEquals(visualizer.features_, [1, 2])\n\n def test_scatter_image(self):\n \"\"\"\n Test the scatterviz image similarity\n \"\"\"\n # self.setUp_ImageTest()\n\n X_two_cols = self.X[:, :2]\n features = [\"temperature\", \"relative_humidity\"]\n visualizer = ScatterViz(features=features)\n visualizer.fit(X_two_cols, self.y)\n visualizer.draw(X_two_cols, self.y)\n\n self.assert_images_similar(visualizer)\n\n\n def test_scatter_image_fail(self):\n \"\"\"\n Assert bad image similarity on scatterviz errors\n \"\"\"\n\n X_two_cols = self.X[:, :2]\n features = [\"temperature\", \"relative_humidity\"]\n visualizer = ScatterViz(features=features)\n visualizer.fit(X_two_cols, self.y)\n visualizer.draw(X_two_cols, self.y)\n\n with self.assertRaises(ImageComparisonFailure):\n self.assert_images_similar(visualizer)\n" ]
[ [ "numpy.dtype", "numpy.array", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
bbrauser/DS-Unit-3-Sprint-2-SQL-and-Databases
[ "a0fb261e31891184c2d57d9fb7d018d5896e672b" ]
[ "module4-acid-and-database-scalability-tradeoffs/mongo_titanic.py" ]
[ "import pandas as pd\nfrom sqlalchemy import create_engine\n\ndf = pd.read_csv('titanic.csv')\n\nengine = create_engine('postgres://kbgqljxh:[email protected]:5432/kbgqljxh')\ndf.to_sql('titanic_regress', con = engine)" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
hackerspace/hacked_cnc
[ "dcd70b4de18b525748ec45727ac7f37fcd8b2ac1" ]
[ "hc/ui/glitems/grid.py" ]
[ "import numpy as np\n\nimport OpenGL.GL as gl\nfrom PyQt5 import QtGui\n\nfrom hc.ui.glitems import displaylist, HCItem\n\n\n# based on pyqtgraphs's grid\nclass Grid(HCItem):\n \"\"\"\n Displays a wire-grame grid.\n \"\"\"\n\n def __init__(self, size=None, color=None, antialias=True, glOptions='translucent'):\n super(Grid, self).__init__()\n self.setGLOptions(glOptions)\n self.antialias = antialias\n if size is None:\n size = QtGui.QVector3D(20, 20, 1)\n self.setSize(size=size)\n self.setSpacing(1, 1, 1)\n\n def setSize(self, x=None, y=None, z=None, size=None):\n \"\"\"\n Set the size of the axes (in its local coordinate system; this does not affect the transform)\n Arguments can be x,y,z or size=QVector3D().\n \"\"\"\n if size is not None:\n x = size.x()\n y = size.y()\n z = size.z()\n self.__size = [x, y, z]\n self.update()\n\n def size(self):\n return self.__size[:]\n\n def setSpacing(self, x=None, y=None, z=None, spacing=None):\n \"\"\"\n Set the spacing between grid lines.\n Arguments can be x,y,z or spacing=QVector3D().\n \"\"\"\n if spacing is not None:\n x = spacing.x()\n y = spacing.y()\n z = spacing.z()\n self.__spacing = [x, y, z]\n self.update()\n\n def spacing(self):\n return self.__spacing[:]\n\n @displaylist\n def paint(self):\n self.setupGLState()\n\n if self.antialias:\n gl.glEnable(gl.GL_LINE_SMOOTH)\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)\n gl.glBegin(gl.GL_LINES)\n\n x, y, z = self.size()\n xs, ys, zs = self.spacing()\n xvals = np.arange(-x / 2., x / 2. + xs * 0.001, xs)\n yvals = np.arange(-y / 2., y / 2. + ys * 0.001, ys)\n gl.glColor4f(1, 0.5, 0.5, .3)\n for x in xvals:\n gl.glVertex3f(x, yvals[0], 0)\n gl.glVertex3f(x, yvals[-1], 0)\n for y in yvals:\n gl.glVertex3f(xvals[0], y, 0)\n gl.glVertex3f(xvals[-1], y, 0)\n\n gl.glEnd()\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ilyakava/ACGAN-PyTorch
[ "a2ac29d4d297e091a9c6a80281767f796b390be2" ]
[ "closest_faces_plot.py" ]
[ "from __future__ import absolute_import, division, print_function\nimport argparse\nimport os\nimport re\nfrom collections import defaultdict\nimport glob\nimport time\nimport pathlib\nimport imageio\nimport sys\nimport numpy as np\nimport fid\nimport imageio\nimport torch\nimport torchvision.utils as vutils\nimport torch.utils.data as utils\nimport visdom\nfrom torchvision import transforms\nfrom GAN_training.models import resnet, resnet_extra, resnet_48\nfrom classification.models.vgg_face_dag import vgg_face_dag\nfrom tqdm import tqdm\n\nimport data\n\nfrom sklearn.metrics import pairwise_distances\n\nimport matplotlib.pyplot as plt\n\nfrom mtcnn.mtcnn import MTCNN\nimport cv2\n\nimport pdb\n\nxp = '/scratch0/ilya/locDoc/ACGAN/experiments/'\n# xp = '/fs/vulcan-scratch/ilyak/locDoc/experiments/'\nfig_dir = '~/[email protected]/ramawks69/ACGAN-PyTorch/figs/'\n\n\ndef face_plots(mode, n_used_imgs):\n class optclass:\n workaround = True\n \n \n opt = optclass()\n optdict = {\n #'outf': '/scratch0/ilya/locDoc/ACGAN/experiments/yogesh_acgan_0p2',\n 'outf': xp+'marygan-stl-48-miyato-hyp-lrGp4-auxp4',\n 'netG': xp+'marygan-stl-48-miyato-hyp-lrGp4-auxp4/netG_iter_069999.pth',\n 'marygan': True,\n 'imageSize': 48,\n 'data_root': '/scratch0/ilya/locDoc/data/stl10',\n 'dataset': 'cifar',\n 'dev_batch_size': 100,\n 'size_labeled_data': 4000,\n 'train_batch_size': 128,\n 'train_batch_size_2': 100,\n 'cifar_fname': '/scratch0/ilya/locDoc/data/cifar10/fid_is_scores.npz',\n 'nz': 128,\n 'GAN_nz': 128,\n 'ngpu': 1,\n 'nc':3\n }\n for k, v in optdict.items():\n setattr(opt, k, v)\n\n if mode == 'vanilla':\n opt.outf = xp+'celeb_cpy/celeba_vanillagan'\n opt.netG = opt.outf+'/netG_iter_129999.pth' # 4.130121811844333\n opt.marygan = False\n opt.imageSize = 64\n elif mode == 'marygan':\n opt.outf = xp+'celeb_cpy/celeba5c_marygan'\n opt.netG = opt.outf+'/netG_iter_129999.pth' # 3.6509132644800673\n opt.marygan = True\n opt.imageSize = 64\n elif mode == 'acgan':\n opt.outf = xp+'celeb_cpy/celeba5c_acgan'\n opt.netG = opt.outf+'/netG_iter_119999.pth' # 5.074366134380284\n opt.marygan = False\n opt.imageSize = 64\n\n if opt.netG == '':\n netGfiles = glob.glob(os.path.join(opt.outf, 'netG_iter_*.pth'))\n netGfiles.sort(key = lambda s: int(s.split('_')[-1].split('.')[0]))\n opt.netG = netGfiles[-1]\n print(opt.netG)\n\n if opt.imageSize == 32:\n netG = resnet.Generator(opt)\n elif opt.imageSize == 64:\n netG = resnet_extra.Generator(opt)\n elif opt.imageSize == 48:\n netG = resnet_48.Generator(opt)\n netG.load_state_dict(torch.load(opt.netG))\n netG = netG.cuda()\n\n detector = MTCNN()\n min_conf = 0.99\n\n # gen images\n\n batch_size = opt.train_batch_size\n nz = opt.nz\n noise = torch.FloatTensor(opt.train_batch_size, nz)\n noise = noise.cuda()\n num_classes = 10\n klass_picked = None\n\n # create images\n n_gen_imgs = ((n_used_imgs // opt.train_batch_size) + 1) * opt.train_batch_size\n x = np.empty((n_gen_imgs,3,opt.imageSize,opt.imageSize), dtype=np.uint8)\n save_noise = np.empty((n_gen_imgs,128))\n # create a bunch of GAN images\n start = 0\n pbar = tqdm(total=n_gen_imgs)\n while not start == n_gen_imgs:\n \n #for l in tqdm(range((n_used_imgs // opt.train_batch_size) + 1),desc='Generating'):\n noise.data.resize_(batch_size, nz).normal_(0, 1)\n #label = np.random.randint(0, num_classes, batch_size)\n if klass_picked is None:\n label = np.random.randint(0, num_classes, batch_size)\n else:\n label = np.ones((batch_size,),dtype=int)*klass_picked\n noise_ = np.random.normal(0, 1, (batch_size, nz))\n if not opt.marygan:\n class_onehot = np.zeros((batch_size, num_classes))\n class_onehot[np.arange(batch_size), label] = 1\n noise_[np.arange(batch_size), :num_classes] = class_onehot[np.arange(batch_size)]\n noise_ = (torch.from_numpy(noise_))\n noise.data.copy_(noise_.view(batch_size, nz))\n fake = netG(noise).data.cpu().numpy()\n fake = np.floor((fake + 1) * 255/2.0).astype(np.uint8)\n \n class_filt = np.zeros(batch_size).astype(bool)\n for bi in range(fake.shape[0]):\n res = detector.detect_faces(np.moveaxis(fake[bi],0,-1))\n if res and res[0]['confidence'] > min_conf:\n class_filt[bi] = True\n \n fake_picked = fake[class_filt]\n end = min(start + fake_picked.shape[0], n_gen_imgs)\n \n x[start:end] = fake_picked[:(end-start)]\n start = end\n pbar.update(fake_picked.shape[0])\n pbar.close()\n\n\n\n device = torch.device(\"cuda:1\")\n compnet = vgg_face_dag(weights_path='/scratch0/ilya/locDownloads/vgg_face_dag.pth')\n compnet = compnet.to(device)\n compnet.eval()\n transform_test=transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((224,224)),\n transforms.Lambda(lambda img: np.moveaxis(np.array(img),-1,0) ),\n ])\n\n\n\n #net_in = np.empty((x.shape[0],3,32,32))\n net_in = np.empty((x.shape[0],) + (3,224,224))\n for i in tqdm(range(x.shape[0]),desc='Preprocess'):\n net_in[i] = transform_test(np.moveaxis(x[i],0,-1))\n\n my_dataset = utils.TensorDataset(torch.FloatTensor(net_in))\n my_dataloader = utils.DataLoader(my_dataset, batch_size=opt.train_batch_size, shuffle=False)\n\n\n\n #net_out = np.empty((x.shape[0], 602112))\n #net_out = np.empty((x.shape[0], 12288))\n #net_out = np.empty((x.shape[0], 1024)) # Densenet\n net_out = np.empty((x.shape[0], 2622)) # vgg-face\n for i, batch in enumerate(tqdm(my_dataloader,desc='Extract Feat')):\n start = i * opt.train_batch_size\n end = start + opt.train_batch_size\n batch_in = batch[0].to(device)\n batch_out = compnet(batch_in).detach().data.cpu()\n net_out[start:end] = batch_out\n\n mse_out = x.reshape(x.shape[0], np.prod(x.shape[1:]))\n \n\n D = pairwise_distances(net_out)\n deleted_flag = D.max()\n\n # remove the diagonal and lower triangle\n to_del = np.tril(np.ones((D.shape[0], D.shape[0]), dtype=int))\n D[to_del == 1] = deleted_flag\n\n dists = D.flatten()\n closest_N = 40\n idxs = np.argpartition(dists,closest_N)\n min_idxs = sorted(idxs[:closest_N], key=lambda i: dists[i])\n closest_idxs = [(idx // D.shape[0], idx % D.shape[0]) for idx in min_idxs]\n\n closest_imgs = np.zeros((closest_N * 2,)+x.shape[1:])\n used_idxs = []\n l = 0\n for (i,j) in closest_idxs:\n res1 = detector.detect_faces(np.moveaxis(x[i],0,-1))\n res2 = detector.detect_faces(np.moveaxis(x[j],0,-1))\n if (i not in used_idxs) and (j not in used_idxs) and res1 and res2:\n closest_imgs[2*l] = x[min(i,j)]\n closest_imgs[2*l + 1] = x[max(i,j)]\n used_idxs.append(i)\n used_idxs.append(j)\n l += 1\n\n # save top 10 pairs\n fake_grid = vutils.make_grid(torch.Tensor(closest_imgs[:20]), nrow=2, padding=0, normalize=True)\n fake_grid_img = np.moveaxis(fake_grid.data.cpu().numpy(),0,-1)\n return fake_grid_img\n\nif __name__ == '__main__':\n\n\n ngen = 2000\n for trial in range(3):\n imageio.imsave(fig_dir + 'vanilla_%i_trial_%i.png' % (ngen, trial),\n face_plots('vanilla', ngen))\n imageio.imsave(fig_dir + 'marygan_%i_trial_%i.png' % (ngen, trial),\n face_plots('marygan', ngen))\n imageio.imsave(fig_dir + 'acgan_%i_trial_%i.png' % (ngen, trial),\n face_plots('acgan', ngen))\n\n\n # ngen = 4000\n # for trial in range(3):\n # imageio.imsave(fig_dir + 'vanilla_%i_trial_%i.png' % (ngen, trial),\n # face_plots('vanilla', ngen))\n # imageio.imsave(fig_dir + 'marygan_%i_trial_%i.png' % (ngen, trial),\n # face_plots('marygan', ngen))\n # imageio.imsave(fig_dir + 'acgan_%i_trial_%i.png' % (ngen, trial),\n # face_plots('acgan', ngen))\n\n\n\n \n\n" ]
[ [ "sklearn.metrics.pairwise_distances", "torch.Tensor", "torch.load", "numpy.arange", "torch.utils.data.DataLoader", "torch.from_numpy", "numpy.ones", "numpy.random.normal", "torch.FloatTensor", "numpy.argpartition", "numpy.prod", "numpy.moveaxis", "torch.device", "numpy.floor", "numpy.array", "numpy.zeros", "numpy.empty", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ArthurAllshire/hack-day-2018
[ "8b694a3e7110825a219a837ca68e7d6aaa2fe021" ]
[ "github.py" ]
[ "import requests\nfrom datetime import datetime\nimport time\nimport math\nimport numpy as np\nfrom urllib.parse import urlparse, parse_qs\n\nclass GitHub:\n \"\"\"\n Class to fetch from GitHub REST api and return list of commits from a\n specific repo.\n \"\"\"\n API_URL = \"https://api.github.com\"\n HOUR = 3600 # sec\n INTERVAL_LENGTH = HOUR * 6\n API_TIME_STR = \"%Y-%m-%dT%H:%M:%S\"\n MAX_PAGES = 1000\n\n def __init__(self):\n keyfile = open('token.txt', 'r')\n self.token = keyfile.read().strip('\\n')\n keyfile.close()\n self.headers = {'Authorization': 'token %s' % self.token}\n # print(self.headers)\n\n def request(self, url):\n page = 1\n last = 1\n page_head = \"?page=\"\n add_head = \"&per_page=100\"\n response = []\n while not page > last and page <= self.MAX_PAGES:\n r = requests.get(url+page_head+str(page)+add_head,\n headers=self.headers)\n try:\n last_link = r.links['last']['url']\n except KeyError:\n response += r.json()\n break\n # print(last_link)\n # massive hack\n last = int(last_link.split('=')[1].split('&')[0])\n print(f'Last {last} Page {page}')\n page += 1\n response += r.json()\n return response\n\n def get_commits(self, repo_owner, repo_name):\n url = GitHub.API_URL + f\"/repos/{repo_owner}/{repo_name}/commits\"\n commits_list_raw = self.request(url)\n\n sec_list = []\n\n for i in commits_list_raw:\n date_i_string = i[\"commit\"][\"author\"][\"date\"].strip(\"Z\")\n date_i_time = datetime.strptime(date_i_string, self.API_TIME_STR)\n sec_i = time.mktime(date_i_time.timetuple())\n sec_list.append(sec_i)\n\n total_time = max(sec_list) - min(sec_list)\n interval_number = math.ceil(total_time / self.INTERVAL_LENGTH)\n start = min(sec_list)\n end = min(sec_list) + interval_number * self.INTERVAL_LENGTH\n # print(f\"Time Delta {end-start}\")\n\n bins = np.linspace(start, end, interval_number+1)\n bin_means = [(bins[i-1] + bins[i])/2\n for i in range(len(bins[1:]))]\n digitised = np.digitize(sec_list, bins)\n\n commit_counts = np.zeros(shape=(len(bin_means)))\n\n for bn in digitised:\n commit_counts[bn-1] = commit_counts[bn-1] + 1\n # time, commit_count, rate\n # rate is returned in bins/hour, as the eventual frequency will be\n # per hour\n return bin_means, commit_counts, self.INTERVAL_LENGTH/self.HOUR\n" ]
[ [ "numpy.digitize", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Sawato/gail-driver
[ "358040faa6591e0b0a24776e15778fe39e8eb1c4" ]
[ "tf_rllab/samplers/batch_sampler.py" ]
[ "from rllab.sampler.base import BaseSampler\nfrom rllab.sampler import parallel_sampler\nfrom rllab.sampler.stateful_pool import singleton_pool\nimport tensorflow as tf\n\n\ndef worker_init_tf(G):\n G.sess = tf.Session()\n G.sess.__enter__()\n\n\ndef worker_init_tf_vars(G):\n G.sess.run(tf.initialize_all_variables())\n\n\nclass BatchSampler(BaseSampler):\n def start_worker(self):\n if singleton_pool.n_parallel > 1:\n singleton_pool.run_each(worker_init_tf)\n parallel_sampler.populate_task(self.algo.env, self.algo.policy)\n if singleton_pool.n_parallel > 1:\n singleton_pool.run_each(worker_init_tf_vars)\n\n def shutdown_worker(self):\n parallel_sampler.terminate_task(scope=self.algo.scope)\n\n def obtain_samples(self, itr):\n cur_policy_params = self.algo.policy.get_param_values()\n paths = parallel_sampler.sample_paths(\n policy_params=cur_policy_params,\n env_params=None,\n max_samples=self.algo.batch_size,\n max_path_length=self.algo.max_path_length,\n scope=self.algo.scope,\n )\n if self.algo.whole_paths:\n return paths\n else:\n paths_truncated = parallel_sampler.truncate_paths(\n paths, self.algo.batch_size)\n return paths_truncated\n" ]
[ [ "tensorflow.initialize_all_variables", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
MoMe36/Q-Learning-PyTorch
[ "bc2ecb9df2d7b294d490eea653b691ef8ff4c680" ]
[ "ddqn.py" ]
[ "import torch \nimport torch.nn as nn \nimport torch.nn.functional as F \nimport torch.optim as optim \n\n\nfrom common import ReplayBuffer, Env \nimport numpy as np \nimport gym \nfrom collections import deque\nimport random \nimport copy\n\nimport matplotlib.pyplot as plt \nplt.style.use('ggplot')\n\n\nclass DQN(nn.Module): \n\n\tdef __init__(self, sizes, hidden = 32): \n\n\t\tnn.Module.__init__(self)\n\n\t\tself.sizes = sizes\n\t\tself.model = nn.Sequential(nn.Linear(sizes[0], hidden), nn.ReLU(), \n\t\t\t\t\t\t\t\t nn.Linear(hidden, hidden), nn.ReLU(), \n\t\t\t\t\t\t\t\t nn.Linear(hidden, sizes[1]))\n\n\t\t# for module in self.model: \n\t\t# \tif isinstance(module, nn.Linear): \n\t\t# \t\tmodule.weight.data.uniform_(-0.01,0.01)\n\t\t# \t\tmodule.bias.data.zero_()\n\n\tdef forward(self, x): \n\n\t\toutput = self.model(x)\n\t\treturn output\n\n\tdef act(self, x, eps): \n\n\t\tif np.random.random() < eps: \n\t\t\taction = np.random.randint(0,2)\n\t\telse: \n\t\t\tvals = self(x).detach()\n\t\t\taction = torch.max(vals, 1)[1].item()\n\n\t\treturn action \n\n\tdef sample_action(self): \n\n\t\treturn torch.randint(low = 0, high = self.sizes[1], size= [1]).long().item()\n\n\tdef compute_loss(self, memory, target, batch_size = 32): \n\n\t\tstates, actions, rewards, next_obs, done = memory.sample(batch_size) \n\n\t\tq_values = self(torch.tensor(states).float())\n\t\tselected_q_values = torch.gather(q_values, 1, torch.tensor(actions).long().reshape(-1,1))\n\n\t\trewards = torch.tensor(rewards).reshape(-1,1).float()\n\n\t\tnext_q_vals = self(torch.tensor(next_obs).float()) \n\t\tnext_q_state_val = target(torch.tensor(next_obs).float()) # USING TARGET TO PREDICT NEXT Q VALUES\n\t\tnext_q_vals = torch.gather(next_q_state_val, 1, torch.max(next_q_vals,1)[1].reshape(-1,1)).reshape(-1,1)\n\n\t\tmasks = torch.tensor(done).float().reshape(-1,1)\n\n\t\texpected = rewards + 0.99*next_q_vals*masks\n\t\tloss = torch.mean(torch.pow(q_values - expected.detach(),2)) #F.smooth_l1_loss(selected_q_values, expected)\n\n\t\treturn loss \n\n\ndef create_target(agent): \n\n\treturn copy.deepcopy(agent)\n\n\nenv = Env('CartPole-v0')\nagent = DQN(env.sizes) \ntarget_network = create_target(agent)\n\nadam = optim.Adam(agent.parameters(),1e-3)\n\nmemory = ReplayBuffer(1000)\n\nepochs = 20000\nbatch_size = 32\n\nmax_eps = 1. \nmin_eps = 0.01 \neps_decay = 8000 \n\neps = lambda max_eps, min_eps, eps_decay, epoch : min_eps + (max_eps - min_eps)*np.exp(-1.*epoch/eps_decay)\nrecap = []\n\nepisode_mean_reward = 0 \n\nfor episode in range(epochs): \n\n\ts = env.reset()\n\tepisode_reward = 0\n\tdone = False\n\t\n\twhile not done: \n\t\n\t\taction = agent.act(s, eps(max_eps, min_eps, eps_decay, episode))\n\n\t\tns, r, done, _ = env.step(action)\n\n\t\tmemory.observe_episode(s, action, r, ns, done)\n\n\t\t\n\t\ts = ns\n\t\tepisode_reward += r\n\n\t\tif done: \n\t\t\t\n\t\t\tepisode_mean_reward += episode_reward\n\t\t\tif(episode%100 == 0 and episode > 0): \n\t\t\t\tprint('Episode :{} Reward: {:.3f} Loss:{:.3f} Eps:{:.3f}'.format(episode, episode_mean_reward/100., loss.item(), eps(max_eps, min_eps, eps_decay, episode)))\n\t\t\t\tepisode_mean_reward = 0 \n\n\t\t\trecap.append(episode_reward)\n\t\t\tepisode_reward = 0 \n\n\n\t\t\t# TRAINING \n\t\t\tif(len(memory) > batch_size): \n\t\t\t\tloss = agent.compute_loss(memory, target_network, batch_size)\n\t\t\t\tadam.zero_grad()\n\t\t\t\tloss.backward()\n\t\t\t\tadam.step()\n\n\n\t\t\tif episode % 10 == 0: \n\t\t\t\ttarget_network = create_target(agent)\n\nplt.plot(recap)\n\nplt.show()\n\n\n\n\n\t\t\n\n\n\n\n\n" ]
[ [ "numpy.random.random", "torch.max", "torch.nn.Module.__init__", "torch.randint", "torch.tensor", "matplotlib.pyplot.plot", "torch.nn.Linear", "numpy.exp", "torch.nn.ReLU", "matplotlib.pyplot.show", "matplotlib.pyplot.style.use", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
odenio/beam
[ "d6d6f30723fbfb1a0472fe814b419232e06952a5" ]
[ "sdks/python/apache_beam/dataframe/frames_test.py" ]
[ "#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\nimport pandas as pd\nfrom parameterized import parameterized\n\nimport apache_beam as beam\nfrom apache_beam.dataframe import expressions\nfrom apache_beam.dataframe import frame_base\nfrom apache_beam.dataframe import frames # pylint: disable=unused-import\n\nPD_VERSION = tuple(map(int, pd.__version__.split('.')))\n\nGROUPBY_DF = pd.DataFrame({\n 'group': ['a' if i % 5 == 0 or i % 3 == 0 else 'b' for i in range(100)],\n 'foo': [None if i % 11 == 0 else i for i in range(100)],\n 'bar': [None if i % 7 == 0 else 99 - i for i in range(100)],\n 'baz': [None if i % 13 == 0 else i * 2 for i in range(100)],\n 'bool': [i % 17 == 0 for i in range(100)],\n 'str': [str(i) for i in range(100)],\n})\n\n\ndef _get_deferred_args(*args):\n return [\n frame_base.DeferredFrame.wrap(\n expressions.ConstantExpression(arg, arg[0:0])) for arg in args\n ]\n\n\nclass DeferredFrameTest(unittest.TestCase):\n def _run_error_test(\n self, func, *args, construction_time=True, distributed=True):\n \"\"\"Verify that func(*args) raises the same exception in pandas and in Beam.\n\n Note that by default this only checks for exceptions that the Beam DataFrame\n API raises during expression generation (i.e. construction time).\n Exceptions raised while the pipeline is executing are less helpful, but\n are sometimes unavoidable (e.g. data validation exceptions), to check for\n these exceptions use construction_time=False.\"\"\"\n deferred_args = _get_deferred_args(*args)\n\n # Get expected error\n try:\n expected = func(*args)\n except Exception as e:\n expected_error = e\n else:\n raise AssertionError(\n \"Expected an error, but executing with pandas successfully \"\n f\"returned:\\n{expected}\")\n\n # Get actual error\n if construction_time:\n try:\n _ = func(*deferred_args)._expr\n except Exception as e:\n actual = e\n else:\n raise AssertionError(\n f\"Expected an error:\\n{expected_error}\\nbut Beam successfully \"\n f\"generated an expression.\")\n else: # not construction_time\n # Check for an error raised during pipeline execution\n expr = func(*deferred_args)._expr\n session_type = (\n expressions.PartitioningSession\n if distributed else expressions.Session)\n try:\n result = session_type({}).evaluate(expr)\n except Exception as e:\n actual = e\n else:\n raise AssertionError(\n f\"Expected an error:\\n{expected_error}\\nbut Beam successfully \"\n f\"Computed the result:\\n{result}.\")\n\n # Verify\n if (not isinstance(actual, type(expected_error)) or\n not str(actual) == str(expected_error)):\n raise AssertionError(\n f'Expected {expected_error!r} to be raised, but got {actual!r}'\n ) from actual\n\n def _run_test(self, func, *args, distributed=True, nonparallel=False):\n \"\"\"Verify that func(*args) produces the same result in pandas and in Beam.\n\n Args:\n distributed (bool): Whether or not to use PartitioningSession to\n simulate parallel execution.\n nonparallel (bool): Whether or not this function contains a\n non-parallelizable operation. If True, the expression will be\n generated twice, once outside of an allow_non_parallel_operations\n block (to verify NonParallelOperation is raised), and again inside\n of an allow_non_parallel_operations block to actually generate an\n expression to verify.\"\"\"\n # Compute expected value\n expected = func(*args)\n\n # Compute actual value\n deferred_args = _get_deferred_args(*args)\n if nonparallel:\n # First run outside a nonparallel block to confirm this raises as expected\n with self.assertRaises(expressions.NonParallelOperation) as raised:\n func(*deferred_args)\n\n if raised.exception.msg.startswith(\n \"Encountered non-parallelizable form of\"):\n raise AssertionError(\n \"Default NonParallelOperation raised, please specify a reason in \"\n \"the Singleton() partitioning requirement for this operation.\"\n ) from raised.exception\n\n # Re-run in an allow non parallel block to get an expression to verify\n with beam.dataframe.allow_non_parallel_operations():\n expr = func(*deferred_args)._expr\n else:\n expr = func(*deferred_args)._expr\n\n # Compute the result of the generated expression\n session_type = (\n expressions.PartitioningSession if distributed else expressions.Session)\n\n actual = session_type({}).evaluate(expr)\n\n # Verify\n if isinstance(expected, pd.core.generic.NDFrame):\n if distributed:\n if expected.index.is_unique:\n expected = expected.sort_index()\n actual = actual.sort_index()\n else:\n expected = expected.sort_values(list(expected.columns))\n actual = actual.sort_values(list(actual.columns))\n\n if isinstance(expected, pd.Series):\n pd.testing.assert_series_equal(expected, actual)\n elif isinstance(expected, pd.DataFrame):\n pd.testing.assert_frame_equal(expected, actual)\n else:\n raise ValueError(\n f\"Expected value is a {type(expected)},\"\n \"not a Series or DataFrame.\")\n else:\n # Expectation is not a pandas object\n if isinstance(expected, float):\n cmp = lambda x: np.isclose(expected, x)\n else:\n cmp = expected.__eq__\n self.assertTrue(\n cmp(actual), 'Expected:\\n\\n%r\\n\\nActual:\\n\\n%r' % (expected, actual))\n\n def test_series_arithmetic(self):\n a = pd.Series([1, 2, 3])\n b = pd.Series([100, 200, 300])\n self._run_test(lambda a, b: a - 2 * b, a, b)\n\n def test_get_column(self):\n df = pd.DataFrame({\n 'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n 'Speed': [380., 370., 24., 26.]\n })\n self._run_test(lambda df: df['Animal'], df)\n self._run_test(lambda df: df.Speed, df)\n\n def test_set_column(self):\n def new_column(df):\n df['NewCol'] = df['Speed']\n return df\n\n df = pd.DataFrame({\n 'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n 'Speed': [380., 370., 24., 26.]\n })\n self._run_test(new_column, df)\n\n def test_str_split(self):\n s = pd.Series([\n \"this is a regular sentence\",\n \"https://docs.python.org/3/tutorial/index.html\",\n np.nan\n ])\n\n # TODO(BEAM-11931): pandas produces None for empty values with expand=True,\n # while we produce NaN (from pd.concat). This replicates some doctests that\n # verify that behavior, but with a replace call to ignore the difference.\n self._run_test(\n lambda s: s.str.split(expand=True).replace({None: np.nan}), s)\n self._run_test(\n lambda s: s.str.rsplit(\"/\", n=1, expand=True).replace({None: np.nan}),\n s)\n\n def test_set_column_from_index(self):\n def new_column(df):\n df['NewCol'] = df.index\n return df\n\n df = pd.DataFrame({\n 'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],\n 'Speed': [380., 370., 24., 26.]\n })\n self._run_test(new_column, df)\n\n def test_tz_localize_ambiguous_series(self):\n # This replicates a tz_localize doctest:\n # s.tz_localize('CET', ambiguous=np.array([True, True, False]))\n # But using a DeferredSeries instead of a np array\n\n s = pd.Series(\n range(3),\n index=pd.DatetimeIndex([\n '2018-10-28 01:20:00', '2018-10-28 02:36:00', '2018-10-28 03:46:00'\n ]))\n ambiguous = pd.Series([True, True, False], index=s.index)\n\n self._run_test(\n lambda s,\n ambiguous: s.tz_localize('CET', ambiguous=ambiguous),\n s,\n ambiguous)\n\n def test_sort_index_columns(self):\n df = pd.DataFrame({\n 'c': range(10),\n 'a': range(10),\n 'b': range(10),\n np.nan: range(10),\n })\n\n self._run_test(lambda df: df.sort_index(axis=1), df)\n self._run_test(lambda df: df.sort_index(axis=1, ascending=False), df)\n self._run_test(lambda df: df.sort_index(axis=1, na_position='first'), df)\n\n def test_where_callable_args(self):\n df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n\n self._run_test(\n lambda df: df.where(lambda df: df % 2 == 0, lambda df: df * 10), df)\n\n def test_where_concrete_args(self):\n df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])\n\n self._run_test(\n lambda df: df.where(\n df % 2 == 0, pd.Series({\n 'A': 123, 'B': 456\n }), axis=1),\n df)\n\n def test_add_prefix(self):\n df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n s = pd.Series([1, 2, 3, 4])\n\n self._run_test(lambda df: df.add_prefix('col_'), df)\n self._run_test(lambda s: s.add_prefix('col_'), s)\n\n def test_add_suffix(self):\n df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})\n s = pd.Series([1, 2, 3, 4])\n\n self._run_test(lambda df: df.add_suffix('_col'), df)\n self._run_test(lambda s: s.add_prefix('_col'), s)\n\n def test_groupby(self):\n df = pd.DataFrame({\n 'group': ['a' if i % 5 == 0 or i % 3 == 0 else 'b' for i in range(100)],\n 'value': [None if i % 11 == 0 else i for i in range(100)]\n })\n self._run_test(lambda df: df.groupby('group').agg(sum), df)\n self._run_test(lambda df: df.groupby('group').sum(), df)\n self._run_test(lambda df: df.groupby('group').median(), df)\n self._run_test(lambda df: df.groupby('group').size(), df)\n self._run_test(lambda df: df.groupby('group').count(), df)\n self._run_test(lambda df: df.groupby('group').max(), df)\n self._run_test(lambda df: df.groupby('group').min(), df)\n self._run_test(lambda df: df.groupby('group').mean(), df)\n\n self._run_test(lambda df: df[df.value > 30].groupby('group').sum(), df)\n self._run_test(lambda df: df[df.value > 30].groupby('group').mean(), df)\n self._run_test(lambda df: df[df.value > 30].groupby('group').size(), df)\n\n # Grouping by a series is not currently supported\n #self._run_test(lambda df: df[df.value > 40].groupby(df.group).sum(), df)\n #self._run_test(lambda df: df[df.value > 40].groupby(df.group).mean(), df)\n #self._run_test(lambda df: df[df.value > 40].groupby(df.group).size(), df)\n\n # Example from https://pandas.pydata.org/docs/user_guide/groupby.html\n arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n\n index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second'])\n\n df = pd.DataFrame({\n 'A': [1, 1, 1, 1, 2, 2, 3, 3], 'B': np.arange(8)\n },\n index=index)\n\n self._run_test(lambda df: df.groupby(['second', 'A']).sum(), df)\n\n def test_groupby_project(self):\n df = GROUPBY_DF\n\n self._run_test(lambda df: df.groupby('group').foo.agg(sum), df)\n\n self._run_test(lambda df: df.groupby('group').sum(), df)\n self._run_test(lambda df: df.groupby('group').foo.sum(), df)\n self._run_test(lambda df: df.groupby('group').bar.sum(), df)\n self._run_test(lambda df: df.groupby('group')['foo'].sum(), df)\n self._run_test(lambda df: df.groupby('group')['baz'].sum(), df)\n self._run_error_test(\n lambda df: df.groupby('group')[['bar', 'baz']].bar.sum(), df)\n self._run_error_test(lambda df: df.groupby('group')[['bat']].sum(), df)\n self._run_error_test(lambda df: df.groupby('group').bat.sum(), df)\n\n self._run_test(lambda df: df.groupby('group').median(), df)\n self._run_test(lambda df: df.groupby('group').foo.median(), df)\n self._run_test(lambda df: df.groupby('group').bar.median(), df)\n self._run_test(lambda df: df.groupby('group')['foo'].median(), df)\n self._run_test(lambda df: df.groupby('group')['baz'].median(), df)\n self._run_test(lambda df: df.groupby('group')[['bar', 'baz']].median(), df)\n\n def test_groupby_errors_non_existent_projection(self):\n df = GROUPBY_DF\n\n # non-existent projection column\n self._run_error_test(\n lambda df: df.groupby('group')[['bar', 'baz']].bar.median(), df)\n self._run_error_test(lambda df: df.groupby('group')[['bad']].median(), df)\n\n self._run_error_test(lambda df: df.groupby('group').bad.median(), df)\n\n def test_groupby_errors_non_existent_label(self):\n df = GROUPBY_DF\n\n # non-existent grouping label\n self._run_error_test(\n lambda df: df.groupby(['really_bad', 'foo', 'bad']).foo.sum(), df)\n self._run_error_test(lambda df: df.groupby('bad').foo.sum(), df)\n\n def test_groupby_callable(self):\n df = GROUPBY_DF\n\n self._run_test(lambda df: df.groupby(lambda x: x % 2).foo.sum(), df)\n self._run_test(lambda df: df.groupby(lambda x: x % 5).median(), df)\n\n def test_set_index(self):\n df = pd.DataFrame({\n # [19, 18, ..]\n 'index1': reversed(range(20)),\n # [15, 16, .., 0, 1, .., 13, 14]\n 'index2': np.roll(range(20), 5),\n # ['', 'a', 'bb', ...]\n 'values': [chr(ord('a') + i) * i for i in range(20)],\n })\n\n self._run_test(lambda df: df.set_index(['index1', 'index2']), df)\n self._run_test(lambda df: df.set_index(['index1', 'index2'], drop=True), df)\n self._run_test(lambda df: df.set_index('values'), df)\n\n self._run_error_test(lambda df: df.set_index('bad'), df)\n self._run_error_test(\n lambda df: df.set_index(['index2', 'bad', 'really_bad']), df)\n\n def test_series_drop_ignore_errors(self):\n midx = pd.MultiIndex(\n levels=[['lama', 'cow', 'falcon'], ['speed', 'weight', 'length']],\n codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)\n\n # drop() requires singleton partitioning unless errors are ignored\n # Add some additional tests here to make sure the implementation works in\n # non-singleton partitioning.\n self._run_test(lambda s: s.drop('lama', level=0, errors='ignore'), s)\n self._run_test(lambda s: s.drop(('cow', 'speed'), errors='ignore'), s)\n self._run_test(lambda s: s.drop('falcon', level=0, errors='ignore'), s)\n\n def test_dataframe_drop_ignore_errors(self):\n midx = pd.MultiIndex(\n levels=[['lama', 'cow', 'falcon'], ['speed', 'weight', 'length']],\n codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n df = pd.DataFrame(\n index=midx,\n columns=['big', 'small'],\n data=[[45, 30], [200, 100], [1.5, 1], [30, 20], [250, 150], [1.5, 0.8],\n [320, 250], [1, 0.8], [0.3, 0.2]])\n\n # drop() requires singleton partitioning unless errors are ignored\n # Add some additional tests here to make sure the implementation works in\n # non-singleton partitioning.\n self._run_test(\n lambda df: df.drop(index='lama', level=0, errors='ignore'), df)\n self._run_test(\n lambda df: df.drop(index=('cow', 'speed'), errors='ignore'), df)\n self._run_test(\n lambda df: df.drop(index='falcon', level=0, errors='ignore'), df)\n self._run_test(\n lambda df: df.drop(index='cow', columns='small', errors='ignore'), df)\n\n def test_groupby_apply(self):\n df = GROUPBY_DF\n\n def median_sum_fn(x):\n return (x.foo + x.bar).median()\n\n # Note this is the same as DataFrameGroupBy.describe. Using it here is\n # just a convenient way to test apply() with a user fn that returns a Series\n describe = lambda df: df.describe()\n\n self._run_test(lambda df: df.groupby('group').foo.apply(describe), df)\n self._run_test(\n lambda df: df.groupby('group')[['foo', 'bar']].apply(describe), df)\n self._run_test(lambda df: df.groupby('group').apply(median_sum_fn), df)\n self._run_test(\n lambda df: df.set_index('group').foo.groupby(level=0).apply(describe),\n df)\n self._run_test(lambda df: df.groupby(level=0).apply(median_sum_fn), df)\n self._run_test(lambda df: df.groupby(lambda x: x % 3).apply(describe), df)\n self._run_test(\n lambda df: df.set_index(['str', 'group', 'bool']).groupby(\n level='group').apply(median_sum_fn),\n df)\n\n @unittest.skip('BEAM-11710')\n def test_groupby_aggregate_grouped_column(self):\n df = pd.DataFrame({\n 'group': ['a' if i % 5 == 0 or i % 3 == 0 else 'b' for i in range(100)],\n 'foo': [None if i % 11 == 0 else i for i in range(100)],\n 'bar': [None if i % 7 == 0 else 99 - i for i in range(100)],\n 'baz': [None if i % 13 == 0 else i * 2 for i in range(100)],\n })\n\n self._run_test(lambda df: df.groupby('group').group.count(), df)\n self._run_test(lambda df: df.groupby('group')[['group', 'bar']].count(), df)\n self._run_test(\n lambda df: df.groupby('group')[['group', 'bar']].apply(\n lambda x: x.describe()),\n df)\n\n def test_merge(self):\n # This is from the pandas doctests, but fails due to re-indexing being\n # order-sensitive.\n df1 = pd.DataFrame({\n 'lkey': ['foo', 'bar', 'baz', 'foo'], 'value': [1, 2, 3, 5]\n })\n df2 = pd.DataFrame({\n 'rkey': ['foo', 'bar', 'baz', 'foo'], 'value': [5, 6, 7, 8]\n })\n self._run_test(\n lambda df1,\n df2: df1.merge(df2, left_on='lkey', right_on='rkey').rename(\n index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n self._run_test(\n lambda df1,\n df2: df1.merge(\n df2, left_on='lkey', right_on='rkey', suffixes=('_left', '_right')).\n rename(index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n\n def test_merge_left_join(self):\n # This is from the pandas doctests, but fails due to re-indexing being\n # order-sensitive.\n df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})\n df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})\n\n self._run_test(\n lambda df1,\n df2: df1.merge(df2, how='left', on='a').rename(index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n\n def test_merge_on_index(self):\n # This is from the pandas doctests, but fails due to re-indexing being\n # order-sensitive.\n df1 = pd.DataFrame({\n 'lkey': ['foo', 'bar', 'baz', 'foo'], 'value': [1, 2, 3, 5]\n }).set_index('lkey')\n df2 = pd.DataFrame({\n 'rkey': ['foo', 'bar', 'baz', 'foo'], 'value': [5, 6, 7, 8]\n }).set_index('rkey')\n\n self._run_test(\n lambda df1,\n df2: df1.merge(df2, left_index=True, right_index=True),\n df1,\n df2)\n\n def test_merge_same_key(self):\n df1 = pd.DataFrame({\n 'key': ['foo', 'bar', 'baz', 'foo'], 'value': [1, 2, 3, 5]\n })\n df2 = pd.DataFrame({\n 'key': ['foo', 'bar', 'baz', 'foo'], 'value': [5, 6, 7, 8]\n })\n self._run_test(\n lambda df1,\n df2: df1.merge(df2, on='key').rename(index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n self._run_test(\n lambda df1,\n df2: df1.merge(df2, on='key', suffixes=('_left', '_right')).rename(\n index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n\n def test_merge_same_key_doctest(self):\n df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})\n df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})\n\n self._run_test(\n lambda df1,\n df2: df1.merge(df2, how='left', on='a').rename(index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n # Test without specifying 'on'\n self._run_test(\n lambda df1,\n df2: df1.merge(df2, how='left').rename(index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n\n def test_merge_same_key_suffix_collision(self):\n df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2], 'a_lsuffix': [5, 6]})\n df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4], 'a_rsuffix': [7, 8]})\n\n self._run_test(\n lambda df1,\n df2: df1.merge(\n df2, how='left', on='a', suffixes=('_lsuffix', '_rsuffix')).rename(\n index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n # Test without specifying 'on'\n self._run_test(\n lambda df1,\n df2: df1.merge(df2, how='left', suffixes=('_lsuffix', '_rsuffix')).\n rename(index=lambda x: '*'),\n df1,\n df2,\n nonparallel=True)\n\n def test_series_getitem(self):\n s = pd.Series([x**2 for x in range(10)])\n self._run_test(lambda s: s[...], s)\n self._run_test(lambda s: s[:], s)\n self._run_test(lambda s: s[s < 10], s)\n self._run_test(lambda s: s[lambda s: s < 10], s)\n\n s.index = s.index.map(float)\n self._run_test(lambda s: s[1.5:6], s)\n\n @parameterized.expand([\n (pd.Series(range(10)), ), # unique\n (pd.Series(list(range(100)) + [0]), ), # non-unique int\n (pd.Series(list(range(100)) + [0]) / 100, ), # non-unique flt\n (pd.Series(['a', 'b', 'c', 'd']), ), # unique str\n (pd.Series(['a', 'b', 'a', 'c', 'd']), ), # non-unique str\n ])\n def test_series_is_unique(self, series):\n self._run_test(lambda s: s.is_unique, series)\n\n def test_dataframe_getitem(self):\n df = pd.DataFrame({'A': [x**2 for x in range(6)], 'B': list('abcdef')})\n self._run_test(lambda df: df['A'], df)\n self._run_test(lambda df: df[['A', 'B']], df)\n\n self._run_test(lambda df: df[:], df)\n self._run_test(lambda df: df[df.A < 10], df)\n\n df.index = df.index.map(float)\n self._run_test(lambda df: df[1.5:4], df)\n\n def test_loc(self):\n dates = pd.date_range('1/1/2000', periods=8)\n # TODO(BEAM-11757): We do not preserve the freq attribute on a DateTime\n # index\n dates.freq = None\n df = pd.DataFrame(\n np.arange(32).reshape((8, 4)),\n index=dates,\n columns=['A', 'B', 'C', 'D'])\n self._run_test(lambda df: df.loc[:], df)\n self._run_test(lambda df: df.loc[:, 'A'], df)\n self._run_test(lambda df: df.loc[:dates[3]], df)\n self._run_test(lambda df: df.loc[df.A > 10], df)\n self._run_test(lambda df: df.loc[lambda df: df.A > 10], df)\n\n def test_series_agg(self):\n s = pd.Series(list(range(16)))\n self._run_test(lambda s: s.agg('sum'), s)\n self._run_test(lambda s: s.agg(['sum']), s)\n self._run_test(lambda s: s.agg(['sum', 'mean']), s, nonparallel=True)\n self._run_test(lambda s: s.agg(['mean']), s, nonparallel=True)\n self._run_test(lambda s: s.agg('mean'), s, nonparallel=True)\n\n def test_append_sort(self):\n # yapf: disable\n df1 = pd.DataFrame({'int': [1, 2, 3], 'str': ['a', 'b', 'c']},\n columns=['int', 'str'],\n index=[1, 3, 5])\n df2 = pd.DataFrame({'int': [4, 5, 6], 'str': ['d', 'e', 'f']},\n columns=['str', 'int'],\n index=[2, 4, 6])\n # yapf: enable\n\n self._run_test(lambda df1, df2: df1.append(df2, sort=True), df1, df2)\n self._run_test(lambda df1, df2: df1.append(df2, sort=False), df1, df2)\n self._run_test(lambda df1, df2: df2.append(df1, sort=True), df1, df2)\n self._run_test(lambda df1, df2: df2.append(df1, sort=False), df1, df2)\n\n def test_dataframe_agg(self):\n df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, 3, 5, 7]})\n self._run_test(lambda df: df.agg('sum'), df)\n self._run_test(lambda df: df.agg(['sum', 'mean']), df, nonparallel=True)\n self._run_test(lambda df: df.agg({'A': 'sum', 'B': 'sum'}), df)\n self._run_test(\n lambda df: df.agg({\n 'A': 'sum', 'B': 'mean'\n }), df, nonparallel=True)\n self._run_test(\n lambda df: df.agg({'A': ['sum', 'mean']}), df, nonparallel=True)\n self._run_test(\n lambda df: df.agg({\n 'A': ['sum', 'mean'], 'B': 'min'\n }),\n df,\n nonparallel=True)\n\n def test_smallest_largest(self):\n df = pd.DataFrame({'A': [1, 1, 2, 2], 'B': [2, 3, 5, 7]})\n self._run_test(lambda df: df.nlargest(1, 'A', keep='all'), df)\n self._run_test(lambda df: df.nsmallest(3, 'A', keep='all'), df)\n self._run_test(lambda df: df.nlargest(3, ['A', 'B'], keep='all'), df)\n\n def test_series_cov_corr(self):\n for s in [pd.Series([1, 2, 3]),\n pd.Series(range(100)),\n pd.Series([x**3 for x in range(-50, 50)])]:\n self._run_test(lambda s: s.std(), s)\n self._run_test(lambda s: s.var(), s)\n self._run_test(lambda s: s.corr(s), s)\n self._run_test(lambda s: s.corr(s + 1), s)\n self._run_test(lambda s: s.corr(s * s), s)\n self._run_test(lambda s: s.cov(s * s), s)\n\n def test_dataframe_cov_corr(self):\n df = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])\n df.loc[df.index[:5], 'a'] = np.nan\n df.loc[df.index[5:10], 'b'] = np.nan\n self._run_test(lambda df: df.corr(), df)\n self._run_test(lambda df: df.cov(), df)\n self._run_test(lambda df: df.corr(min_periods=12), df)\n self._run_test(lambda df: df.cov(min_periods=12), df)\n self._run_test(lambda df: df.corrwith(df.a), df)\n self._run_test(lambda df: df[['a', 'b']].corrwith(df[['b', 'c']]), df)\n\n @unittest.skipIf(PD_VERSION < (1, 2), \"na_action added in pandas 1.2.0\")\n def test_applymap_na_action(self):\n # Replicates a doctest for na_action which is incompatible with\n # doctest framework\n df = pd.DataFrame([[pd.NA, 2.12], [3.356, 4.567]])\n self._run_test(\n lambda df: df.applymap(lambda x: len(str(x)), na_action='ignore'), df)\n\n def test_categorical_groupby(self):\n df = pd.DataFrame({'A': np.arange(6), 'B': list('aabbca')})\n df['B'] = df['B'].astype(pd.CategoricalDtype(list('cab')))\n df = df.set_index('B')\n # TODO(BEAM-11190): These aggregations can be done in index partitions, but\n # it will require a little more complex logic\n self._run_test(lambda df: df.groupby(level=0).sum(), df, nonparallel=True)\n self._run_test(lambda df: df.groupby(level=0).mean(), df, nonparallel=True)\n\n def test_dataframe_eval_query(self):\n df = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])\n self._run_test(lambda df: df.eval('foo = a + b - c'), df)\n self._run_test(lambda df: df.query('a > b + c'), df)\n\n def eval_inplace(df):\n df.eval('foo = a + b - c', inplace=True)\n return df.foo\n\n self._run_test(eval_inplace, df)\n\n # Verify that attempting to access locals raises a useful error\n deferred_df = frame_base.DeferredFrame.wrap(\n expressions.ConstantExpression(df, df[0:0]))\n self.assertRaises(\n NotImplementedError, lambda: deferred_df.eval('foo = a + @b - c'))\n self.assertRaises(\n NotImplementedError, lambda: deferred_df.query('a > @b + c'))\n\n def test_index_name_assignment(self):\n df = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})\n df = df.set_index(['a', 'b'], drop=False)\n\n def change_index_names(df):\n df.index.names = ['A', None]\n return df\n\n self._run_test(change_index_names, df)\n\n @parameterized.expand((x, ) for x in [\n 0,\n [1],\n 3,\n [0, 3],\n [2, 1],\n ['foo', 0],\n [1, 'str'],\n [3, 0, 2, 1],\n ])\n def test_groupby_level_agg(self, level):\n df = GROUPBY_DF.set_index(['group', 'foo', 'bar', 'str'], drop=False)\n self._run_test(lambda df: df.groupby(level=level).bar.max(), df)\n self._run_test(\n lambda df: df.groupby(level=level).sum(numeric_only=True), df)\n self._run_test(\n lambda df: df.groupby(level=level).apply(\n lambda x: (x.foo + x.bar).median()),\n df)\n\n def test_quantile(self):\n df = pd.DataFrame(\n np.array([[1, 1], [2, 10], [3, 100], [4, 100]]), columns=['a', 'b'])\n\n self._run_test(lambda df: df.quantile(0.1), df, nonparallel=True)\n self._run_test(lambda df: df.quantile([0.1, 0.9]), df, nonparallel=True)\n\n self._run_test(lambda df: df.quantile(0.1, axis='columns'), df)\n with self.assertRaisesRegex(frame_base.WontImplementError,\n r\"df\\.quantile\\(q=0\\.1, axis='columns'\\)\"):\n self._run_test(lambda df: df.quantile([0.1, 0.5], axis='columns'), df)\n\n @unittest.skipIf(PD_VERSION < (1, 1), \"drop_na added in pandas 1.1.0\")\n def test_groupby_count_na(self):\n # Verify we can do a groupby.count() that doesn't drop NaN values\n self._run_test(\n lambda df: df.groupby('foo', dropna=True).bar.count(), GROUPBY_DF)\n self._run_test(\n lambda df: df.groupby('foo', dropna=False).bar.count(), GROUPBY_DF)\n\n def test_dataframe_melt(self):\n\n df = pd.DataFrame({\n 'A': {\n 0: 'a', 1: 'b', 2: 'c'\n },\n 'B': {\n 0: 1, 1: 3, 2: 5\n },\n 'C': {\n 0: 2, 1: 4, 2: 6\n }\n })\n\n self._run_test(\n lambda df: df.melt(id_vars=['A'], value_vars=['B'], ignore_index=False),\n df)\n self._run_test(\n lambda df: df.melt(\n id_vars=['A'], value_vars=['B', 'C'], ignore_index=False),\n df)\n self._run_test(\n lambda df: df.melt(\n id_vars=['A'],\n value_vars=['B'],\n var_name='myVarname',\n value_name='myValname',\n ignore_index=False),\n df)\n self._run_test(\n lambda df: df.melt(\n id_vars=['A'], value_vars=['B', 'C'], ignore_index=False),\n df)\n\n df.columns = [list('ABC'), list('DEF')]\n self._run_test(\n lambda df: df.melt(\n col_level=0, id_vars=['A'], value_vars=['B'], ignore_index=False),\n df)\n self._run_test(\n lambda df: df.melt(\n id_vars=[('A', 'D')], value_vars=[('B', 'E')], ignore_index=False),\n df)\n\n def test_fillna_columns(self):\n df = pd.DataFrame(\n [[np.nan, 2, np.nan, 0], [3, 4, np.nan, 1], [np.nan, np.nan, np.nan, 5],\n [np.nan, 3, np.nan, 4], [3, np.nan, np.nan, 4]],\n columns=list('ABCD'))\n\n self._run_test(lambda df: df.fillna(method='ffill', axis='columns'), df)\n self._run_test(\n lambda df: df.fillna(method='ffill', axis='columns', limit=1), df)\n self._run_test(\n lambda df: df.fillna(method='bfill', axis='columns', limit=1), df)\n\n # Intended behavior is unclear here. See\n # https://github.com/pandas-dev/pandas/issues/40989\n # self._run_test(lambda df: df.fillna(axis='columns', value=100,\n # limit=2), df)\n\n def test_append_verify_integrity(self):\n df1 = pd.DataFrame({'A': range(10), 'B': range(10)}, index=range(10))\n df2 = pd.DataFrame({'A': range(10), 'B': range(10)}, index=range(9, 19))\n\n self._run_error_test(\n lambda s1,\n s2: s1.append(s2, verify_integrity=True),\n df1['A'],\n df2['A'],\n construction_time=False)\n self._run_error_test(\n lambda df1,\n df2: df1.append(df2, verify_integrity=True),\n df1,\n df2,\n construction_time=False)\n\n\nclass AllowNonParallelTest(unittest.TestCase):\n def _use_non_parallel_operation(self):\n _ = frame_base.DeferredFrame.wrap(\n expressions.PlaceholderExpression(pd.Series([1, 2, 3]))).replace(\n 'a', 'b', limit=1)\n\n def test_disallow_non_parallel(self):\n with self.assertRaises(expressions.NonParallelOperation):\n self._use_non_parallel_operation()\n\n def test_allow_non_parallel_in_context(self):\n with beam.dataframe.allow_non_parallel_operations():\n self._use_non_parallel_operation()\n\n def test_allow_non_parallel_nesting(self):\n # disallowed\n with beam.dataframe.allow_non_parallel_operations():\n # allowed\n self._use_non_parallel_operation()\n with beam.dataframe.allow_non_parallel_operations(False):\n # disallowed again\n with self.assertRaises(expressions.NonParallelOperation):\n self._use_non_parallel_operation()\n # allowed\n self._use_non_parallel_operation()\n # disallowed\n with self.assertRaises(expressions.NonParallelOperation):\n self._use_non_parallel_operation()\n\n\nclass ConstructionTimeTest(unittest.TestCase):\n \"\"\"Tests for operations that can be executed eagerly.\"\"\"\n DF = pd.DataFrame({\n 'str_col': ['foo', 'bar'],\n 'int_col': [1, 2],\n 'flt_col': [1.1, 2.2],\n })\n DEFERRED_DF = frame_base.DeferredFrame.wrap(\n expressions.PlaceholderExpression(DF))\n\n def _run_test(self, fn):\n self.assertEqual(fn(self.DEFERRED_DF), fn(self.DF))\n\n @parameterized.expand(DF.columns)\n def test_series_name(self, col_name):\n self._run_test(lambda df: df[col_name])\n\n @parameterized.expand(DF.columns)\n def test_series_dtype(self, col_name):\n self._run_test(lambda df: df[col_name].dtype)\n self._run_test(lambda df: df[col_name].dtypes)\n\n def test_dataframe_columns(self):\n self._run_test(lambda df: list(df.columns))\n\n def test_dataframe_dtypes(self):\n self._run_test(lambda df: list(df.dtypes))\n\n\nclass DocstringTest(unittest.TestCase):\n @parameterized.expand([\n (frames.DeferredDataFrame, pd.DataFrame),\n (frames.DeferredSeries, pd.Series),\n (frames._DeferredIndex, pd.Index),\n (frames._DeferredStringMethods, pd.core.strings.StringMethods),\n (frames.DeferredGroupBy, pd.core.groupby.generic.DataFrameGroupBy),\n (frames._DeferredGroupByCols, pd.core.groupby.generic.DataFrameGroupBy),\n ])\n @unittest.skip('BEAM-12074')\n def test_docs_defined(self, beam_type, pd_type):\n beam_attrs = set(dir(beam_type))\n pd_attrs = set(dir(pd_type))\n\n docstring_required = sorted([\n attr for attr in beam_attrs.intersection(pd_attrs)\n if getattr(pd_type, attr).__doc__ and not attr.startswith('_')\n ])\n\n docstring_missing = [\n attr for attr in docstring_required\n if not getattr(beam_type, attr).__doc__\n ]\n\n self.assertTrue(\n len(docstring_missing) == 0,\n f'{beam_type.__name__} is missing a docstring for '\n f'{len(docstring_missing)}/{len(docstring_required)} '\n f'({len(docstring_missing)/len(docstring_required):%}) '\n f'operations:\\n{docstring_missing}')\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.testing.assert_series_equal", "pandas.Series", "pandas.MultiIndex", "numpy.arange", "pandas.DataFrame", "pandas.MultiIndex.from_arrays", "pandas.DatetimeIndex", "numpy.random.randn", "pandas.__version__.split", "pandas.testing.assert_frame_equal", "pandas.date_range", "numpy.array", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
ostamand/tensorflow-tabnet
[ "cc676d75a5879df61d3b154ea783fbc364caf2a2" ]
[ "tabnet/models/model.py" ]
[ "from typing import List, Tuple\n\nimport tensorflow as tf\n\nfrom tabnet.models.transformers import (\n FeatureTransformer,\n AttentiveTransformer,\n)\n\n\nclass TabNet(tf.keras.Model):\n def __init__(\n self,\n num_features: int,\n feature_dim: int,\n output_dim: int,\n feature_columns: List = None,\n n_step: int = 1,\n n_total: int = 4,\n n_shared: int = 2,\n relaxation_factor: float = 1.5,\n bn_epsilon: float = 1e-5,\n bn_momentum: float = 0.7,\n bn_virtual_divider: int = 1,\n ):\n \"\"\"TabNet\n\n Will output a vector of size output_dim.\n\n Args:\n num_features (int): Number of features.\n feature_dim (int): Embedding feature dimention to use.\n output_dim (int): Output dimension.\n feature_columns (List, optional): If defined will add a DenseFeatures layer first. Defaults to None.\n n_step (int, optional): Total number of steps. Defaults to 1.\n n_total (int, optional): Total number of feature transformer blocks. Defaults to 4.\n n_shared (int, optional): Number of shared feature transformer blocks. Defaults to 2.\n relaxation_factor (float, optional): >1 will allow features to be used more than once. Defaults to 1.5.\n bn_epsilon (float, optional): Batch normalization, epsilon. Defaults to 1e-5.\n bn_momentum (float, optional): Batch normalization, momentum. Defaults to 0.7.\n bn_virtual_divider (int, optional): Batch normalization. Full batch will be divided by this.\n \"\"\"\n super(TabNet, self).__init__()\n self.output_dim, self.num_features = output_dim, num_features\n self.n_step, self.relaxation_factor = n_step, relaxation_factor\n self.feature_columns = feature_columns\n\n if feature_columns is not None:\n self.input_features = tf.keras.layers.DenseFeatures(feature_columns)\n\n # ? Switch to Ghost Batch Normalization\n self.bn = tf.keras.layers.BatchNormalization(\n momentum=bn_momentum, epsilon=bn_epsilon\n )\n\n kargs = {\n \"feature_dim\": feature_dim + output_dim,\n \"n_total\": n_total,\n \"n_shared\": n_shared,\n \"bn_momentum\": bn_momentum,\n \"bn_virtual_divider\": bn_virtual_divider,\n }\n\n # first feature transformer block is built first to get the shared blocks\n self.feature_transforms: List[FeatureTransformer] = [\n FeatureTransformer(**kargs)\n ]\n self.attentive_transforms: List[AttentiveTransformer] = []\n for i in range(n_step):\n self.feature_transforms.append(\n FeatureTransformer(**kargs, fcs=self.feature_transforms[0].shared_fcs)\n )\n self.attentive_transforms.append(\n AttentiveTransformer(num_features, bn_momentum, bn_virtual_divider)\n )\n\n def call(\n self, features: tf.Tensor, training: bool = None, alpha: float = 0.0\n ) -> Tuple[tf.Tensor, tf.Tensor]:\n if self.feature_columns is not None:\n features = self.input_features(features)\n\n bs = tf.shape(features)[0]\n out_agg = tf.zeros((bs, self.output_dim))\n prior_scales = tf.ones((bs, self.num_features))\n masks = []\n\n features = self.bn(features, training=training)\n masked_features = features\n\n total_entropy = 0.0\n\n for step_i in range(self.n_step + 1):\n x = self.feature_transforms[step_i](\n masked_features, training=training, alpha=alpha\n )\n\n if step_i > 0:\n out = tf.keras.activations.relu(x[:, : self.output_dim])\n out_agg += out\n\n # no need to build the features mask for the last step\n if step_i < self.n_step:\n x_for_mask = x[:, self.output_dim :]\n\n mask_values = self.attentive_transforms[step_i](\n x_for_mask, prior_scales, training=training, alpha=alpha\n )\n\n # relaxation factor of 1 forces the feature to be only used once.\n prior_scales *= self.relaxation_factor - mask_values\n\n masked_features = tf.multiply(mask_values, features)\n\n # entropy is used to penalize the amount of sparsity in feature selection\n total_entropy = tf.reduce_mean(\n tf.reduce_sum(\n tf.multiply(mask_values, tf.math.log(mask_values + 1e-15)),\n axis=1,\n )\n )\n\n masks.append(tf.expand_dims(tf.expand_dims(mask_values, 0), 3))\n\n loss = total_entropy / self.n_step\n\n return out_agg, loss, masks\n" ]
[ [ "tensorflow.multiply", "tensorflow.keras.layers.DenseFeatures", "tensorflow.shape", "tensorflow.zeros", "tensorflow.ones", "tensorflow.expand_dims", "tensorflow.math.log", "tensorflow.keras.activations.relu", "tensorflow.keras.layers.BatchNormalization" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Danial-Hussain/MyModel
[ "acbe4354a7b5e3dd32cdd92f3b4003c5848bbb3d" ]
[ "server/algo/utils.py" ]
[ "from typing import Union\nfrom django.core import exceptions\nimport statsmodels.api as sm\nfrom io import StringIO\nimport sklearn as sk\nimport numpy as np\nimport pandas as pd\nimport typing\nimport jwt\nimport os\n\n\ndef authorize(token: str) -> typing.Union[str, None]:\n \"\"\"\n Authorize jwt token\n \"\"\"\n try:\n decoded = jwt.decode(\n jwt=token, key=os.environ.get(\"JWT_KEY\"), algorithms=\"HS256\"\n )\n except (KeyError, jwt.exceptions.InvalidSignatureError):\n return None\n else:\n return decoded[\"username\"]\n\n\ndef parseCsv(csv: str) -> pd.DataFrame:\n \"\"\"\n Parse csv string to dataframe\n \"\"\"\n data = StringIO(csv)\n df = pd.read_csv(data, sep=\",\")\n return df\n\n\ndef parseVariables(\n model_data: pd.DataFrame, model_resp: pd.DataFrame, model_pred: pd.DataFrame\n) -> typing.Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Get X and Y variables from request data\n \"\"\"\n Y = model_data[[model_resp[\"value\"]]]\n X = model_data[[datapoint[\"value\"] for datapoint in model_pred]]\n X = sm.add_constant(X)\n return (X, Y)\n\n\ndef compute_cm_scores(\n Y: typing.Union[np.array, pd.DataFrame], Y_pred: np.array\n) -> typing.Tuple[float, float, float]:\n \"\"\"\n Compute metrics from confusion matrix\n \"\"\"\n cm = sk.metrics.confusion_matrix(Y, list(map(round, Y_pred)))\n recall = cm[0][0] / (cm[0][0] + cm[1][0])\n precision = cm[0][0] / (cm[0][0] + cm[0][1])\n accuracy = cm[0][0] + cm[1][1] / (cm[0][0] + cm[0][1] + cm[1][0] + cm[1][1])\n f_measure = 2 * recall * precision / (recall + precision)\n return recall, accuracy, precision, f_measure\n\n\ndef regression_metrics(\n Y: typing.Union[np.array, pd.DataFrame], Y_pred: np.array\n) -> typing.Tuple[float, float, float]:\n \"\"\"\n Compute regression model metrics\n \"\"\"\n mae = sk.metrics.mean_absolute_error(Y, Y_pred)\n mse = sk.metrics.mean_squared_error(Y, Y_pred)\n r2 = sk.metrics.r2_score(Y, Y_pred)\n return mae, mse, r2\n\nparseToken = lambda r: r.headers[\"Authorization\"].split(\" \")[1]" ]
[ [ "sklearn.metrics.mean_absolute_error", "pandas.read_csv", "sklearn.metrics.r2_score", "sklearn.metrics.mean_squared_error" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
bfbechlin/meanderpy
[ "502cf6a0cb4b254908977576fd5edf477ccd740e" ]
[ "meanderpy/main00.py" ]
[ "import meanderpy as mp\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nONE_YEAR = 365*24*60*60.0\n\nL = 20000\nds = 100\n\nx = np.linspace(0, L, int(L/ds) + 1)\ny = 500 * np.exp(( 1.0 / L) * x) * np.cos((x / L) * 16 * np.pi) / (np.exp((x - 0.75 * L) / (0.025 * L)) + 1)\n\nz = np.tan(5.0 * np.pi / 180) / (2 * L) * (x ** 2 + L * ( L - 2 * x ) )\n\nevents = [\n mp.ChannelEvent(nit = 100, saved_ts = 25, mode='INCISION', kv = 0.0033 / ONE_YEAR),\n mp.ChannelEvent(nit = 100, saved_ts = 25, mode='AGGREGATION', aggr_factor=2, kv = 0.002 / ONE_YEAR)\n]\n\nchannel = mp.Channel(x, y)\nbasin = mp.Basin(x, z)\n\nbelt = mp.ChannelBelt(channel, basin)\nfor event in events:\n belt.simulate(event)\n\nmodel = belt.build_3d_model(25)\n\ndef plots():\n for xsec in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:\n model.plot_xsection(xsec, 3)\n plt.show()\n\n#plots()\n#model.plot()\n#model.render()\nmodel.export_objs(ve = 3)" ]
[ [ "numpy.tan", "numpy.exp", "numpy.cos", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LibRec-Practical/ideaman-offline
[ "f8341fc9ca77adcc1191c01037dda18c02d77b29" ]
[ "ideaman_rec/Sort/GBDT/GBDT.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn import metrics\nimport matplotlib.pylab as plt\nfrom sklearn.model_selection import train_test_split\n\nimport sys, os\n\nsys.path.append(\"../../\")\nsys.path.extend([os.path.join(root, name) for root, dirs, _ in os.walk(\"../../\") for name in dirs])\n\n\ndef load_dataset(path):\n df = pd.read_csv(path, header=None)\n df = df.dropna()\n X = df.drop([1824], axis=1)\n Y = df[1824]\n return X, Y\n\n\ndef load_model(*args, **kwargs):\n gbdt = GradientBoostingClassifier(random_state=10, *args, **kwargs)\n return gbdt\n\n\ndef train(model, X, Y):\n model.fit(X, Y)\n\n\ndef predict(model, X):\n return model.predict(X)\n\n\ndef run():\n # 加载数据\n X, Y = load_dataset()\n # 加载模型\n model = load_dataset()\n # 训练数据\n train(model, X, Y)\n\n" ]
[ [ "pandas.read_csv", "sklearn.ensemble.GradientBoostingClassifier" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
JRMeyer/tensorflow-tutorial
[ "dbbf65bc7e4516a61d27d30954bf59e1477e28f3" ]
[ "logistic_regression_predict.py" ]
[ "import numpy as np\nimport tensorflow as tf\nimport tarfile\nimport os\n\ndef csv_to_numpy_array(filePath, delimiter):\n return np.genfromtxt(filePath, delimiter=delimiter, dtype=None)\n\ndef import_data():\n if \"data\" not in os.listdir(os.getcwd()):\n # Untar directory of data if we haven't already\n tarObject = tarfile.open(\"data.tar.gz\")\n tarObject.extractall()\n tarObject.close()\n print(\"Extracted tar to current directory\")\n else:\n # we've already extracted the files\n pass\n\n print(\"loading training data\")\n trainX = csv_to_numpy_array(\"data/trainX.csv\", delimiter=\"\\t\")\n trainY = csv_to_numpy_array(\"data/trainY.csv\", delimiter=\"\\t\")\n print(\"loading test data\")\n testX = csv_to_numpy_array(\"data/testX.csv\", delimiter=\"\\t\")\n testY = csv_to_numpy_array(\"data/testY.csv\", delimiter=\"\\t\")\n return trainX,trainY,testX,testY\n\n\n###################\n### IMPORT DATA ###\n###################\n\ntrainX,trainY,testX,testY = import_data()\n\n\n#########################\n### GLOBAL PARAMETERS ###\n#########################\n\n# Get our dimensions for our different variables and placeholders:\n# numFeatures = the number of words extracted from each email\nnumFeatures = trainX.shape[1]\n# numLabels = number of classes we are predicting (here just 2: ham or spam)\nnumLabels = trainY.shape[1]\n\n#create a tensorflow session\nsess = tf.Session()\n\n\n####################\n### PLACEHOLDERS ###\n####################\n\n# X = X-matrix / feature-matrix / data-matrix... It's a tensor to hold our email\n# data. 'None' here means that we can hold any number of emails\nX = tf.placeholder(tf.float32, [None, numFeatures])\n# yGold = Y-matrix / label-matrix / labels... This will be our correct answers\n# matrix. Every row has either [1,0] for SPAM or [0,1] for HAM. 'None' here\n# means that we can hold any number of emails\nyGold = tf.placeholder(tf.float32, [None, numLabels])\n\n\n#################\n### VARIABLES ###\n#################\n\n#all values must be initialized to a value before loading can occur\n\nweights = tf.Variable(tf.zeros([numFeatures,numLabels]))\n\nbias = tf.Variable(tf.zeros([1,numLabels]))\n\n########################\n### OPS / OPERATIONS ###\n########################\n\n#since we don't have to train the model, the only Ops are the prediction operations\n\napply_weights_OP = tf.matmul(X, weights, name=\"apply_weights\")\nadd_bias_OP = tf.add(apply_weights_OP, bias, name=\"add_bias\")\nactivation_OP = tf.nn.sigmoid(add_bias_OP, name=\"activation\")\n\n\n# argmax(activation_OP, 1) gives the label our model thought was most likely\n# argmax(yGold, 1) is the correct label\ncorrect_predictions_OP = tf.equal(tf.argmax(activation_OP,1),tf.argmax(yGold,1))\n\n# False is 0 and True is 1, what was our average?\naccuracy_OP = tf.reduce_mean(tf.cast(correct_predictions_OP, \"float\"))\n\n# Initializes everything we've defined made above, but doesn't run anything\n# until sess.run()\ninit_OP = tf.initialize_all_variables()\n\nsess.run(init_OP) #initialize variables BEFORE loading\n\n#load variables from file\nsaver = tf.train.Saver()\nsaver.restore(sess, \"trained_variables.ckpt\")\n\n#####################\n### RUN THE GRAPH ###\n#####################\n\n# Initialize all tensorflow objects\n# sess.run(init_OP)\n\n#method for converting tensor label to string label\ndef labelToString(label):\n if np.argmax(label) == 0:\n return \"ham\"\n else:\n return \"spam\"\n\n#make prediction on a given test set item\ndef predict(features, goldLabel):\n #run through graph\n tensor_prediction = sess.run(activation_OP, feed_dict={X: features.reshape(1, len(features)), yGold: goldLabel.reshape(1, len(goldLabel))}) #had to make sure that each input in feed_dict was an array\n prediction = labelToString(tensor_prediction)\n actual = labelToString(goldLabel)\n print(\"regression predicts email to be %s and is actually %s\" %(prediction, actual))\n\nif __name__ == \"__main__\":\n\n #show predictions and accuracy of entire test set\n prediction, evaluation = sess.run([activation_OP, accuracy_OP], feed_dict={X: testX, yGold: testY})\n\n for i in range(len(testX)):\n print(\"regression predicts email %s to be %s and is actually %s\" %(str(i + 1), labelToString(prediction[i]), labelToString(testY[i])))\n print(\"overall accuracy of dataset: %s percent\" %str(evaluation))\n\n" ]
[ [ "tensorflow.matmul", "tensorflow.nn.sigmoid", "tensorflow.zeros", "tensorflow.cast", "tensorflow.placeholder", "numpy.genfromtxt", "tensorflow.initialize_all_variables", "numpy.argmax", "tensorflow.add", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
uditarora/pytorch-lightning
[ "7245e48153909d9de8458b1f5b8b2bc740d80104" ]
[ "pytorch_lightning/trainer/trainer.py" ]
[ "import inspect\nimport os\nfrom argparse import ArgumentParser, Namespace\nfrom typing import Union, Optional, List, Dict, Tuple, Iterable, Any\n\nimport torch\nimport torch.distributed as torch_distrib\nimport torch.multiprocessing as mp\nfrom torch.utils.data import DataLoader\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, Callback, ProgressBarBase\nfrom pytorch_lightning.core.lightning import LightningModule\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.profiler import SimpleProfiler, PassThroughProfiler, BaseProfiler\nfrom pytorch_lightning.trainer.seed import seed_everything\nfrom pytorch_lightning.trainer.auto_mix_precision import TrainerAMPMixin\nfrom pytorch_lightning.trainer.callback_config import TrainerCallbackConfigMixin\nfrom pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin\nfrom pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin\nfrom pytorch_lightning.trainer.deprecated_api import TrainerDeprecatedAPITillVer0_8, TrainerDeprecatedAPITillVer0_9\nfrom pytorch_lightning.trainer.distrib_data_parallel import TrainerDDPMixin\nfrom pytorch_lightning.trainer.distrib_parts import (\n TrainerDPMixin, parse_gpu_ids, determine_root_gpu_device, pick_multiple_gpus)\nfrom pytorch_lightning.trainer.evaluation_loop import TrainerEvaluationLoopMixin\nfrom pytorch_lightning.trainer.logging import TrainerLoggingMixin\nfrom pytorch_lightning.trainer.model_hooks import TrainerModelHooksMixin\nfrom pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin\nfrom pytorch_lightning.trainer.supporters import TensorRunningAccum\nfrom pytorch_lightning.trainer.training_io import TrainerIOMixin\nfrom pytorch_lightning.trainer.training_loop import TrainerTrainLoopMixin\nfrom pytorch_lightning.trainer.training_tricks import TrainerTrainingTricksMixin\nfrom pytorch_lightning.trainer.lr_finder import TrainerLRFinderMixin\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\nfrom pytorch_lightning.utilities import rank_zero_warn, parsing\n\ntry:\n from apex import amp\nexcept ImportError:\n APEX_AVAILABLE = False\nelse:\n APEX_AVAILABLE = True\n\ntry:\n import torch_xla\n import torch_xla.core.xla_model as xm\n import torch_xla.distributed.xla_multiprocessing as xmp\nexcept ImportError:\n XLA_AVAILABLE = False\nelse:\n XLA_AVAILABLE = True\n\ntry:\n import horovod.torch as hvd\nexcept ImportError:\n HOROVOD_AVAILABLE = False\nelse:\n HOROVOD_AVAILABLE = True\n\n\nclass Trainer(\n TrainerIOMixin,\n TrainerOptimizersMixin,\n TrainerAMPMixin,\n TrainerDPMixin,\n TrainerDDPMixin,\n TrainerLoggingMixin,\n TrainerModelHooksMixin,\n TrainerTrainingTricksMixin,\n TrainerDataLoadingMixin,\n TrainerEvaluationLoopMixin,\n TrainerTrainLoopMixin,\n TrainerCallbackConfigMixin,\n TrainerCallbackHookMixin,\n TrainerLRFinderMixin,\n TrainerDeprecatedAPITillVer0_8,\n TrainerDeprecatedAPITillVer0_9,\n):\n DEPRECATED_IN_0_8 = (\n 'gradient_clip', 'nb_gpu_nodes', 'max_nb_epochs', 'min_nb_epochs',\n 'add_row_log_interval', 'nb_sanity_val_steps', 'tng_tqdm_dic',\n )\n DEPRECATED_IN_0_9 = ('use_amp', 'show_progress_bar', 'training_tqdm_dict', 'num_tpu_cores')\n\n def __init__(\n self,\n logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,\n checkpoint_callback: Union[ModelCheckpoint, bool] = True,\n early_stop_callback: Optional[Union[EarlyStopping, bool]] = False,\n callbacks: Optional[List[Callback]] = None,\n default_root_dir: Optional[str] = None,\n gradient_clip_val: float = 0,\n process_position: int = 0,\n num_nodes: int = 1,\n num_processes: int = 1,\n gpus: Optional[Union[List[int], str, int]] = None,\n auto_select_gpus: bool = False,\n tpu_cores: Optional[Union[List[int], int]] = None,\n log_gpu_memory: Optional[str] = None,\n progress_bar_refresh_rate: int = 1,\n overfit_pct: float = 0.0,\n track_grad_norm: Union[int, float, str] = -1,\n check_val_every_n_epoch: int = 1,\n fast_dev_run: bool = False,\n accumulate_grad_batches: Union[int, Dict[int, int], List[list]] = 1,\n max_epochs: int = 1000,\n min_epochs: int = 1,\n max_steps: Optional[int] = None,\n min_steps: Optional[int] = None,\n train_percent_check: float = 1.0,\n val_percent_check: float = 1.0,\n test_percent_check: float = 1.0,\n val_check_interval: float = 1.0,\n log_save_interval: int = 100,\n row_log_interval: int = 10,\n add_row_log_interval=None, # backward compatible, todo: remove in v0.8.0\n distributed_backend: Optional[str] = None,\n precision: int = 32,\n print_nan_grads: bool = False, # backward compatible, todo: remove in v0.9.0\n weights_summary: Optional[str] = 'top',\n weights_save_path: Optional[str] = None,\n num_sanity_val_steps: int = 2,\n truncated_bptt_steps: Optional[int] = None,\n resume_from_checkpoint: Optional[str] = None,\n profiler: Optional[Union[BaseProfiler, bool]] = None,\n benchmark: bool = False,\n deterministic: bool = False,\n reload_dataloaders_every_epoch: bool = False,\n auto_lr_find: Union[bool, str] = False,\n replace_sampler_ddp: bool = True,\n terminate_on_nan: bool = False,\n auto_scale_batch_size: Union[str, bool] = False,\n num_tpu_cores: Optional[int] = None, # backward compatible, todo: remove in v0.9.0\n amp_level: str = 'O1', # backward compatible, todo: remove in v0.8.0\n default_save_path=None, # backward compatible, todo: remove in v0.8.0\n gradient_clip=None, # backward compatible, todo: remove in v0.8.0\n nb_gpu_nodes=None, # backward compatible, todo: remove in v0.8.0\n max_nb_epochs=None, # backward compatible, todo: remove in v0.8.0\n min_nb_epochs=None, # backward compatible, todo: remove in v0.8.0\n use_amp=None, # backward compatible, todo: remove in v0.9.0\n show_progress_bar=None, # backward compatible, todo: remove in v0.9.0\n nb_sanity_val_steps=None, # backward compatible, todo: remove in v0.8.0\n ):\n r\"\"\"\n\n Customize every aspect of training via flags\n\n Args:\n logger: Logger (or iterable collection of loggers) for experiment tracking.\n\n checkpoint_callback: Callback for checkpointing.\n\n early_stop_callback (:class:`pytorch_lightning.callbacks.EarlyStopping`):\n\n callbacks: Add a list of callbacks.\n\n default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed\n\n default_save_path:\n .. warning:: .. deprecated:: 0.7.3\n\n Use `default_root_dir` instead. Will remove 0.9.0.\n\n gradient_clip_val: 0 means don't clip.\n\n gradient_clip:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `gradient_clip_val` instead. Will remove 0.9.0.\n\n process_position: orders the progress bar when running multiple models on same machine.\n\n num_nodes: number of GPU nodes for distributed training.\n\n nb_gpu_nodes:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `num_nodes` instead. Will remove 0.9.0.\n\n gpus: Which GPUs to train on.\n\n auto_select_gpus:\n\n If enabled and `gpus` is an integer, pick available\n gpus automatically. This is especially useful when\n GPUs are configured to be in \"exclusive mode\", such\n that only one process at a time can access them.\n\n tpu_cores: How many TPU cores to train on (1 or 8) / Single TPU to train on [1]\n\n num_tpu_cores: How many TPU cores to train on (1 or 8)\n .. warning:: .. deprecated:: 0.7.6. Will remove 0.9.0.\n\n log_gpu_memory: None, 'min_max', 'all'. Might slow performance\n\n show_progress_bar:\n .. warning:: .. deprecated:: 0.7.2\n\n Set `progress_bar_refresh_rate` to positive integer to enable. Will remove 0.9.0.\n\n progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.\n Ignored when a custom callback is passed to :paramref:`~Trainer.callbacks`.\n\n overfit_pct: How much of training-, validation-, and test dataset to check.\n\n track_grad_norm: -1 no tracking. Otherwise tracks that p-norm. May be set to 'inf' infinity-norm.\n\n check_val_every_n_epoch: Check val every n train epochs.\n\n fast_dev_run: runs 1 batch of train, test and val to find any bugs (ie: a sort of unit test).\n\n accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.\n\n max_epochs: Stop training once this number of epochs is reached.\n\n max_nb_epochs:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `max_epochs` instead. Will remove 0.9.0.\n\n min_epochs: Force training for at least these many epochs\n\n min_nb_epochs:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `min_epochs` instead. Will remove 0.9.0.\n\n max_steps: Stop training after this number of steps. Disabled by default (None).\n\n min_steps: Force training for at least these number of steps. Disabled by default (None).\n\n train_percent_check: How much of training dataset to check.\n\n val_percent_check: How much of validation dataset to check.\n\n test_percent_check: How much of test dataset to check.\n\n val_check_interval: How often within one training epoch to check the validation set\n\n log_save_interval: Writes logs to disk this often\n\n row_log_interval: How often to add logging rows (does not write to disk)\n\n add_row_log_interval:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `row_log_interval` instead. Will remove 0.9.0.\n\n distributed_backend: The distributed backend to use (dp, ddp, ddp2, ddp_spawn)\n\n use_amp:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `precision` instead. Will remove 0.9.0.\n\n precision: Full precision (32), half precision (16).\n\n print_nan_grads:\n .. warning:: .. deprecated:: 0.7.2\n\n Has no effect. When detected, NaN grads will be printed automatically.\n Will remove 0.9.0.\n\n weights_summary: Prints a summary of the weights when training begins.\n\n weights_save_path: Where to save weights if specified. Will override default_root_dir\n for checkpoints only. Use this if for whatever reason you need the checkpoints\n stored in a different place than the logs written in `default_root_dir`.\n\n amp_level: The optimization level to use (O1, O2, etc...).\n\n num_sanity_val_steps: Sanity check runs n batches of val before starting the training routine.\n\n nb_sanity_val_steps:\n .. warning:: .. deprecated:: 0.7.0\n\n Use `num_sanity_val_steps` instead. Will remove 0.8.0.\n\n truncated_bptt_steps: Truncated back prop breaks performs backprop every k steps of\n\n resume_from_checkpoint: To resume training from a specific checkpoint pass in the path here.\n\n profiler: To profile individual steps during training and assist in\n\n reload_dataloaders_every_epoch: Set to True to reload dataloaders every epoch\n\n auto_lr_find: If set to True, will `initially` run a learning rate finder,\n trying to optimize initial learning for faster convergence. Sets learning\n rate in self.lr or self.learning_rate in the LightningModule.\n To use a different key, set a string instead of True with the key name.\n\n replace_sampler_ddp: Explicitly enables or disables sampler replacement.\n If not specified this will toggled automatically ddp is used\n\n benchmark: If true enables cudnn.benchmark.\n\n deterministic: If true enables cudnn.deterministic\n\n terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the\n end of each training batch, if any of the parameters or the loss are NaN or +/-inf.\n\n auto_scale_batch_size: If set to True, will `initially` run a batch size\n finder trying to find the largest batch size that fits into memory.\n The result will be stored in self.batch_size in the LightningModule.\n Additionally, can be set to either `power` that estimates the batch size through\n a power search or `binsearch` that estimates the batch size through a binary search.\n \"\"\"\n super().__init__()\n\n self.deterministic = deterministic\n torch.backends.cudnn.deterministic = self.deterministic\n if self.deterministic:\n # fixing non-deterministic part of horovod\n # https://github.com/PyTorchLightning/pytorch-lightning/pull/1572/files#r420279383\n os.environ[\"HOROVOD_FUSION_THRESHOLD\"] = str(0)\n\n # Init callbacks\n self.callbacks = callbacks or []\n self.on_init_start()\n\n # benchmarking\n self.benchmark = benchmark\n torch.backends.cudnn.benchmark = self.benchmark\n\n # Transfer params\n self.num_nodes = num_nodes\n # Backward compatibility, TODO: remove in v0.8.0\n if nb_gpu_nodes is not None:\n rank_zero_warn(\"Argument `nb_gpu_nodes` has renamed to `num_nodes` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.num_gpu_nodes = nb_gpu_nodes\n self.log_gpu_memory = log_gpu_memory\n\n self.gradient_clip_val = gradient_clip_val\n # Backward compatibility, TODO: remove in v0.8.0\n if gradient_clip is not None:\n rank_zero_warn(\"Argument `gradient_clip` has renamed to `gradient_clip_val` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.gradient_clip = gradient_clip\n\n self.check_val_every_n_epoch = check_val_every_n_epoch\n\n if not isinstance(track_grad_norm, (int, float)) and track_grad_norm != 'inf':\n raise MisconfigurationException(\n \"track_grad_norm can be an int, a float or 'inf' (infinity norm).\")\n self.track_grad_norm = float(track_grad_norm)\n\n self.on_gpu = True if (gpus and torch.cuda.is_available()) else False\n\n # tpu config\n if num_tpu_cores is not None:\n rank_zero_warn(\"Argument `num_tpu_cores` is now set by `tpu_cores` since v0.7.6\"\n \" and this argument will be removed in v0.9.0\", DeprecationWarning)\n\n if tpu_cores is None:\n tpu_cores = num_tpu_cores\n self.on_tpu = tpu_cores is not None\n self.tpu_cores = tpu_cores\n assert self.tpu_cores in (1, 8, None) or (\n isinstance(self.tpu_cores, (list, tuple, set)) and len(self.tpu_cores) == 1\n ), '`tpu_cores` can only be 1, 8 or [<1-8>]'\n\n self.tpu_id = tpu_cores[0] if isinstance(tpu_cores, list) else None\n\n if num_processes != 1 and distributed_backend != \"ddp_cpu\":\n rank_zero_warn(\"num_processes is only used for distributed_backend=\\\"ddp_cpu\\\". Ignoring it.\")\n self.num_processes = num_processes\n\n self.weights_summary = weights_summary\n\n self.max_epochs = max_epochs\n # Backward compatibility, TODO: remove in v0.8.0\n if max_nb_epochs is not None:\n rank_zero_warn(\"Argument `max_nb_epochs` has renamed to `max_epochs` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.max_nb_epochs = max_nb_epochs\n\n self.min_epochs = min_epochs\n # Backward compatibility, TODO: remove in v0.8.0\n if min_nb_epochs is not None:\n rank_zero_warn(\"Argument `min_nb_epochs` has renamed to `min_epochs` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.min_nb_epochs = min_nb_epochs\n\n self.max_steps = max_steps\n self.min_steps = min_steps\n\n self.num_sanity_val_steps = num_sanity_val_steps\n # Backward compatibility, TODO: remove in v0.8.0\n if nb_sanity_val_steps is not None:\n rank_zero_warn(\"Argument `nb_sanity_val_steps` has renamed to \"\n \"`num_sanity_val_steps` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n self.nb_sanity_val_steps = nb_sanity_val_steps\n\n # Backward compatibility, TODO: remove in v0.9.0\n if print_nan_grads:\n rank_zero_warn(\"Argument `print_nan_grads` has no effect and will be removed in v0.9.0.\"\n \" NaN grads will be printed automatically when detected.\", DeprecationWarning)\n\n self.reload_dataloaders_every_epoch = reload_dataloaders_every_epoch\n\n self.auto_lr_find = auto_lr_find\n self.auto_scale_batch_size = auto_scale_batch_size\n self._is_data_prepared = False\n self.replace_sampler_ddp = replace_sampler_ddp\n\n self.truncated_bptt_steps = truncated_bptt_steps\n self.resume_from_checkpoint = resume_from_checkpoint\n self.terminate_on_nan = terminate_on_nan\n self.shown_warnings = set()\n\n self.fast_dev_run = fast_dev_run\n if self.fast_dev_run:\n self.num_sanity_val_steps = 0\n self.max_epochs = 1\n log.info('Running in fast_dev_run mode: will run a full train,'\n ' val and test loop using a single batch')\n\n # set default save path if user didn't provide one\n self.default_root_dir = default_root_dir\n\n # Backward compatibility, TODO: remove in v0.8.0\n if default_save_path is not None:\n self.default_root_dir = default_save_path\n\n if self.default_root_dir is None:\n self.default_root_dir = os.getcwd()\n\n # training bookeeping\n self.total_batch_idx = 0\n self.running_loss = TensorRunningAccum(window_length=20)\n self.batch_idx = 0\n self.progress_bar_metrics = {}\n self.callback_metrics = {}\n self.num_val_batches = 0\n self.num_training_batches = 0\n self.num_test_batches = 0\n self.train_dataloader = None\n self.test_dataloaders = None\n self.val_dataloaders = None\n\n # training state\n self.model = None\n self.testing = False\n self.disable_validation = False\n self.lr_schedulers = []\n self.optimizers = None\n self.optimizer_frequencies = []\n self.global_step = 0\n self.current_epoch = 0\n self.interrupted = False\n\n # configure logger\n self.configure_logger(logger)\n\n # configure profiler\n if profiler is True:\n profiler = SimpleProfiler()\n self.profiler = profiler or PassThroughProfiler()\n\n # configure early stop callback\n # creates a default one if none passed in\n self.configure_early_stopping(early_stop_callback)\n\n # configure checkpoint callback\n self.checkpoint_callback = checkpoint_callback\n self.weights_save_path = weights_save_path\n\n # accumulated grads\n self.accumulate_grad_batches = accumulate_grad_batches\n self.configure_accumulated_gradients(accumulate_grad_batches)\n\n # for gpus allow int, string and gpu list\n if auto_select_gpus and isinstance(gpus, int):\n self.gpus = pick_multiple_gpus(gpus)\n else:\n self.gpus = gpus\n\n self.data_parallel_device_ids = parse_gpu_ids(self.gpus)\n self.root_gpu = determine_root_gpu_device(self.data_parallel_device_ids)\n self.root_device = torch.device(\"cpu\")\n\n # tpu state flags\n self.use_tpu = False\n self.tpu_local_core_rank = None\n self.tpu_global_core_rank = None\n\n # distributed backend choice\n self.distributed_backend = distributed_backend\n self.set_distributed_mode(distributed_backend)\n\n # override dist backend when using tpus\n if self.on_tpu:\n self.init_tpu()\n\n # init flags for SLURM+ddp to work\n self.proc_rank = 0\n self.world_size = 1\n self.interactive_ddp_procs = []\n self.configure_slurm_ddp(self.num_nodes)\n self.node_rank = self.determine_ddp_node_rank()\n\n # nvidia setup\n self.set_nvidia_flags(self.is_slurm_managing_tasks, self.data_parallel_device_ids)\n\n # backward compatibility\n if show_progress_bar is not None:\n self.show_progress_bar = show_progress_bar\n\n self._progress_bar_callback = self.configure_progress_bar(progress_bar_refresh_rate, process_position)\n\n # logging\n self.log_save_interval = log_save_interval\n self.val_check_interval = val_check_interval\n\n # backward compatibility\n if add_row_log_interval is not None:\n rank_zero_warn(\"`add_row_log_interval` has renamed to `row_log_interval` since v0.5.0\"\n \" and this method will be removed in v0.8.0\", DeprecationWarning)\n if not row_log_interval: # in case you did not set the proper value\n row_log_interval = add_row_log_interval\n self.row_log_interval = row_log_interval\n\n # how much of the data to use\n self.overfit_pct = overfit_pct\n self.determine_data_use_amount(train_percent_check, val_percent_check,\n test_percent_check, overfit_pct)\n\n # AMP init\n # These are the only lines needed after v0.8.0\n # we wrap the user's forward with autocast and give it back at the end of fit\n self.autocast_original_forward = None\n self.use_native_amp = hasattr(torch.cuda, \"amp\") and hasattr(torch.cuda.amp, \"autocast\")\n self.precision = precision\n self.scaler = None\n\n # TODO: remove for v0.8.0\n self.amp_level = amp_level\n self.init_amp(use_amp)\n\n self.on_colab_kaggle = os.getenv('COLAB_GPU') or os.getenv('KAGGLE_URL_BASE')\n\n # Callback system\n self.on_init_end()\n\n @property\n def slurm_job_id(self) -> int:\n try:\n job_id = os.environ['SLURM_JOB_ID']\n job_id = int(job_id)\n\n # in interactive mode, don't make logs use the same job id\n in_slurm_interactive_mode = os.environ['SLURM_JOB_NAME'] == 'bash'\n if in_slurm_interactive_mode:\n job_id = None\n\n except Exception:\n job_id = None\n return job_id\n\n @classmethod\n def default_attributes(cls):\n init_signature = inspect.signature(Trainer)\n\n args = {}\n for param_name in init_signature.parameters:\n value = init_signature.parameters[param_name].default\n args[param_name] = value\n\n return args\n\n @classmethod\n def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n r\"\"\"Scans the Trainer signature and returns argument names, types and default values.\n\n Returns:\n List with tuples of 3 values:\n (argument name, set with argument types, argument default value).\n\n Examples:\n >>> args = Trainer.get_init_arguments_and_types()\n >>> import pprint\n >>> pprint.pprint(sorted(args)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n [('accumulate_grad_batches',\n (<class 'int'>, typing.Dict[int, int], typing.List[list]),\n 1),\n ...\n ('callbacks',\n (typing.List[pytorch_lightning.callbacks.base.Callback],\n <class 'NoneType'>),\n None),\n ('check_val_every_n_epoch', (<class 'int'>,), 1),\n ...\n ('max_epochs', (<class 'int'>,), 1000),\n ...\n ('precision', (<class 'int'>,), 32),\n ('print_nan_grads', (<class 'bool'>,), False),\n ('process_position', (<class 'int'>,), 0),\n ('profiler',\n (<class 'pytorch_lightning.profiler.profilers.BaseProfiler'>,\n <class 'bool'>,\n <class 'NoneType'>),\n None),\n ...\n \"\"\"\n trainer_default_params = inspect.signature(cls).parameters\n name_type_default = []\n for arg in trainer_default_params:\n arg_type = trainer_default_params[arg].annotation\n arg_default = trainer_default_params[arg].default\n try:\n arg_types = tuple(arg_type.__args__)\n except AttributeError:\n arg_types = (arg_type,)\n\n name_type_default.append((arg, arg_types, arg_default))\n\n return name_type_default\n\n @classmethod\n def get_deprecated_arg_names(cls) -> List:\n \"\"\"Returns a list with deprecated Trainer arguments.\"\"\"\n depr_arg_names = []\n for name, val in cls.__dict__.items():\n if name.startswith('DEPRECATED') and isinstance(val, (tuple, list)):\n depr_arg_names.extend(val)\n return depr_arg_names\n\n @classmethod\n def add_argparse_args(cls, parent_parser: ArgumentParser) -> ArgumentParser:\n r\"\"\"Extends existing argparse by default `Trainer` attributes.\n\n Args:\n parent_parser:\n The custom cli arguments parser, which will be extended by\n the Trainer default arguments.\n\n Only arguments of the allowed types (str, float, int, bool) will\n extend the `parent_parser`.\n\n Examples:\n >>> import argparse\n >>> import pprint\n >>> parser = argparse.ArgumentParser()\n >>> parser = Trainer.add_argparse_args(parser)\n >>> args = parser.parse_args([])\n >>> pprint.pprint(vars(args)) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n {...\n 'check_val_every_n_epoch': 1,\n 'checkpoint_callback': True,\n 'default_root_dir': None,\n 'deterministic': False,\n 'distributed_backend': None,\n 'early_stop_callback': False,\n ...\n 'logger': True,\n 'max_epochs': 1000,\n 'max_steps': None,\n 'min_epochs': 1,\n 'min_steps': None,\n ...\n 'profiler': None,\n 'progress_bar_refresh_rate': 1,\n ...}\n\n \"\"\"\n parser = ArgumentParser(parents=[parent_parser], add_help=False, )\n\n blacklist = ['kwargs']\n depr_arg_names = cls.get_deprecated_arg_names() + blacklist\n\n allowed_types = (str, float, int, bool)\n\n # TODO: get \"help\" from docstring :)\n for arg, arg_types, arg_default in (at for at in cls.get_init_arguments_and_types()\n if at[0] not in depr_arg_names):\n arg_types = [at for at in allowed_types if at in arg_types]\n if not arg_types:\n # skip argument with not supported type\n continue\n arg_kwargs = {}\n if bool in arg_types:\n arg_kwargs.update(nargs=\"?\")\n # if the only arg type is bool\n if len(arg_types) == 1:\n # redefine the type for ArgParser needed\n def use_type(x):\n return bool(parsing.str_to_bool(x))\n else:\n # filter out the bool as we need to use more general\n use_type = [at for at in arg_types if at is not bool][0]\n else:\n use_type = arg_types[0]\n\n if arg == 'gpus':\n use_type = Trainer._allowed_type\n arg_default = Trainer._arg_default\n\n parser.add_argument(\n f'--{arg}',\n dest=arg,\n default=arg_default,\n type=use_type,\n help='autogenerated by pl.Trainer',\n **arg_kwargs,\n )\n\n return parser\n\n def _allowed_type(x) -> Union[int, str]:\n if ',' in x:\n return str(x)\n else:\n return int(x)\n\n def _arg_default(x) -> Union[int, str]:\n if ',' in x:\n return str(x)\n else:\n return int(x)\n\n @staticmethod\n def parse_argparser(arg_parser: Union[ArgumentParser, Namespace]) -> Namespace:\n \"\"\"Parse CLI arguments, required for custom bool types.\"\"\"\n args = arg_parser.parse_args() if isinstance(arg_parser, ArgumentParser) else arg_parser\n args = {k: True if v is None else v for k, v in vars(args).items()}\n return Namespace(**args)\n\n @classmethod\n def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs) -> 'Trainer':\n \"\"\"\n Create an instance from CLI arguments.\n\n Args:\n args: The parser or namespace to take arguments from. Only known arguments will be\n parsed and passed to the :class:`Trainer`.\n **kwargs: Additional keyword arguments that may override ones in the parser or namespace.\n These must be valid Trainer arguments.\n\n Example:\n >>> parser = ArgumentParser(add_help=False)\n >>> parser = Trainer.add_argparse_args(parser)\n >>> parser.add_argument('--my_custom_arg', default='something') # doctest: +SKIP\n >>> args = Trainer.parse_argparser(parser.parse_args(\"\"))\n >>> trainer = Trainer.from_argparse_args(args, logger=False)\n \"\"\"\n if isinstance(args, ArgumentParser):\n args = cls.parse_argparser(args)\n params = vars(args)\n\n # we only want to pass in valid Trainer args, the rest may be user specific\n valid_kwargs = inspect.signature(cls.__init__).parameters\n trainer_kwargs = dict((name, params[name]) for name in valid_kwargs if name in params)\n trainer_kwargs.update(**kwargs)\n\n return cls(**trainer_kwargs)\n\n @property\n def num_gpus(self) -> int:\n gpus = self.data_parallel_device_ids\n if gpus is None:\n return 0\n return len(gpus)\n\n @property\n def data_parallel(self) -> bool:\n return self.use_dp or self.use_ddp or self.use_ddp2\n\n @property\n def progress_bar_callback(self):\n return self._progress_bar_callback\n\n @property\n def progress_bar_dict(self) -> dict:\n \"\"\" Read-only for progress bar metrics. \"\"\"\n ref_model = self.model if not self.data_parallel else self.model.module\n return dict(**ref_model.get_progress_bar_dict(), **self.progress_bar_metrics)\n\n # -----------------------------\n # MODEL TRAINING\n # -----------------------------\n def fit(\n self,\n model: LightningModule,\n train_dataloader: Optional[DataLoader] = None,\n val_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None\n ):\n r\"\"\"\n Runs the full optimization routine.\n\n Args:\n model: Model to fit.\n\n train_dataloader: A Pytorch\n DataLoader with training samples. If the model has\n a predefined train_dataloader method this will be skipped.\n\n val_dataloaders: Either a single\n Pytorch Dataloader or a list of them, specifying validation samples.\n If the model has a predefined val_dataloaders method this will be skipped\n\n Example::\n\n # Option 1,\n # Define the train_dataloader() and val_dataloader() fxs\n # in the lightningModule\n # RECOMMENDED FOR MOST RESEARCH AND APPLICATIONS TO MAINTAIN READABILITY\n trainer = Trainer()\n model = LightningModule()\n trainer.fit(model)\n\n # Option 2\n # in production cases we might want to pass different datasets to the same model\n # Recommended for PRODUCTION SYSTEMS\n train, val = DataLoader(...), DataLoader(...)\n trainer = Trainer()\n model = LightningModule()\n trainer.fit(model, train_dataloader=train, val_dataloaders=val)\n\n # Option 1 & 2 can be mixed, for example the training set can be\n # defined as part of the model, and validation can then be feed to .fit()\n\n \"\"\"\n # bind logger and other properties\n model.logger = self.logger\n self.copy_trainer_model_properties(model)\n\n # clean hparams\n if hasattr(model, 'hparams'):\n parsing.clean_namespace(model.hparams)\n\n # set up the passed in dataloaders (if needed)\n self.__attach_dataloaders(model, train_dataloader, val_dataloaders)\n\n # check that model is configured correctly\n self.check_model_configuration(model)\n\n # download the data and do whatever transforms we need\n # do before any spawn calls so that the model can assign properties\n # only on proc 0 because no spawn has happened yet\n if not self._is_data_prepared:\n model.prepare_data()\n self._is_data_prepared = True\n\n # Run auto batch size scaling\n if self.auto_scale_batch_size:\n if isinstance(self.auto_scale_batch_size, bool):\n self.auto_scale_batch_size = 'power'\n self.scale_batch_size(model, mode=self.auto_scale_batch_size)\n model.logger = self.logger # reset logger binding\n\n # Run learning rate finder:\n if self.auto_lr_find:\n self._run_lr_finder_internally(model)\n model.logger = self.logger # reset logger binding\n\n # route to appropriate start method\n # when using multi-node or DDP within a node start each module in a separate process\n if self.use_ddp2:\n if self.is_slurm_managing_tasks:\n task = int(os.environ['SLURM_LOCALID'])\n\n # torchelastic or general non_slurm ddp2\n elif 'WORLD_SIZE' in os.environ and ('GROUP_RANK' in os.environ or 'NODE_RANK' in os.environ):\n task = int(os.environ['LOCAL_RANK'])\n self.ddp_train(task, model)\n elif self.use_ddp:\n if self.is_slurm_managing_tasks:\n task = int(os.environ['SLURM_LOCALID'])\n self.ddp_train(task, model)\n\n # torchelastic or general non_slurm ddp\n elif 'WORLD_SIZE' in os.environ and ('GROUP_RANK' in os.environ or 'NODE_RANK' in os.environ):\n task = int(os.environ['LOCAL_RANK'])\n self.ddp_train(task, model)\n\n elif self.distributed_backend == 'cpu_ddp':\n self.__set_random_port()\n self.model = model\n mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model,))\n\n elif self.distributed_backend == 'ddp_spawn':\n model.share_memory()\n\n # spin up peers\n mp.spawn(self.ddp_train, nprocs=self.num_processes, args=(model, ))\n\n elif self.distributed_backend == 'ddp':\n self.spawn_ddp_children(model)\n\n # 1 gpu or dp option triggers training using DP module\n # easier to avoid NCCL issues\n elif self.use_dp:\n self.dp_train(model)\n\n elif self.use_horovod:\n self.horovod_train(model)\n\n elif self.single_gpu:\n self.single_gpu_train(model)\n\n elif self.use_tpu: # pragma: no-cover\n log.info(f'training on {self.tpu_cores} TPU cores')\n\n # COLAB_GPU is an env var available by default in Colab environments.\n start_method = 'fork' if self.on_colab_kaggle else 'spawn'\n\n # track for predict\n self.model = model\n\n # train\n if self.tpu_id is not None:\n self.tpu_train(self.tpu_id, model)\n else:\n xmp.spawn(self.tpu_train, args=(model,), nprocs=self.tpu_cores, start_method=start_method)\n\n # load weights if not interrupted\n self.load_spawn_weights(model)\n self.model = model\n\n # ON CPU\n else:\n # run through amp wrapper\n if self.use_amp:\n raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option')\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n self.optimizers, self.lr_schedulers, self.optimizer_frequencies = self.init_optimizers(model)\n\n self.run_pretrain_routine(model)\n\n # return 1 when finished\n # used for testing or when we need to know that training succeeded\n return 1\n\n def __attach_dataloaders(self, model, train_dataloader=None, val_dataloaders=None, test_dataloaders=None):\n # when dataloader is passed via fit, patch the train_dataloader\n # functions to overwrite with these implementations\n if train_dataloader is not None:\n model.train_dataloader = _PatchDataLoader(train_dataloader)\n\n if val_dataloaders is not None:\n model.val_dataloader = _PatchDataLoader(val_dataloaders)\n\n if test_dataloaders is not None:\n model.test_dataloader = _PatchDataLoader(test_dataloaders)\n\n def run_pretrain_routine(self, model: LightningModule):\n \"\"\"Sanity check a few things before starting actual training.\n\n Args:\n model: The model to run sanity test on.\n \"\"\"\n ref_model = model\n if self.data_parallel:\n ref_model = model.module\n\n # give model convenience properties\n ref_model.trainer = self\n\n # set local properties on the model\n self.copy_trainer_model_properties(ref_model)\n\n # init amp. Must be done here instead of __init__ to allow ddp to work\n if self.use_native_amp and self.precision == 16:\n self.scaler = torch.cuda.amp.GradScaler()\n\n # log hyper-parameters\n if self.logger is not None:\n # save exp to get started\n self.logger.log_hyperparams(ref_model.hparams)\n\n self.logger.save()\n\n if self.use_ddp or self.use_ddp2:\n torch_distrib.barrier()\n\n # wait for all models to restore weights\n if self.on_tpu and XLA_AVAILABLE:\n # wait for all processes to catch up\n torch_xla.core.xla_model.rendezvous(\"pl.Trainer.run_pretrain_routine\")\n\n elif self.use_horovod:\n # wait for all processes to catch up\n hvd.join()\n\n # register auto-resubmit when on SLURM\n self.register_slurm_signal_handlers()\n\n # print model summary\n # TODO: remove self.testing condition because model.summarize() is wiping out the weights\n if self.proc_rank == 0 and self.weights_summary is not None and not self.testing:\n if self.weights_summary in ['full', 'top']:\n ref_model.summarize(mode=self.weights_summary)\n else:\n raise MisconfigurationException(\"weights_summary can be None, 'full' or 'top'\")\n\n # track model now.\n # if cluster resets state, the model will update with the saved weights\n self.model = model\n\n # set up checkpoint callback\n self.configure_checkpoint_callback()\n\n # restore training and model before hpc call\n self.restore_weights(model)\n\n # when testing requested only run test and return\n if self.testing:\n # only load test dataloader for testing\n # self.reset_test_dataloader(ref_model)\n self.run_evaluation(test_mode=True)\n return\n\n # check if we should run validation during training\n self.disable_validation = not (self.is_overridden('validation_step') and self.val_percent_check > 0) \\\n and not self.fast_dev_run\n\n # run tiny validation (if validation defined)\n # to make sure program won't crash during val\n if not self.disable_validation and self.num_sanity_val_steps > 0:\n self.reset_val_dataloader(ref_model)\n\n # hook and callback\n ref_model.on_sanity_check_start()\n self.on_sanity_check_start()\n\n eval_results = self._evaluate(model,\n self.val_dataloaders,\n self.num_sanity_val_steps,\n False)\n _, _, _, callback_metrics, _ = self.process_output(eval_results)\n\n self.on_sanity_check_end()\n\n # verify that early stop has conditioned on a metric that exists\n if self.enable_early_stop:\n self.early_stop_callback._validate_condition_metric(callback_metrics)\n\n # clear cache before training\n if self.on_gpu and self.root_gpu is not None:\n # use context because of:\n # https://discuss.pytorch.org/t/out-of-memory-when-i-use-torch-cuda-empty-cache/57898\n with torch.cuda.device(f'cuda:{self.root_gpu}'):\n torch.cuda.empty_cache()\n\n # CORE TRAINING LOOP\n self.train()\n\n def test(\n self,\n model: Optional[LightningModule] = None,\n test_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None\n ):\n r\"\"\"\n\n Separates from fit to make sure you never run on your test set until you want to.\n\n Args:\n model: The model to test.\n\n test_dataloaders: Either a single\n Pytorch Dataloader or a list of them, specifying validation samples.\n\n Example::\n\n # Option 1\n # run test after fitting\n test = DataLoader(...)\n trainer = Trainer()\n model = LightningModule()\n\n trainer.fit(model)\n trainer.test(test_dataloaders=test)\n\n # Option 2\n # run test from a loaded model\n test = DataLoader(...)\n model = LightningModule.load_from_checkpoint('path/to/checkpoint.ckpt')\n trainer = Trainer()\n trainer.test(model, test_dataloaders=test)\n \"\"\"\n\n self.testing = True\n\n if test_dataloaders is not None:\n if model:\n self.__attach_dataloaders(model, test_dataloaders=test_dataloaders)\n else:\n self.__attach_dataloaders(self.model, test_dataloaders=test_dataloaders)\n\n if model is not None:\n self.model = model\n self.fit(model)\n\n # on tpu, .spawn means we don't have a trained model\n # TODO: remove TPU spawn\n elif self.use_tpu: # pragma: no-cover\n # attempt to load weights from a spawn\n path = os.path.join(self.default_root_dir, '__temp_weight_ddp_end.ckpt')\n test_model = self.model\n if os.path.exists(path):\n test_model = self.load_spawn_weights(self.model)\n\n self.fit(test_model)\n else:\n self.run_evaluation(test_mode=True)\n\n self.testing = False\n\n def check_model_configuration(self, model: LightningModule):\n r\"\"\"\n Checks that the model is configured correctly before training or testing is started.\n\n Args:\n model: The model to check the configuration.\n\n \"\"\"\n # Check training_step, train_dataloader, configure_optimizer methods\n if not self.testing:\n if not self.is_overridden('training_step', model):\n raise MisconfigurationException(\n 'No `training_step()` method defined. Lightning `Trainer` expects as minimum a'\n ' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')\n\n if not self.is_overridden('train_dataloader', model):\n raise MisconfigurationException(\n 'No `train_dataloader()` method defined. Lightning `Trainer` expects as minimum a'\n ' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')\n\n if not self.is_overridden('configure_optimizers', model):\n raise MisconfigurationException(\n 'No `configure_optimizers()` method defined. Lightning `Trainer` expects as minimum a'\n ' `training_step()`, `training_dataloader()` and `configure_optimizers()` to be defined.')\n\n # Check val_dataloader, validation_step and validation_epoch_end\n if self.is_overridden('val_dataloader', model):\n if not self.is_overridden('validation_step', model):\n raise MisconfigurationException('You have passed in a `val_dataloader()`'\n ' but have not defined `validation_step()`.')\n else:\n if not self.is_overridden('validation_epoch_end', model):\n rank_zero_warn(\n 'You have defined a `val_dataloader()` and have defined a `validation_step()`,'\n ' you may also want to define `validation_epoch_end()` for accumulating stats.',\n RuntimeWarning\n )\n else:\n if self.is_overridden('validation_step', model):\n raise MisconfigurationException('You have defined `validation_step()`,'\n ' but have not passed in a `val_dataloader()`.')\n\n # Check test_dataloader, test_step and test_epoch_end\n if self.is_overridden('test_dataloader', model):\n if not self.is_overridden('test_step', model):\n raise MisconfigurationException('You have passed in a `test_dataloader()`'\n ' but have not defined `test_step()`.')\n else:\n if not self.is_overridden('test_epoch_end', model):\n rank_zero_warn(\n 'You have defined a `test_dataloader()` and have defined a `test_step()`, you may also want to'\n ' define `test_epoch_end()` for accumulating stats.', RuntimeWarning\n )\n else:\n if self.testing and self.is_overridden('test_step', model):\n raise MisconfigurationException('You have defined `test_step()` but did not'\n ' implement `test_dataloader` nor passed in `.test(test_dataloader)`.')\n\n\nclass _PatchDataLoader(object):\n r\"\"\"\n Callable object for patching dataloaders passed into trainer.fit().\n Use this class to override model.*_dataloader() and be pickle-compatible.\n\n Args:\n dataloader: Dataloader object to return when called.\n\n \"\"\"\n\n def __init__(self, dataloader: Union[List[DataLoader], DataLoader]):\n self.dataloader = dataloader\n\n # cannot pickle __code__ so cannot verify if PatchDataloader\n # exists which shows dataloader methods have been overwritten.\n # so, we hack it by using the string representation\n self.patch_loader_code = str(self.__call__.__code__)\n\n def __call__(self) -> Union[List[DataLoader], DataLoader]:\n return self.dataloader\n" ]
[ [ "torch.multiprocessing.spawn", "torch.cuda.empty_cache", "torch.distributed.barrier", "torch.cuda.amp.GradScaler", "torch.cuda.is_available", "torch.device", "torch.cuda.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sebastian-sosa/nlpaug
[ "db8f0412a09ca0decb3eb6f8d1deeb4f03e6968e" ]
[ "nlpaug/model/lang_models/xlnet.py" ]
[ "import logging\n\ntry:\n import torch\n from transformers import AutoModelForCausalLM, AutoTokenizer\nexcept ImportError:\n # No installation required if not using this function\n pass\n\nfrom nlpaug.model.lang_models import LanguageModels\nfrom nlpaug.util.selection.filtering import *\n\n\n# TODO: no optimize process yet\nclass XlNet(LanguageModels):\n # https://arxiv.org/abs/1906.08237\n\n # Since XLNet is not good on short inputs, Aman Rusia proposed to add padding text to overcome this limitation.\n # https://github.com/rusiaaman/XLNet-gen#methodology and https://github.com/huggingface/pytorch-transformers/issues/846\n PADDING_TEXT = \"\"\"\n The quick brown fox jumps over the lazy dog. A horrible, messy split second presents\n itself to the heart-shaped version as Scott is moved. The upcoming movie benefits at \n the mental cost of ages 14 to 12. Nothing substantial is happened for almost 48 days. \n When that happens, we lose our heart. <eod>\n \"\"\"\n\n MASK_TOKEN = '<mask>'\n PAD_TOKEN = '<pad>'\n UNKNOWN_TOKEN = '<unk>' \n SUBWORD_PREFIX = '▁'\n NEW_PARAGRAPH_TOKEN = '<eop>'\n MASK_TOKEN_ID = 6\n\n def __init__(self, model_path='xlnet-base-cased', temperature=1.0, top_k=None, top_p=None, padding_text=None,\n optimize=None, device=None, silence=True):\n super().__init__(device, temperature=temperature, top_k=top_k, top_p=top_p, optimize=optimize, silence=True)\n try:\n from transformers import AutoModelForCausalLM, AutoTokenizer\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Missed transformers library. Install transfomers by `pip install transformers`')\n \n self.model_path = model_path\n\n # TODO: Evaluted to use mems in XLNet but the result is quite weird.\n self.optimize['external_memory'] = 0\n self.tokenizer = AutoTokenizer.from_pretrained(model_path)\n self.mask_id = self.token2id(self.MASK_TOKEN)\n self.pad_id = self.token2id(self.PAD_TOKEN)\n config = {\n 'mem_len': self.optimize['external_memory']\n }\n if silence:\n # Transformers thrown an warning regrading to weight initialization. It is expected\n orig_log_level = logging.getLogger('transformers.' + 'modeling_utils').getEffectiveLevel()\n logging.getLogger('transformers.' + 'modeling_utils').setLevel(logging.ERROR)\n self.model = AutoModelForCausalLM.from_pretrained(model_path, config=config)\n logging.getLogger('transformers.' + 'modeling_utils').setLevel(orig_log_level)\n else:\n self.model = AutoModelForCausalLM.from_pretrained(model_path, config=config)\n\n self.padding_text_idxes = self.tokenizer.encode(padding_text or self.PADDING_TEXT)\n\n self.model.to(self.device)\n self.model.eval()\n\n def get_max_num_token(self):\n return 500\n\n def token2id(self, token):\n return self.tokenizer._convert_token_to_id(token)\n\n def id2token(self, _id):\n return self.tokenizer._convert_id_to_token(_id)\n\n def clean(self, text):\n return text.replace(self.NEW_PARAGRAPH_TOKEN, '').strip()\n\n def predict(self, texts, target_words=None, n=1, external_memory=None, \n include_punctuation=False):\n # Prepare inputs\n input_idxes = [self.tokenizer.encode(text) for text in texts]\n if target_words is None:\n target_words = [None] * len(input_idxes)\n # target_words = [t.replace(self.SUBWORD_PREFIX, '') for t in target_words if t]\n\n # Pad token\n max_token_size = max([len(t) for t in input_idxes])\n for i, token_input in enumerate(input_idxes):\n for _ in range(max_token_size - len(token_input)):\n input_idxes[i].append(self.pad_id)\n\n target_poses = []\n if external_memory is None: # First step or does not enable optimization\n for i, tokens in enumerate(input_idxes):\n target_poses.append(len(self.padding_text_idxes) + tokens.index(self.mask_id))\n input_idxes[i] = self.padding_text_idxes + tokens\n else:\n for i, tokens in enumerate(input_idxes):\n target_poses.append(tokens.index(self.mask_id))\n\n perm_masks = torch.zeros((len(input_idxes), len(input_idxes[0]), len(input_idxes[0])), dtype=torch.float)\n target_mappings = torch.zeros((len(input_idxes), 1, len(input_idxes[0])), dtype=torch.float)\n for i, target_pos in enumerate(target_poses):\n perm_masks[i][:, target_pos] = 1.0 # Mask the target word\n target_mappings[i, 0, target_pos] = 1.0\n\n # Convert to feature\n input_idxes = torch.tensor(input_idxes).to(self.device)\n perm_masks = perm_masks.to(self.device)\n target_mappings = target_mappings.to(self.device)\n\n # Prediction\n results = []\n with torch.no_grad():\n outputs = self.model(input_ids=input_idxes, perm_mask=perm_masks, target_mapping=target_mappings,\n mems=external_memory)\n\n # Selection\n for output, target_token in zip(outputs[0], target_words):\n target_token_logits = output[0]\n\n seed = {'temperature': self.temperature, 'top_k': self.top_k, 'top_p': self.top_p}\n target_token_logits = self.control_randomness(target_token_logits, seed)\n target_token_logits, target_token_idxes = self.filtering(target_token_logits, seed)\n if len(target_token_idxes) != 0:\n new_tokens = self.pick(target_token_logits, target_token_idxes, target_word=target_token, \n n=10, include_punctuation=include_punctuation)\n results.append([t[0] for t in new_tokens])\n else:\n results.append([''])\n\n return results\n" ]
[ [ "torch.no_grad", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
eshanking/seascapes_figures
[ "a9afc339a3565a5c71a05cd44d790eb839a79262" ]
[ "figure_code/seascape_v_landscape_fig.py" ]
[ "from seascapes_figures.utils import plotter, results_manager\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pickle\n\ndef unpack(sim_path):\n\n data_dict = pickle.load(open(sim_path,'rb'))\n counts = data_dict['counts']\n drug_conc = data_dict['drug_curve']\n\n return counts, drug_conc\n\nsuffix = '05252022_0000'\n# suffix = '04262022_0001'\nexp_folder,exp_info = results_manager.get_experiment_results(suffix=suffix)\n# fitness axes\nfig,ax = plt.subplots(nrows=2,ncols=2,figsize=(7,5))\nlinewidth = 2\n\nlabelsize=8\n\np = exp_info.p_landscape\n\nf,ax[0,0] = p.plot_fitness_curves(pop=exp_info.p_landscape,\n ax=ax[0,0],\n show_legend=False,\n show_axes_labels=False,\n labelsize=labelsize,\n linewidth=linewidth)\n\nax[0,0].set_xticks([10**-3,10**-1,10**1,10**3,10**5])\nax[0,0].xaxis.tick_top()\n\nf,ax[0,1] = p.plot_fitness_curves(pop=exp_info.p_seascape,\n ax=ax[0,1], \n show_legend=False,\n show_axes_labels=False,\n labelsize=labelsize,\n linewidth=linewidth)\n\nax[0,1].set_xticks([10**-3,10**-1,10**1,10**3,10**5])\nax[0,1].xaxis.tick_top()\n\n# timecourse axes\nlandscape_exp = exp_folder[0]\n\nsim = os.listdir(path=landscape_exp)\nsim = sim[0]\nsim = landscape_exp + os.sep + sim\ncounts, dc = unpack(sim)\n\ndrug_kwargs = {'color':'black',\n 'alpha':0.5,\n 'linestyle':'--'}\n\nax[1,0],drug_ax = p.plot_timecourse_to_axes(counts,\n ax[1,0],\n labelsize=labelsize,\n linewidth=linewidth,\n drug_curve=dc,\n # drug_curve_linestyle='--',\n drug_curve_label='',\n drug_kwargs=drug_kwargs)\n\ndrug_ax.set_ylim([10**-5,10**7])\ndrug_ax.set_yticks([10**-3,10**1,10**5])\n\nseascape_exp = exp_folder[1]\n\nsim = os.listdir(path=seascape_exp)\nsim = sim[0]\nsim = seascape_exp + os.sep + sim\ncounts, dc = unpack(sim)\n\nax[1,1],drug_ax = p.plot_timecourse_to_axes(counts,\n ax[1,1],\n labelsize=labelsize,\n linewidth=linewidth,\n # drug_curve_linestyle='--',\n drug_curve=dc,\n drug_kwargs=drug_kwargs)\n\ndrug_ax.set_ylim([10**-5,10**7])\ndrug_ax.set_yticks([10**-3,10**1,10**5])\n\n# landscape axes\n\nnull_ax = ax[0,0]\nconc = [exp_info.first_dose,exp_info.second_dose,exp_info.third_dose]\ncmap = 'Blues'\nedgecolor='black'\ntextcolor='goldenrod'\n# pad = -0.35\npad=0.9\n\nyl = null_ax.get_ylim()\nydata = np.arange(yl[0],yl[1],0.1)\n\nfor c in conc:\n p.add_landscape_to_fitness_curve(c,null_ax,exp_info.p_landscape,\n textcolor=textcolor,\n cmap=cmap,\n edgecolor=edgecolor,\n linewidths=0.5,\n textsize=9,\n position='bottom',\n vert_lines_ydata=ydata,\n square=True,\n node_size = 200,\n colorbar=False,\n pad=pad)\n \nsea_ax = ax[0,1]\n\nfor i in range(len(conc)-1):\n c = conc[i]\n p.add_landscape_to_fitness_curve(c,sea_ax,exp_info.p_seascape,\n textcolor=textcolor,\n cmap=cmap,\n edgecolor=edgecolor,\n linewidths=0.5,\n textsize=9,\n position='bottom',\n vert_lines_ydata=ydata,\n square=True,\n node_size = 200,\n colorbar=False,\n pad=pad)\n\nc = conc[-1]\n# cbax = fig.add_subplot()\nl1 = p.add_landscape_to_fitness_curve(c,sea_ax,exp_info.p_seascape,\n textcolor=textcolor,\n cmap=cmap,\n edgecolor=edgecolor,\n linewidths=0.5,\n textsize=9,\n position='bottom',\n vert_lines_ydata=ydata,\n square=True,\n node_size = 200,\n colorbar=True,\n cbloc = [0.1,0.35,0.3,0.5],\n pad=pad)\n\n# reposition axes\n# w = 0.3\n# h = 0.27\nw = 0.26\nh = 0.22\n\n# wspace = (1-2*w)/3\nwspace = (1-2*w)/2.7\nhspace = (1-2*h)/2.7\n\nbottom = np.array([[1-hspace-h,1-hspace-h],[hspace,hspace]])\nleft = np.array([[wspace,1-wspace-w],[wspace,1-wspace-w]])\n\nfor a in ax[0,:]:\n # a.set_ylabel('Growth rate',fontsize=labelsize)\n a.set_xlabel('Drug concentration ($\\u03BC$M)',fontsize=labelsize)\n a.xaxis.set_label_position('top') \n \nfor a in ax[1,:]:\n # a.set_ylabel('Cell count',labelpad=0,fontsize=labelsize)\n a.set_xlabel('Days',fontsize=labelsize)\n \nax[1,0].set_ylabel('Cell count',labelpad=0,fontsize=labelsize)\nax[1,1].set_ylabel('',labelpad=0,fontsize=labelsize)\n\nax[0,0].set_ylabel('Growth rate ($hr^{-1}$)',fontsize=labelsize)\n \nax[1,1].legend(frameon=False,fontsize=7,\n bbox_to_anchor=(-0.75, -0.45, 1., .102), loc='lower left',\n ncol=4, mode=\"expand\", borderaxespad=0.)\n \nfor row in range(2):\n for col in range(2):\n a = ax[row,col]\n pos = [left[row,col],bottom[row,col],w,h]\n a.set_position(pos)\n \nax[0,0].annotate('a.', xy=(-0.15,1.05), xycoords='axes fraction')\nax[1,0].annotate('c.', xy=(-0.15,1.5), xycoords='axes fraction')\nax[0,1].annotate('b.', xy=(-0.15,1.05), xycoords='axes fraction')\nax[1,1].annotate('d.', xy=(-0.15,1.5), xycoords='axes fraction')\nax[1,1].annotate('f.', xy=(-0.15,1.05), xycoords='axes fraction')\nax[1,0].annotate('e.', xy=(-0.15,1.05), xycoords='axes fraction')\n \nresults_manager.save_fig(fig,'seascape_v_landscape.pdf',bbox_inches='tight')" ]
[ [ "numpy.arange", "numpy.array", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kenneym/py-feat
[ "59a25139ad52914d41ebf7fd63e25357c097b745" ]
[ "feat/facepose_detectors/pnp/pnp_model.py" ]
[ "import os\r\nimport cv2\r\nimport numpy as np\r\nfrom feat.utils import get_resource_path\r\nfrom feat.facepose_detectors.utils import convert_to_euler\r\nTHREED_FACE_MODEL = os.path.join(get_resource_path(), \"reference_3d_68_points_trans.npy\")\r\n\r\n\r\nclass PerspectiveNPointModel:\r\n \"\"\" Class that leverages 68 2D facial landmark points to estimate head pose using the Perspective-n-Point\r\n algorithm.\r\n\r\n Code adapted from https://github.com/yinguobing/head-pose-estimation/ and\r\n https://github.com/lincolnhard/head-pose-estimation/. Each code base licensed under MIT Licenses, which can be\r\n found here: https://github.com/yinguobing/head-pose-estimation/blob/master/LICENSE and here:\r\n https://github.com/lincolnhard/head-pose-estimation/blob/master/LICENSE\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\" Initializes the model, with a reference 3D model (xyz coordinates) of a standard face\"\"\"\r\n # self.model_points = get_full_model_points(os.path.join(get_resource_path(), \"3d_face_model.txt\"))\r\n self.model_points = np.load(THREED_FACE_MODEL, allow_pickle=True)\r\n\r\n def predict(self, img, landmarks):\r\n \"\"\" Determines headpose using passed 68 2D landmarks\r\n\r\n Args:\r\n img (np.ndarray) : The cv2 image from which the landmarks were produced\r\n landmarks (np.ndarray) : The landmarks to use to produce the headpose estimate\r\n\r\n Returns:\r\n np.ndarray: Euler angles ([pitch, roll, yaw])\r\n \"\"\"\r\n # Obtain camera intrinsics to solve PnP algorithm. These intrinsics represent defaults - users may modify this\r\n # code to pass their own camera matrix and distortion coefficients if they happen to have calibrated their\r\n # camera: https://learnopencv.com/camera-calibration-using-opencv/\r\n h, w = img.shape[:2]\r\n camera_matrix = np.array([[w + h, 0, w // 2],\r\n [0, w + h, h // 2],\r\n [0, 0, 1]], dtype='float32')\r\n dist_coeffs = np.zeros((4, 1), dtype='float32') # Assuming no lens distortion\r\n\r\n # Solve PnP using all 68 points:\r\n landmarks = landmarks.astype('float32')\r\n _, rotation_vector, translation_vector = cv2.solvePnP(self.model_points, landmarks, camera_matrix, dist_coeffs,\r\n flags=cv2.SOLVEPNP_EPNP)\r\n\r\n # Convert to Euler Angles\r\n euler_angles = convert_to_euler(np.squeeze(rotation_vector))\r\n\r\n # PnP may give values outside the range of (-90, 90), and sometimes misinterprets a face as facing\r\n # AWAY from the camera (since 2D landmarks do not convey whether face is facing towards or away from camera)\r\n # Thus, we adjust below to ensure the face is interpreted as front-facing\r\n euler_angles[euler_angles > 90] -= 180\r\n euler_angles[euler_angles < -90] += 180\r\n return euler_angles\r\n" ]
[ [ "numpy.load", "numpy.squeeze", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
angelolovatto/raylab
[ "ebaea8df1a391fb844e75df62ccf1e2e07311d88" ]
[ "raylab/policy/torch_policy.py" ]
[ "\"\"\"Base for all PyTorch policies.\"\"\"\nimport textwrap\nfrom typing import Dict, List, Optional, Set, Tuple, Type, Union\n\nimport torch\nfrom gym.spaces import Space\nfrom nnrl.utils import convert_to_tensor\nfrom ray.rllib import Policy, SampleBatch\nfrom ray.rllib.evaluation.episode import MultiAgentEpisode\nfrom ray.rllib.models.modelv2 import flatten, restore_original_dimensions\nfrom ray.rllib.policy.view_requirement import ViewRequirement\nfrom ray.rllib.utils import override\nfrom ray.rllib.utils.torch_ops import convert_to_non_torch_type, convert_to_torch_tensor\nfrom ray.rllib.utils.typing import ModelGradients, TensorType\nfrom ray.tune.logger import pretty_print\nfrom torch import Tensor, nn\n\nfrom raylab.options import RaylabOptions, configure, option\n\nfrom .action_dist import BaseActionDist\nfrom .compat import WrapRawModule\nfrom .modules import get_module\nfrom .optimizer_collection import OptimizerCollection\n\n\n@configure\n@option(\"env\", default=None)\n@option(\"env_config/\", allow_unknown_subkeys=True)\n@option(\"explore\", default=True)\n@option(\n \"exploration_config/\", allow_unknown_subkeys=True, override_all_if_type_changes=True\n)\n@option(\"framework\", default=\"torch\")\n@option(\"gamma\", default=0.99)\n@option(\"num_workers\", default=0)\n@option(\"seed\", default=None)\n@option(\"worker_index\", default=0)\n@option(\"normalize_actions\", default=True)\n@option(\"clip_actions\", default=False)\n@option(\n \"module/\",\n help=\"Type and config of the PyTorch NN module.\",\n allow_unknown_subkeys=True,\n override_all_if_type_changes=True,\n)\n@option(\n \"optimizer/\",\n help=\"Config dict for PyTorch optimizers.\",\n allow_unknown_subkeys=True,\n)\n@option(\"compile\", False, help=\"Whether to optimize the policy's backend\")\nclass TorchPolicy(Policy):\n \"\"\"A Policy that uses PyTorch as a backend.\n\n Attributes:\n observation_space: Space of possible observation inputs\n action_space: Space of possible action outputs\n config: Policy configuration\n dist_class: Action distribution class for computing actions. Must be set\n by subclasses before calling `__init__`.\n device: Device in which the parameter tensors reside. All input samples\n will be converted to tensors and moved to this device\n module: The policy's neural network module. Should be compilable to\n TorchScript\n optimizers: The optimizers bound to the neural network (or submodules)\n options: Configuration object for this class\n \"\"\"\n\n observation_space: Space\n action_space: Space\n dist_class: Type[BaseActionDist]\n config: dict\n global_config: dict\n device: torch.device\n model: WrapRawModule\n module: nn.Module\n optimizers: OptimizerCollection\n options: RaylabOptions = RaylabOptions()\n\n def __init__(self, observation_space: Space, action_space: Space, config: dict):\n # Allow subclasses to set `dist_class` before calling init\n action_dist: Optional[Type[BaseActionDist]] = getattr(self, \"dist_class\", None)\n super().__init__(observation_space, action_space, self._build_config(config))\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.module = self._make_module(observation_space, action_space, self.config)\n self.module.to(self.device)\n self.model = WrapRawModule(\n self.observation_space,\n self.action_space,\n self.module,\n num_outputs=1,\n model_config=self.config[\"module\"],\n )\n\n self.optimizers = self._make_optimizers()\n\n # === Policy attributes ===\n self.dist_class: Type[BaseActionDist] = action_dist\n self.dist_class.check_model_compat(self.module)\n self.framework = \"torch\" # Needed to create exploration\n self.exploration = self._create_exploration()\n\n # ==========================================================================\n # PublicAPI\n # ==========================================================================\n\n @property\n def pull_from_global(self) -> Set[str]:\n \"\"\"Keys to pull from global configuration.\n\n Configurations passed down from caller (usually by the trainer) that are\n not under the `policy` config.\n \"\"\"\n return {\n \"env\",\n \"env_config\",\n \"explore\",\n # \"exploration_config\",\n \"gamma\",\n \"num_workers\",\n \"seed\",\n \"worker_index\",\n \"normalize_actions\",\n \"clip_actions\",\n }\n\n def compile(self):\n \"\"\"Optimize modules with TorchScript.\n\n Warnings:\n This action cannot be undone.\n \"\"\"\n self.module = torch.jit.script(self.module)\n\n @torch.no_grad()\n @override(Policy)\n def compute_actions(\n self,\n obs_batch: Union[List[TensorType], TensorType],\n state_batches: Optional[List[TensorType]] = None,\n prev_action_batch: Union[List[TensorType], TensorType] = None,\n prev_reward_batch: Union[List[TensorType], TensorType] = None,\n info_batch: Optional[Dict[str, list]] = None,\n episodes: Optional[List[MultiAgentEpisode]] = None,\n explore: Optional[bool] = None,\n timestep: Optional[int] = None,\n **kwargs,\n ) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:\n # pylint:disable=too-many-arguments,too-many-locals\n explore = explore if explore is not None else self.config[\"explore\"]\n timestep = timestep if timestep is not None else self.global_timestep\n\n input_dict = self.lazy_tensor_dict(\n SampleBatch({SampleBatch.CUR_OBS: obs_batch, \"is_training\": False})\n )\n if prev_action_batch:\n input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch\n if prev_reward_batch:\n input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch\n state_batches = convert_to_torch_tensor(state_batches or [], device=self.device)\n\n # Call the exploration before_compute_actions hook.\n self.exploration.before_compute_actions(timestep=timestep)\n\n unpacked = unpack_observations(\n input_dict, self.observation_space, self.framework\n )\n state_out = state_batches\n\n # pylint:disable=not-callable\n action_dist = self.dist_class({\"obs\": unpacked[\"obs\"]}, self.model)\n # pylint:enable=not-callable\n actions, logp = self.exploration.get_exploration_action(\n action_distribution=action_dist, timestep=timestep, explore=explore\n )\n input_dict[SampleBatch.ACTIONS] = actions\n\n # Add default and custom fetches.\n extra_fetches = {}\n if logp is not None:\n extra_fetches[SampleBatch.ACTION_PROB] = logp.exp()\n extra_fetches[SampleBatch.ACTION_LOGP] = logp\n\n return convert_to_non_torch_type((actions, state_out, extra_fetches))\n\n def _get_default_view_requirements(self):\n # Add extra fetch keys to view requirements so that they're available\n # for training\n return {\n SampleBatch.ACTION_PROB: ViewRequirement(used_for_training=True),\n SampleBatch.ACTION_LOGP: ViewRequirement(used_for_training=True),\n **super()._get_default_view_requirements(),\n }\n\n @torch.no_grad()\n @override(Policy)\n def compute_log_likelihoods(\n self,\n actions: Union[List[TensorType], TensorType],\n obs_batch: Union[List[TensorType], TensorType],\n state_batches: Optional[List[TensorType]] = None,\n prev_action_batch: Optional[Union[List[TensorType], TensorType]] = None,\n prev_reward_batch: Optional[Union[List[TensorType], TensorType]] = None,\n actions_normalized: bool = True,\n ) -> TensorType:\n # pylint:disable=too-many-arguments\n input_dict = self.lazy_tensor_dict(\n SampleBatch({SampleBatch.CUR_OBS: obs_batch, SampleBatch.ACTIONS: actions})\n )\n if prev_action_batch:\n input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch\n if prev_reward_batch:\n input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch\n\n dist_inputs, _ = self.module(\n unpack_observations(input_dict, self.observation_space, self.framework),\n state_batches,\n self.convert_to_tensor([1]),\n )\n # pylint:disable=not-callable\n action_dist = self.dist_class(dist_inputs, self.module)\n # pylint:enable=not-callable\n log_likelihoods = action_dist.logp(input_dict[SampleBatch.ACTIONS])\n return log_likelihoods\n\n @override(Policy)\n def postprocess_trajectory(\n self, sample_batch, other_agent_batches=None, episode=None\n ):\n if not self.config[\"env_config\"].get(\"time_aware\", False):\n hit_limit = sample_batch[SampleBatch.INFOS][-1].get(\"TimeLimit.truncated\")\n env_done = sample_batch[SampleBatch.DONES][-1]\n sample_batch[SampleBatch.DONES][-1] = False if hit_limit else env_done\n return sample_batch\n\n @override(Policy)\n def get_weights(self) -> dict:\n return {\n \"module\": convert_to_non_torch_type(self.module.state_dict()),\n # Optimizer state dicts don't store tensors, only ids\n \"optimizers\": self.optimizers.state_dict(),\n }\n\n @override(Policy)\n def set_weights(self, weights: dict):\n self.module.load_state_dict(\n convert_to_torch_tensor(weights[\"module\"], device=self.device)\n )\n # Optimizer state dicts don't store tensors, only ids\n self.optimizers.load_state_dict(weights[\"optimizers\"])\n\n def convert_to_tensor(self, arr) -> Tensor:\n \"\"\"Convert an array to a PyTorch tensor in this policy's device.\n\n Args:\n arr (array_like): object which can be converted using `np.asarray`\n \"\"\"\n return convert_to_tensor(arr, self.device)\n\n def lazy_tensor_dict(self, sample_batch: SampleBatch) -> SampleBatch:\n \"\"\"Convert a sample batch into a dictionary of lazy tensors.\n\n The sample batch is wrapped with a UsageTrackingDict to convert array-\n likes into tensors upon querying.\n\n Args:\n sample_batch: the sample batch to convert\n\n Returns:\n A dictionary which intercepts key queries to lazily convert arrays\n to tensors.\n \"\"\"\n tensor_batch = sample_batch.copy(shallow=True)\n tensor_batch.set_get_interceptor(self.convert_to_tensor)\n return tensor_batch\n\n def __repr__(self):\n name = self.__class__.__name__\n args = [f\"{self.observation_space},\", f\"{self.action_space},\"]\n\n config = pretty_print(self.config).rstrip(\"\\n\")\n if \"\\n\" in config:\n config = textwrap.indent(config, \" \" * 2)\n config = \"{\\n\" + config + \"\\n}\"\n\n args += [config]\n args_repr = \"\\n\".join(args)\n args_repr = textwrap.indent(args_repr, \" \" * 2)\n constructor = f\"{name}(\\n{args_repr}\\n)\"\n else:\n args += [config]\n args_repr = \" \".join(args[1:-1])\n constructor = f\"{name}({args_repr})\"\n return constructor\n\n def apply_gradients(self, gradients: ModelGradients) -> None:\n pass\n\n def compute_gradients(\n self, postprocessed_batch: SampleBatch\n ) -> Tuple[ModelGradients, Dict[str, TensorType]]:\n pass\n\n # ==========================================================================\n # InternalAPI\n # ==========================================================================\n\n def _build_config(self, config: dict) -> dict:\n if not self.options.all_options_set:\n raise RuntimeError(\n f\"{type(self).__name__} still has configs to be set.\"\n \" Did you call `configure` as the last decorator?\"\n )\n\n passed = config.get(\"policy\", {}).copy()\n passed.update({k: config[k] for k in self.pull_from_global if k in config})\n new = self.options.merge_defaults_with(passed)\n return new\n\n @staticmethod\n def _make_module(obs_space: Space, action_space: Space, config: dict) -> nn.Module:\n \"\"\"Build the PyTorch nn.Module to be used by this policy.\n\n Args:\n obs_space: the observation space for this policy\n action_space: the action_space for this policy\n config: the user config containing the 'module' key\n\n Returns:\n A neural network module.\n \"\"\"\n return get_module(obs_space, action_space, config[\"module\"])\n\n def _make_optimizers(self) -> OptimizerCollection:\n \"\"\"Build PyTorch optimizers to use.\n\n The result will be set as the policy's optimizer collection.\n\n The user should update the optimizer collection (mutable mapping)\n returned by the base implementation.\n\n Returns:\n A mapping from names to optimizer instances\n \"\"\"\n # pylint:disable=no-self-use\n return OptimizerCollection()\n\n # ==========================================================================\n # Unimplemented Policy methods\n # ==========================================================================\n\n def export_model(self, export_dir):\n pass\n\n def export_checkpoint(self, export_dir):\n pass\n\n def import_model_from_h5(self, import_file):\n pass\n\n\ndef unpack_observations(input_dict, observation_space: Space, framework: str):\n \"\"\"Cast observations to original space and add a separate flattened view.\"\"\"\n restored = input_dict.copy()\n restored[\"obs\"] = restore_original_dimensions(\n input_dict[\"obs\"], observation_space, framework\n )\n if len(input_dict[\"obs\"].shape) > 2:\n restored[\"obs_flat\"] = flatten(input_dict[\"obs\"], framework)\n else:\n restored[\"obs_flat\"] = input_dict[\"obs\"]\n return restored\n" ]
[ [ "torch.jit.script", "torch.no_grad", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
frednam93/FDY-SED
[ "5824ffb1f7806315f85946c27a7ec62b3bd3b46c" ]
[ "utils/model.py" ]
[ "#Some codes are adopted from https://github.com/DCASE-REPO/DESED_task\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\n\nclass GLU(nn.Module):\n def __init__(self, in_dim):\n super(GLU, self).__init__()\n self.sigmoid = nn.Sigmoid()\n self.linear = nn.Linear(in_dim, in_dim)\n\n def forward(self, x): #x size = [batch, chan, freq, frame]\n lin = self.linear(x.permute(0, 2, 3, 1)) #x size = [batch, freq, frame, chan]\n lin = lin.permute(0, 3, 1, 2) #x size = [batch, chan, freq, frame]\n sig = self.sigmoid(x)\n res = lin * sig\n return res\n\n\nclass ContextGating(nn.Module):\n def __init__(self, in_dim):\n super(ContextGating, self).__init__()\n self.sigmoid = nn.Sigmoid()\n self.sigmoid = nn.Sigmoid()\n self.linear = nn.Linear(in_dim, in_dim)\n\n def forward(self, x): #x size = [batch, chan, freq, frame]\n lin = self.linear(x.permute(0, 2, 3, 1)) #x size = [batch, freq, frame, chan]\n lin = lin.permute(0, 3, 1, 2) #x size = [batch, chan, freq, frame]\n sig = self.sigmoid(lin)\n res = x * sig\n return res\n\n\nclass Dynamic_conv2d(nn.Module):\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, bias=False, n_basis_kernels=4,\n temperature=31, pool_dim='freq'):\n super(Dynamic_conv2d, self).__init__()\n\n self.in_planes = in_planes\n self.out_planes = out_planes\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.pool_dim = pool_dim\n\n self.n_basis_kernels = n_basis_kernels\n self.attention = attention2d(in_planes, self.kernel_size, self.stride, self.padding, n_basis_kernels,\n temperature, pool_dim)\n\n self.weight = nn.Parameter(torch.randn(n_basis_kernels, out_planes, in_planes, self.kernel_size, self.kernel_size),\n requires_grad=True)\n\n if bias:\n self.bias = nn.Parameter(torch.Tensor(n_basis_kernels, out_planes))\n else:\n self.bias = None\n\n for i in range(self.n_basis_kernels):\n nn.init.kaiming_normal_(self.weight[i])\n\n def forward(self, x): #x size : [bs, in_chan, frames, freqs]\n if self.pool_dim in ['freq', 'chan']:\n softmax_attention = self.attention(x).unsqueeze(2).unsqueeze(4) # size : [bs, n_ker, 1, frames, 1]\n elif self.pool_dim == 'time':\n softmax_attention = self.attention(x).unsqueeze(2).unsqueeze(3) # size : [bs, n_ker, 1, 1, freqs]\n elif self.pool_dim == 'both':\n softmax_attention = self.attention(x).unsqueeze(-1).unsqueeze(-1).unsqueeze(-1) # size : [bs, n_ker, 1, 1, 1]\n\n batch_size = x.size(0)\n\n aggregate_weight = self.weight.view(-1, self.in_planes, self.kernel_size, self.kernel_size) # size : [n_ker * out_chan, in_chan]\n\n if self.bias is not None:\n aggregate_bias = self.bias.view(-1)\n output = F.conv2d(x, weight=aggregate_weight, bias=aggregate_bias, stride=self.stride, padding=self.padding)\n else:\n output = F.conv2d(x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding)\n # output size : [bs, n_ker * out_chan, frames, freqs]\n\n output = output.view(batch_size, self.n_basis_kernels, self.out_planes, output.size(-2), output.size(-1))\n # output size : [bs, n_ker, out_chan, frames, freqs]\n\n if self.pool_dim in ['freq', 'chan']:\n assert softmax_attention.shape[-2] == output.shape[-2]\n elif self.pool_dim == 'time':\n assert softmax_attention.shape[-1] == output.shape[-1]\n\n output = torch.sum(output * softmax_attention, dim=1) # output size : [bs, out_chan, frames, freqs]\n\n return output\n\n\nclass attention2d(nn.Module):\n def __init__(self, in_planes, kernel_size, stride, padding, n_basis_kernels, temperature, pool_dim):\n super(attention2d, self).__init__()\n self.pool_dim = pool_dim\n self.temperature = temperature\n\n hidden_planes = int(in_planes / 4)\n\n if hidden_planes < 4:\n hidden_planes = 4\n\n if not pool_dim == 'both':\n self.conv1d1 = nn.Conv1d(in_planes, hidden_planes, kernel_size, stride=stride, padding=padding, bias=False)\n self.bn = nn.BatchNorm1d(hidden_planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv1d2 = nn.Conv1d(hidden_planes, n_basis_kernels, 1, bias=True)\n for m in self.modules():\n if isinstance(m, nn.Conv1d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n if isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n else:\n self.fc1 = nn.Linear(in_planes, hidden_planes)\n self.relu = nn.ReLU(inplace=True)\n self.fc2 = nn.Linear(hidden_planes, n_basis_kernels)\n\n def forward(self, x): #x size : [bs, chan, frames, freqs]\n if self.pool_dim == 'freq':\n x = torch.mean(x, dim=3) #x size : [bs, chan, frames]\n elif self.pool_dim == 'time':\n x = torch.mean(x, dim=2) #x size : [bs, chan, freqs]\n elif self.pool_dim == 'both':\n # x = torch.mean(torch.mean(x, dim=2), dim=1) #x size : [bs, chan]\n x = F.adaptive_avg_pool2d(x, (1, 1)).squeeze(-1).squeeze(-1)\n elif self.pool_dim == 'chan':\n x = torch.mean(x, dim=1) #x size : [bs, freqs, frames]\n\n if not self.pool_dim == 'both':\n x = self.conv1d1(x) #x size : [bs, hid_chan, frames]\n x = self.bn(x)\n x = self.relu(x)\n x = self.conv1d2(x) #x size : [bs, n_ker, frames]\n else:\n x = self.fc1(x) #x size : [bs, hid_chan]\n x = self.relu(x)\n x = self.fc2(x) #x size : [bs, n_ker]\n\n return F.softmax(x / self.temperature, 1)\n\n\nclass CNN(nn.Module):\n def __init__(self,\n n_input_ch,\n activation=\"Relu\",\n conv_dropout=0,\n kernel=[3, 3, 3],\n pad=[1, 1, 1],\n stride=[1, 1, 1],\n n_filt=[64, 64, 64],\n pooling=[(1, 4), (1, 4), (1, 4)],\n normalization=\"batch\",\n n_basis_kernels=4,\n DY_layers=[0, 1, 1, 1, 1, 1, 1],\n temperature=31,\n pool_dim='freq'):\n super(CNN, self).__init__()\n self.n_filt = n_filt\n self.n_filt_last = n_filt[-1]\n cnn = nn.Sequential()\n\n def conv(i, normalization=\"batch\", dropout=None, activ='relu'):\n in_dim = n_input_ch if i == 0 else n_filt[i - 1]\n out_dim = n_filt[i]\n if DY_layers[i] == 1:\n cnn.add_module(\"conv{0}\".format(i), Dynamic_conv2d(in_dim, out_dim, kernel[i], stride[i], pad[i],\n n_basis_kernels=n_basis_kernels,\n temperature=temperature, pool_dim=pool_dim))\n else:\n cnn.add_module(\"conv{0}\".format(i), nn.Conv2d(in_dim, out_dim, kernel[i], stride[i], pad[i]))\n if normalization == \"batch\":\n cnn.add_module(\"batchnorm{0}\".format(i), nn.BatchNorm2d(out_dim, eps=0.001, momentum=0.99))\n elif normalization == \"layer\":\n cnn.add_module(\"layernorm{0}\".format(i), nn.GroupNorm(1, out_dim))\n\n if activ.lower() == \"leakyrelu\":\n cnn.add_module(\"Relu{0}\".format(i), nn.LeakyReLu(0.2))\n elif activ.lower() == \"relu\":\n cnn.add_module(\"Relu{0}\".format(i), nn.ReLu())\n elif activ.lower() == \"glu\":\n cnn.add_module(\"glu{0}\".format(i), GLU(out_dim))\n elif activ.lower() == \"cg\":\n cnn.add_module(\"cg{0}\".format(i), ContextGating(out_dim))\n\n if dropout is not None:\n cnn.add_module(\"dropout{0}\".format(i), nn.Dropout(dropout))\n\n for i in range(len(n_filt)):\n conv(i, normalization=normalization, dropout=conv_dropout, activ=activation)\n cnn.add_module(\"pooling{0}\".format(i), nn.AvgPool2d(pooling[i]))\n self.cnn = cnn\n\n def forward(self, x): #x size : [bs, chan, frames, freqs]\n x = self.cnn(x)\n return x\n\n\nclass BiGRU(nn.Module):\n def __init__(self, n_in, n_hidden, dropout=0, num_layers=1):\n super(BiGRU, self).__init__()\n self.rnn = nn.GRU(n_in, n_hidden, bidirectional=True, dropout=dropout, batch_first=True, num_layers=num_layers)\n\n def forward(self, x):\n #self.rnn.flatten_parameters()\n x, _ = self.rnn(x)\n return x\n\n\nclass CRNN(nn.Module):\n def __init__(self,\n n_input_ch,\n n_class=10,\n activation=\"glu\",\n conv_dropout=0.5,\n n_RNN_cell=128,\n n_RNN_layer=2,\n rec_dropout=0,\n attention=True,\n **convkwargs):\n super(CRNN, self).__init__()\n self.n_input_ch = n_input_ch\n self.attention = attention\n self.n_class = n_class\n\n self.cnn = CNN(n_input_ch=n_input_ch, activation=activation, conv_dropout=conv_dropout, **convkwargs)\n self.rnn = BiGRU(n_in=self.cnn.n_filt[-1], n_hidden=n_RNN_cell, dropout=rec_dropout, num_layers=n_RNN_layer)\n\n self.dropout = nn.Dropout(conv_dropout)\n self.sigmoid = nn.Sigmoid()\n self.dense = nn.Linear(n_RNN_cell * 2, n_class)\n\n if self.attention:\n self.dense_softmax = nn.Linear(n_RNN_cell * 2, n_class)\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, x): #input size : [bs, freqs, frames]\n #cnn\n if self.n_input_ch > 1:\n x = x.transpose(2, 3)\n else:\n x = x.transpose(1, 2).unsqueeze(1) #x size : [bs, chan, frames, freqs]\n x = self.cnn(x)\n bs, ch, frame, freq = x.size()\n if freq != 1:\n print(\"warning! frequency axis is large: \" + str(freq))\n x = x.permute(0, 2, 1, 3)\n x = x.contiguous.view(bs, frame, ch*freq)\n else:\n x = x.squeeze(-1)\n x = x.permute(0, 2, 1) # x size : [bs, frames, chan]\n\n #rnn\n x = self.rnn(x) #x size : [bs, frames, 2 * chan]\n x = self.dropout(x)\n\n #classifier\n strong = self.dense(x) #strong size : [bs, frames, n_class]\n strong = self.sigmoid(strong)\n if self.attention:\n sof = self.dense_softmax(x) #sof size : [bs, frames, n_class]\n sof = self.softmax(sof) #sof size : [bs, frames, n_class]\n sof = torch.clamp(sof, min=1e-7, max=1)\n weak = (strong * sof).sum(1) / sof.sum(1) # [bs, n_class]\n else:\n weak = strong.mean(1)\n\n return strong.transpose(1, 2), weak\n\n\n\n" ]
[ [ "torch.mean", "torch.nn.functional.softmax", "torch.nn.Softmax", "torch.nn.GRU", "torch.sum", "torch.nn.LeakyReLu", "torch.nn.Dropout", "torch.randn", "torch.nn.Sigmoid", "torch.nn.GroupNorm", "torch.nn.Sequential", "torch.nn.BatchNorm1d", "torch.nn.init.constant_", "torch.nn.functional.conv2d", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.functional.adaptive_avg_pool2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.Conv1d", "torch.nn.BatchNorm2d", "torch.Tensor", "torch.nn.ReLu", "torch.clamp", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jonojace/fairseq
[ "ce287a3ca25fb26e65ae4d12614bbf174371eaa9" ]
[ "examples/tac/Hubert_ASR_generate_transcripts.py" ]
[ "import torch\nfrom transformers import HubertForCTC, Wav2Vec2Processor\nfrom datasets import load_dataset\nimport soundfile as sf\nimport librosa\n\nprocessor = Wav2Vec2Processor.from_pretrained(\"facebook/hubert-large-ls960-ft\")\nmodel = HubertForCTC.from_pretrained(\"facebook/hubert-large-ls960-ft\")\n\ndef replace_pad(l, new_symbol='-'):\n \"\"\"<pad> refers to epsilon in CTC replace with another symbol for readability\"\"\"\n new_l = []\n for x in l:\n if x == \"<pad>\":\n new_l.append(new_symbol)\n else:\n new_l.append(x)\n return new_l\n\n\nfrom itertools import groupby\n\n\ndef reduce_tokens(\n tokens,\n pad_symbol=\"<pad>\",\n word_boundary_symbol=\"|\",\n remove_epsilons=True,\n no_repeat_epsilons=False,\n no_repeat_word_boundaries=False,\n no_repeat_graphemes=False,\n):\n \"\"\"\n reduce a sequence of CTC output tokens that contains\n\n args:\n tokens: list of CTC model tokens to reduce\n remove_epsilons: whether or not to leave epsilons in\n no_repeat_epsilons: whether to reduce repeated epsilons to just one\n no_repeat_graphemes: whether to reduce repeated graphemes to just one\n \"\"\"\n reduced_tokens = []\n all_symbols = []\n all_durations = []\n\n for symbol, group in groupby(tokens):\n duration = sum(1 for _ in group)\n all_symbols.append(symbol)\n all_durations.append(duration)\n\n if symbol == pad_symbol:\n if remove_epsilons:\n pass\n elif no_repeat_epsilons:\n reduced_tokens.append(symbol)\n else:\n reduced_tokens.extend(duration * [symbol])\n elif symbol == word_boundary_symbol:\n if no_repeat_word_boundaries:\n reduced_tokens.append(symbol)\n else:\n reduced_tokens.extend(duration * [symbol])\n else:\n if no_repeat_graphemes:\n reduced_tokens.append(symbol)\n else:\n reduced_tokens.extend(duration * [symbol])\n\n return reduced_tokens, all_symbols, all_durations\n\n\nimport os\nwav_dir = \"/home/s1785140/data/LJSpeech-1.1/wavs\"\nwavs = os.listdir(wav_dir)\nwav_paths = [os.path.join(wav_dir, wav) for wav in wavs]\n\nfrom tqdm import tqdm\n\noutdir = \"/home/s1785140/data/LJSpeech-1.1/hubert_asr_transcription\"\noutdir_with_repeats = \"/home/s1785140/data/LJSpeech-1.1/hubert_asr_transcription_with_grapheme_repeats\"\noutdir_raw_outputs = \"/home/s1785140/data/LJSpeech-1.1/hubert_asr_raw_outputs\"\n\nos.makedirs(outdir, exist_ok=True)\nos.makedirs(outdir_with_repeats, exist_ok=True)\nos.makedirs(outdir_raw_outputs, exist_ok=True)\n\nprint(len(wav_paths))\n\nall_transcriptions = []\nall_alt_transcriptions = []\nall_raw_outputs = []\n\nljspeech_sampling_rate = 22050\nhubert_sampling_rate = 16000\n\nfor i, wav_path in enumerate(tqdm(wav_paths[:])):\n speech, _ = sf.read(wav_path)\n speech = librosa.resample(speech, ljspeech_sampling_rate, hubert_sampling_rate)\n utt_id = wav_path.split('/')[-1].split('.')[0]\n input_values = processor(speech, return_tensors=\"pt\").input_values\n logits = model(input_values).logits\n predicted_ids = torch.argmax(logits, dim=-1)\n transcription = processor.decode(predicted_ids[0])\n\n filtered_tokens = processor.tokenizer.convert_ids_to_tokens(predicted_ids[0].tolist(), skip_special_tokens=False)\n reduced_tokens, all_symbols, all_durations = reduce_tokens(\n filtered_tokens,\n remove_epsilons=True,\n no_repeat_epsilons=False,\n no_repeat_word_boundaries=True,\n no_repeat_graphemes=False\n )\n alt_transcription = replace_pad(reduced_tokens)\n alt_transcription = [sym if sym != \"|\" else \" \" for sym in alt_transcription]\n alt_transcription = \"\".join(alt_transcription)\n\n raw_outputs = replace_pad(filtered_tokens)\n raw_outputs = [sym if sym != \"|\" else \" \" for sym in raw_outputs]\n raw_outputs = \"\".join(raw_outputs)\n\n print(i, '===', raw_outputs)\n print(i, '===', transcription)\n print(i, '===', alt_transcription)\n\n all_transcriptions.append(f\"{utt_id}||{transcription.lower()}\")\n all_alt_transcriptions.append(f\"{utt_id}||{alt_transcription.lower()}\")\n all_raw_outputs.append(f\"{utt_id}||{raw_outputs.lower()}\")\n\n outfile = f\"{utt_id}.txt\"\n\n # save proper transcription\n with open(os.path.join(outdir, outfile), 'w') as f:\n f.write(all_transcriptions[-1])\n\n # save alternative transcription\n with open(os.path.join(outdir_with_repeats, outfile), 'w') as f:\n f.write(all_alt_transcriptions[-1])\n\n # save raw outputs\n with open(os.path.join(outdir_raw_outputs, outfile), 'w') as f:\n f.write(all_raw_outputs[-1])\n\noutfile = f\"metadata.csv\"\n\nall_transcriptions = sorted(all_transcriptions)\nall_alt_transcriptions = sorted(all_alt_transcriptions)\nall_raw_outputs = sorted(all_raw_outputs)\n\nprint(all_transcriptions)\nprint(all_alt_transcriptions)\nprint(all_raw_outputs)\n\n# save proper transcription\nwith open(os.path.join(outdir, outfile), 'w') as f:\n f.write(\"\\n\".join(all_transcriptions))\n\n# save alternative transcription\nwith open(os.path.join(outdir_with_repeats, outfile), 'w') as f:\n f.write(\"\\n\".join(all_alt_transcriptions))\n\n# save raw outputs\nwith open(os.path.join(outdir_raw_outputs, outfile), 'w') as f:\n f.write(\"\\n\".join(all_raw_outputs))\n" ]
[ [ "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mannurulz/Cirq
[ "78218a61c816b8c4b2b7a3d91b8c24df2dce30a0" ]
[ "cirq/ops/eigen_gate.py" ]
[ "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport fractions\nfrom typing import Tuple, Union, List, Optional, cast, TypeVar, NamedTuple, \\\n Iterable\n\nimport abc\n\nimport numpy as np\n\nfrom cirq import value, protocols\nfrom cirq.ops import raw_types\nfrom cirq.type_workarounds import NotImplementedType\n\n\nTSelf = TypeVar('TSelf', bound='EigenGate')\n\n\nEigenComponent = NamedTuple(\n 'EigenComponent',\n [\n # The θ in λ = exp(i π θ) where λ is a unique eigenvalue. The exponent\n # factor is used, instead of just a raw unit complex number, because it\n # disambiguates several cases. For example, when λ=-1 you can set θ to\n # -1 instead of +1 resulting in square root operations returning -i\n # instead of +1.\n ('eigenvalue_exponent_factor', float),\n\n # The projection matrix onto the eigenspace of the eigenvalue. Must\n # equal Σ_k |λ_k⟩⟨λ_k| where the |λ_k⟩ vectors form an orthonormal\n # basis for the eigenspace.\n ('eigenspace_projector', np.ndarray),\n ]\n)\n\n\nclass EigenGate(raw_types.Gate):\n \"\"\"A gate with a known eigendecomposition.\n\n EigenGate is particularly useful when one wishes for different parts of\n the same eigenspace to be extrapolated differently. For example, if a gate\n has a 2-dimensional eigenspace with eigenvalue -1, but one wishes for the\n square root of the gate to split this eigenspace into a part with\n eigenvalue i and a part with eigenvalue -i, then EigenGate allows this\n functionality to be unambiguously specified via the _eigen_components\n method.\n \"\"\"\n\n def __init__(self, *, # Forces keyword args.\n exponent: Union[value.Symbol, float] = 1.0,\n global_shift: float = 0.0) -> None:\n \"\"\"Initializes the parameters used to compute the gate's matrix.\n\n The eigenvalue of an eigenspace of the gate is computed by:\n 1. Starting with an angle returned by the _eigen_components method.\n θ\n 2. Shifting the angle by the global_shift.\n θ + s\n 3. Scaling the angle by the exponent.\n (θ + s) * e\n 4. Converting from half turns to a complex number on the unit circle.\n exp(i * pi * (θ + s) * e)\n\n Args:\n exponent: How much to scale the eigencomponents' angles by when\n computing the gate's matrix.\n global_shift: Offsets the eigenvalues of the gate at exponent=1.\n In effect, this controls a global phase factor on the gate's\n unitary matrix. The factor is:\n\n exp(i * pi * global_shift * exponent)\n\n For example, `cirq.X**t` uses a `global_shift` of 0 but\n `cirq.Rx(t)` uses a `global_shift` of -0.5, which is why\n `cirq.unitary(cirq.Rx(pi))` equals -iX instead of X.\n \"\"\"\n self._exponent = exponent\n self._global_shift = global_shift\n self._canonical_exponent_cached = None\n\n # virtual method\n def _with_exponent(self: TSelf,\n exponent: Union[value.Symbol, float]) -> TSelf:\n \"\"\"Return the same kind of gate, but with a different exponent.\n\n Child classes should override this method if they have an __init__\n method with a differing signature.\n \"\"\"\n # pylint: disable=unexpected-keyword-arg\n if self._global_shift == 0:\n return type(self)(exponent=exponent)\n return type(self)(\n exponent=exponent,\n global_shift=self._global_shift)\n # pylint: enable=unexpected-keyword-arg\n\n @abc.abstractmethod\n def _eigen_components(self) -> List[Union[EigenComponent,\n Tuple[float, np.ndarray]]]:\n \"\"\"Describes the eigendecomposition of the gate's matrix.\n\n Returns:\n A list of EigenComponent tuples. Each tuple in the list\n corresponds to one of the eigenspaces of the gate's matrix. Each\n tuple has two elements. The first element of a tuple is the θ in\n λ = exp(i π θ) (where λ is the eigenvalue of the eigenspace). The\n second element is a projection matrix onto the eigenspace.\n\n Examples:\n The Pauli Z gate's eigencomponents are:\n\n [\n (0, np.array([[1, 0],\n [0, 0]])),\n (1, np.array([[0, 0],\n [0, 1]])),\n ]\n\n Valid eigencomponents for Rz(π) = -iZ are:\n\n [\n (-0.5, np.array([[1, 0],\n [0, 0]])),\n (+0.5, np.array([[0, 0],\n [0, 1]])),\n ]\n\n But in principle you could also use this:\n\n [\n (+1.5, np.array([[1, 0],\n [0, 0]])),\n (-0.5, np.array([[0, 0],\n [0, 1]])),\n ]\n\n The choice between -0.5 and +1.5 does not affect the gate's\n matrix, but it does affect the matrix of powers of the gates\n (because (x+2)*s != x*s (mod 2) when s is a real number).\n\n The Pauli X gate's eigencomponents are:\n\n [\n (0, np.array([[0.5, 0.5],\n [0.5, 0.5]])),\n (1, np.array([[+0.5, -0.5],\n [-0.5, +0.5]])),\n ]\n \"\"\"\n pass\n\n def _period(self) -> Optional[float]:\n \"\"\"Determines how the exponent parameter is canonicalized when equating.\n\n Returns:\n None if the exponent should not be canonicalized. Otherwise a float\n indicating the period of the exponent. If the period is p, then a\n given exponent will be shifted by p until it is in the range\n (-p/2, p/2] during initialization.\n \"\"\"\n exponents = [e + self._global_shift\n for e, _ in self._eigen_components()]\n real_periods = [abs(2/e) for e in exponents if e != 0]\n return _approximate_common_period(real_periods)\n\n def __pow__(self: TSelf, exponent: Union[float, value.Symbol]) -> TSelf:\n new_exponent = protocols.mul(self._exponent, exponent, NotImplemented)\n if new_exponent is NotImplemented:\n return NotImplemented\n return self._with_exponent(exponent=new_exponent)\n\n @property\n def _canonical_exponent(self):\n if self._canonical_exponent_cached is None:\n period = self._period()\n if not period or isinstance(self._exponent, value.Symbol):\n self._canonical_exponent_cached = self._exponent\n else:\n self._canonical_exponent_cached = self._exponent % period\n return self._canonical_exponent_cached\n\n def _identity_tuple(self):\n return (type(self),\n self._canonical_exponent,\n self._global_shift)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self._identity_tuple() == other._identity_tuple()\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash(self._identity_tuple())\n\n def _trace_distance_bound_(self):\n if isinstance(self._exponent, value.Symbol):\n return 1\n\n angles = [half_turns for half_turns, _ in self._eigen_components()]\n min_angle = min(angles)\n max_angle = max(angles)\n return abs((max_angle - min_angle) * self._exponent * 3.5)\n\n def _has_unitary_(self) -> bool:\n return not self._is_parameterized_()\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n if self._is_parameterized_():\n return NotImplemented\n e = cast(float, self._exponent)\n return np.sum([\n component * 1j**(\n 2 * e * (half_turns + self._global_shift))\n for half_turns, component in self._eigen_components()\n ], axis=0)\n\n def _is_parameterized_(self) -> bool:\n return isinstance(self._exponent, value.Symbol)\n\n def _resolve_parameters_(self: TSelf, param_resolver) -> TSelf:\n return self._with_exponent(\n exponent=param_resolver.value_of(self._exponent))\n\n\ndef _lcm(vals: Iterable[int]) -> int:\n t = 1\n for r in vals:\n t = t * r // fractions.gcd(t, r)\n return t\n\n\ndef _approximate_common_period(periods: List[float],\n approx_denom: int = 60,\n reject_atol: float = 1e-8) -> Optional[float]:\n \"\"\"Finds a value that is nearly an integer multiple of multiple periods.\n\n The returned value should be the smallest non-negative number with this\n property. If `approx_denom` is too small the computation can fail to satisfy\n the `reject_atol` criteria and return `None`. This is actually desirable\n behavior, since otherwise the code would e.g. return a nonsense value when\n asked to compute the common period of `np.e` and `np.pi`.\n\n Args:\n periods: The result must be an approximate integer multiple of each of\n these.\n approx_denom: Determines how the floating point values are rounded into\n rational values (so that integer methods such as lcm can be used).\n Each floating point value f_k will be rounded to a rational number\n of the form n_k / approx_denom. If you want to recognize rational\n periods of the form i/d then d should divide `approx_denom`.\n reject_atol: If the computed approximate common period is at least this\n far from an integer multiple of any of the given periods, then it\n is discarded and `None` is returned instead.\n\n Returns:\n The approximate common period, or else `None` if the given\n `approx_denom` wasn't sufficient to approximate the common period to\n within the given `reject_atol`.\n \"\"\"\n if not periods:\n return None\n if any(e == 0 for e in periods):\n return None\n approx_rational_periods = [\n fractions.Fraction(int(np.round(p * approx_denom)), approx_denom)\n for p in periods\n ]\n common = float(_common_rational_period(approx_rational_periods))\n\n for p in periods:\n if p != 0 and abs(p * np.round(common / p) - common) > reject_atol:\n return None\n\n return common\n\n\ndef _common_rational_period(rational_periods: List[fractions.Fraction]\n ) -> fractions.Fraction:\n \"\"\"Finds the least common integer multiple of some fractions.\n\n The solution is the smallest positive integer c such that there\n exists integers n_k satisfying p_k * n_k = c for all k.\n \"\"\"\n assert rational_periods, \"no well-defined solution for an empty list\"\n common_denom = _lcm(p.denominator for p in rational_periods)\n int_periods = [p.numerator * common_denom // p.denominator\n for p in rational_periods]\n int_common_period = _lcm(int_periods)\n return fractions.Fraction(int_common_period, common_denom)\n" ]
[ [ "numpy.round" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dasushi/silkymitties-server
[ "8314de95647fefb8556e0ba7aff96545a405088c" ]
[ "core/lstm.py" ]
[ "import numpy as np\nfrom keras.models import Sequential, Model, model_from_json\nfrom keras.layers import Input\nfrom keras.layers.core import Dense, Activation, Dropout, Merge, RepeatVector\nfrom keras.layers.recurrent import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.utils.visualize_util import plot\nimport keras.backend as K\nimport json\nimport random\n#from IPython.display import SVG\n#from keras.utils.visualize_util import plot\nfrom pymongo import MongoClient\nfrom math import ceil\n\n#fix random number for repeatability\n#np.random.seed(17)\n\ninput_dim = 3 #input dimensions of fused sensor data\nnb_timesteps = 2000 #maximum amount of samples/timesteps per shot\nnb_output_class = 3 #slap, snap, wrist, none\nnb_batch_size = 36 #number of samples per batch\ntrain_test_ratio = 0.6\nnb_input_multi = 6\nmodel_filename = 'LSTM_silkymitties.json'\nweights_filename = 'LSTM_silkymitties_weights.h5'\n\n\n\ndef load_training_data():\n client = MongoClient('localhost', 27017)\n db = client['restdb']\n handedness = []\n speed = []\n accuracy = []\n shotTypes = []\n y = []\n labelled_values = db['testlblfusedshots'].find()\n nb_input_samples = labelled_values.count()\n x_upper = np.empty(shape=[nb_input_samples,nb_timesteps,input_dim])\n x_lower = np.empty(shape=[nb_input_samples,nb_timesteps,input_dim])\n handedness = np.zeros(shape=[nb_input_samples,2], dtype='float')\n #speed = np.empty(shape=[nb_input_samples])\n #accuracy = np.empty(shape=[nb_input_samples])\n shotTypes = np.zeros(shape=[nb_input_samples,nb_output_class], dtype='float')\n #slapshotTypes = np.empty(shape=[nb_input_samples])\n #y = np.empty(shape=[nb_input_samples,nb_output_class])\n index = 0\n for target in labelled_values:\n upperTheta = np.vstack((target['upperTheta']['uthetaX'], target['upperTheta']['uthetaY'], target['upperTheta']['uthetaZ']))\n lowerTheta = np.vstack((target['lowerTheta']['lthetaX'], target['lowerTheta']['lthetaY'], target['lowerTheta']['lthetaZ']))\n normalizedUpperTheta = upperTheta / 180.0\n normalizedLowerTheta = lowerTheta / 180.0\n x_upper[index] = sequence.pad_sequences(normalizedUpperTheta, maxlen=nb_timesteps, dtype='float', padding='post', truncating='post', value=0.).T\n x_lower[index] = sequence.pad_sequences(normalizedLowerTheta, maxlen=nb_timesteps, dtype='float', padding='post', truncating='post', value=0.).T\n shotTypes[index,shotTypeToInt(target['shotType'])] = 1.0\n #slapshotTypes[index] = isSlapShot(target['shotType'])\n handedness[index,isLeftHanded(target['handedness'])] = 1.0\n #speed = nb.append(speed, target['speed']], axis=0)\n #accuracy = nb.append(accuracy, ['accuracy'], axis=0)\n index+=1\n\n #for size in range(20, nb_input_samples, 20):\n # trainIndex = round(size * train_test_ratio)\n # nb_epochs = ceil(size / nb_batch_size)\n # trainShotTypeCompileFit(nb_epochs, handedness[0:trainIndex], handedness[trainIndex:size], \\\n # x_upper[0:trainIndex], x_lower[0:trainIndex], shotTypes[0:trainIndex,:], \\\n # x_upper[trainIndex:size], x_lower[trainIndex:size], shotTypes[trainIndex:size,:])\n\n #Shuffle the samples in unison to decrease data clustering\n s_handedness, s_x_upper, s_x_lower, s_shotTypes = unison_shuffle(handedness,\n x_upper, x_lower, shotTypes)\n\n trainIndex = round(nb_input_samples * train_test_ratio)\n nb_epochs = ceil(nb_input_samples / nb_batch_size)\n #trainSlapShotCompileFit(nb_epochs, handedness[0:trainIndex], handedness[trainIndex:], \\\n # x_upper[0:trainIndex], x_lower[0:trainIndex], slapshotTypes[0:trainIndex], \\\n # x_upper[trainIndex:], x_lower[trainIndex:], slapshotTypes[trainIndex:])\n trainShotTypeCompileFit(nb_epochs, s_handedness[0:trainIndex], s_handedness[trainIndex:], \\\n s_x_upper[0:trainIndex], s_x_lower[0:trainIndex], s_shotTypes[0:trainIndex], \\\n s_x_upper[trainIndex:], s_x_lower[trainIndex:], s_shotTypes[trainIndex:])\n\ndef unison_shuffle(a, b, c, d):\n p = np.random.permutation(len(a))\n return a[p], b[p], c[p], d[p]\n\ndef trainSlapShotCompileFit(epoch_count, train_handedness, test_handedness,\n x_train_upper, x_train_lower, y_train, x_test_upper, x_test_lower, y_test):\n #Upper hand LSTM input\n encoder_a = Sequential()\n encoder_a.add(LSTM(output_dim=nb_input_multi*input_dim,\n batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid'))\n #Lower hand LSTM input\n encoder_b = Sequential()\n encoder_b.add(LSTM(output_dim=nb_input_multi*input_dim,\n batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid'))\n encoded_handedness = Sequential()\n encoded_handedness.add(keras.layers.core.RepeatVector(nb_timesteps))\n #Merge both LSTM units with concatenation\n merged = Merge([encoded_handedness, encoder_a, encoder_b], mode='concat')\n decoder = Sequential()\n decoder.add(merged)\n decoder.add(Dropout(0.1))\n decoder.add(Dense(input_dim*nb_input_multi*2 + 2, activation='relu'))\n decoder.add(Dropout(0.2))\n decoder.add(Dense(input_dim*2, activation='relu'))\n decoder.add(Dropout(0.3))\n #sigmoid activation instead of softmax to avoid normalizing to 1.0\n #1 output signal for the binary classification likelihood\n decoder.add(Dense(1, activation='sigmoid'))\n\n decoder.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n decoder.fit([train_handedness, x_train_upper, x_train_lower], y_train,\n batch_size=nb_batch_size, nb_epoch=epoch_count,\n validation_data=([test_handedness, x_test_upper, x_test_lower], y_test))\n\n printSummary(decoder,test_handedness,x_test_upper, x_test_lower, y_test)\n return decoder\n\ndef trainShotTypeCompileFit(epoch_count, train_handedness, test_handedness,\n x_train_upper, x_train_lower, y_train, x_test_upper, x_test_lower, y_test):\n #Upper hand LSTM input\n encoder_a = Sequential()\n encoder_a.add(LSTM(4*input_dim,\n batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid',\n return_sequences=True))\n encoder_a.add(Dropout(0.2))\n encoder_a.add(LSTM(4*input_dim, return_sequences=True,\n activation='sigmoid', inner_activation='hard_sigmoid'))\n encoder_a.add(Dropout(0.25))\n encoder_a.add(LSTM(8*input_dim, batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid'))\n\n #Lower hand LSTM input\n encoder_b = Sequential()\n encoder_b.add(LSTM(4*input_dim,\n batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid',\n return_sequences=True))\n encoder_b.add(Dropout(0.2))\n encoder_b.add(LSTM(4*input_dim, return_sequences=True,\n activation='sigmoid', inner_activation='hard_sigmoid'))\n encoder_b.add(Dropout(0.25))\n encoder_b.add(LSTM(8*input_dim, batch_input_shape=(nb_batch_size, nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid'))\n\n encoded_handedness = Sequential()\n encoded_handedness.add(Dense(2, batch_input_shape=(nb_batch_size, 2)))\n\n #Merge both LSTM units with concatenation\n merged = Merge([encoded_handedness, encoder_a, encoder_b], mode='concat')\n decoder = Sequential()\n decoder.add(merged)\n #decoder.add(Dropout(0.25))\n #Use CNNs to expand then shrink to desired output signal\n decoder.add(Dropout(0.25))\n decoder.add(Dense(input_dim*8, activation='sigmoid'))\n decoder.add(Dropout(0.25))\n decoder.add(Dense(output_dim=(2*nb_output_class), activation='sigmoid'))\n decoder.add(Dropout(0.3))\n decoder.add(Dense(nb_output_class, activation='softmax'))\n\n decoder.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n decoder.fit([train_handedness, x_train_upper, x_train_lower], y_train,\n batch_size=nb_batch_size, nb_epoch=epoch_count,\n validation_data=([test_handedness, x_test_upper, x_test_lower], y_test))\n\n printSummary(decoder, test_handedness, x_test_upper, x_test_lower, y_test)\n saveCompiledShotTypeModel(decoder)\n return decoder\n\ndef saveCompiledShotTypeModel(decoder):\n saved_model = decoder.to_json()\n with open('LSTM_silkymitties_ShotType.json', 'w') as outfile:\n json.dump(saved_model, outfile)\n decoder.save_weights('LSTM_silkymitties_ShotType_weights.h5')\n\ndef loadCompiledShotTypeModel():\n with open('LSTM_silkymitties_ShotType.json', 'r') as infile:\n architecture = json.load(infile)\n model = model_from_json(architecture)\n model.load_weights('LSTM_silkymitties_ShotType_weights.h5')\n return model\n\ndef predictShotTypeResult(fusedShotID):\n client = MongoClient('localhost', 27017)\n db = client['restdb']\n shot = db['fusedshots'].find_one(fusedShotID)\n x_upper = np.empty(shape=[1,nb_timesteps,input_dim])\n x_lower = np.empty(shape=[1,nb_timesteps,input_dim])\n raw_x_upper = np.vstack((shot['upperTheta']['uthetaX'],\n shot['upperTheta']['uthetaY'], shot['upperTheta']['uthetaZ']))\n raw_x_lower = np.vstack((shot['lowerTheta']['lthetaX'],\n shot['lowerTheta']['lthetaY'], shot['lowerTheta']['lthetaZ']))\n normalizedUpperTheta = raw_x_upper / 180.0\n normalizedLowerTheta = raw_x_lower / 180.0\n x_upper[0] = sequence.pad_sequences(normalizedUpperTheta, maxlen=nb_timesteps,\n dtype='float', padding='post', truncating='post', value=0.).T\n x_lower[0] = sequence.pad_sequences(normalizedLowerTheta, maxlen=nb_timesteps,\n dtype='float', padding='post', truncating='post', value=0.).T\n handedness = np.zeros(shape=[1,2])\n handedness[0,isLeftHanded(shot['handedness'])] = 1.0\n\n print(\"Loading Model\")\n model = loadCompiledShotTypeModel()\n print(\"Loaded Model Succesfully\")\n result = model.predict([handedness, x_upper, x_lower], batch_size=1)\n print(\"Result: \" + str(result))\n resultIndex = result.argmax()\n print(resultIndex)\n shotTypeResult = shotTypeToString(resultIndex)\n print(shotTypeResult)\n return shotTypeResult\n\ndef trainShotSpeedCompileFit(speed, test_handedness, x_train_upper, x_train_lower, y_train, x_test_upper, x_test_lower, y_test):\n #Upper hand LSTM input\n encoder_a = Sequential()\n encoder_a.add(LSTM(output_dim=2*input_dim,\n input_shape=(nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid'))\n #Lower hand LSTM input\n encoder_b = Sequential()\n encoder_b.add(LSTM(output_dim=2*input_dim,\n input_shape=(nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid'))\n\n #Merge both LSTM units with concatenation\n merged = Merge([speed, encoder_a, encoder_b], mode='concat')\n decoder = Sequential()\n decoder.add(merged)\n #Use CNNs to reduce to desired intermediate shape\n decoder.add(Dense(output_dim=2*input_dim, activation='relu'))\n #sigmoid activation instead of softmax to avoid normalizing to 1.0\n #1 output signal for the binary classification likelihood\n decoder.add(Dense(1, activation='sigmoid'))\n\n decoder.compile(loss='mae', optimizer='rmsprop', metrics=['accuracy'])\n decoder.fit([x_train_upper, x_train_lower], y_train,\n batch_size=4*input_dim, nb_epoch=nb_epoch,\n validation_data=([x_test_upper, x_test_lower], y_test))\n\n printSummary(decoder,x_test_upper, x_test_lower, y_test)\n return decoder\n\ndef saveCompiledShotSpeedModel(decoder):\n saved_model = decoder.to_json()\n with open('LSTM_silkymitties_ShotSpeed.json', 'w') as outfile:\n json.dump(saved_model, outfile)\n decoder.save_weights('LSTM_silkymitties_ShotSpeed_weights.h5')\n\ndef loadCompiledShotSpeedModel():\n with open('LSTM_silkymitties_ShotSpeed.json', 'r') as infile:\n architecture = json.load(infile)\n model = model_from_json(architecture)\n model.load_weights('LSTM_silkymitties_ShotSpeed_weights.h5')\n return model\n\ndef trainShotAccuracyCompileFit(accuracy, test_handedness, x_train_upper,\n x_train_lower, y_train, x_test_upper, x_test_lower, y_test):\n #Upper hand LSTM input\n encoder_a = Sequential()\n encoder_a.add(LSTM(output_dim=2*input_dim,\n input_shape=(nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid'))\n #Lower hand LSTM input\n encoder_b = Sequential()\n encoder_b.add(LSTM(output_dim=2*input_dim,\n input_shape=(nb_timesteps, input_dim),\n activation='sigmoid', inner_activation='hard_sigmoid'))\n\n #Merge both LSTM units with concatenation\n merged = Merge([accuracy, encoder_a, encoder_b], mode='concat')\n decoder = Sequential()\n decoder.add(merged)\n #Use CNNs to reduce to desired output classes\n decoder.add(Dense(output_dim=2*input_dim, activation='relu'))\n #decoder.add(Dropout(0.5))\n decoder.add(Dense(1, activation='softmax'))\n\n decoder.compile(loss='mse', optimizer='rmsprop', metrics=['accuracy'])\n decoder.fit([x_train_upper, x_train_lower], y_train,\n batch_size=4*input_dim, nb_epoch=nb_epoch,\n validation_data=([x_test_upper, x_test_lower], y_test))\n\n printSummary(decoder,x_test, y_test)\n return decoder\n\ndef saveCompiledShotAccuracyModel(decoder):\n saved_model = decoder.to_json()\n with open('LSTM_silkymitties_ShotAccuracy.json', 'w') as outfile:\n json.dump(saved_model, outfile)\n decoder.save_weights('LSTM_silkymitties_ShotAccuracy_weights.h5')\n\ndef loadCompiledShotAccuracyModel():\n with open('LSTM_silkymitties_ShotAccuracy.json', 'r') as infile:\n architecture = json.load(infile)\n model = model_from_json(architecture)\n model.load_weights('LSTM_silkymitties_ShotAccuracy_weights.h5')\n return model\n\n\ndef mean_pred(y_true, y_pred):\n return K.mean(y_pred)\n\n\ndef isLeftHanded(handedness):\n if str(handedness) == \"L\":\n return 1\n else:\n return 0\n\ndef isSlapShot(shotType):\n if str(shotType) == \"slap\" or str(shotType) == \"Slap\":\n return 1\n else:\n return 0\n\ndef shotTypeToInt(shotType):\n stringShot = str(shotType)\n #if stringShot == \"notashot\" or stringShot == \"none\" or stringShot == \"NoShot\":\n # return 0\n if stringShot == \"slap\" or stringShot == \"Slap\":\n return 0\n elif stringShot == \"wrist\" or stringShot == \"Wrist\":\n return 1\n elif stringShot == \"snap\" or stringShot == \"Snap\":\n return 2\n\ndef shotTypeToString(shotType):\n #if stringShot == \"notashot\" or stringShot == \"none\" or stringShot == \"NoShot\":\n # return 0\n if shotType == 0:\n return \"Slap\"\n elif shotType == 1:\n return \"Wrist\"\n elif shotType == 2:\n return \"Snap\"\n\n\ndef printSummary(decoder, test_handedness, x_test_upper, x_test_lower, y_test):\n #print(decoder.summary())\n scores = decoder.evaluate([test_handedness, x_test_upper, x_test_lower],\n y_test, verbose=0, batch_size=nb_batch_size)\n print(\"Accuracy: %2f%%\" % (scores[1]*100))\n print(scores)\n plot(decoder, to_file='model.png', show_shapes=True)\n #plot(decoder, to_file='model.png')\n\nif __name__=='__main__':\n #loadFromFile(True)\n #fuseLabelledShots()\n load_training_data()\n" ]
[ [ "numpy.vstack", "numpy.zeros", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AlessandroPierro/trajectory-optimization
[ "e474af45067894b11eea263cf07d850c5537767c" ]
[ "problema_test/smoothing.py" ]
[ "# Importing numerical libraries\nimport numpy as np\nfrom scipy import optimize\n\n# Importing plotting libraries\nfrom bokeh.plotting import figure, show\nfrom bokeh.io import export_png\n\n\nexport = True\n\n\ndef solve_NLP(N, smoothing=False):\n\n if N % 2 == 0:\n N += 1 # assicura che il numero dei knot points sia dispari\n\n # Defining problem bound conditions\n\n a = 0\n b = 1\n\n x_start = 0\n x_end = 1\n v_start = 0\n v_end = 0\n\n t = np.linspace(a, b, num=N)\n h = t[1:N] - t[0:N-1]\n\n # Defining the objective function (using quadrature)\n if smoothing:\n def objective_function(X):\n return 0.5 * np.dot(h, (np.power(X[2*N:3*N-1], 2) + np.power(X[2*N+1:3*N], 2))) + np.sum(np.power(X[2*N+1:3*N] - X[2*N:3*N-1], 2))\n else:\n def objective_function(X):\n return 0.5 * np.dot(h, (np.power(X[2*N:3*N-1], 2) + np.power(X[2*N+1:3*N], 2)))\n\n # Setting bound conditions\n\n lb = np.full(3*N, -np.inf, dtype=float)\n ub = np.full(3*N, +np.inf, dtype=float)\n\n lb[0] = x_start\n ub[0] = x_start\n lb[N-1] = x_end\n ub[N-1] = x_end\n lb[N] = v_start\n ub[N] = v_start\n lb[2*N-1] = v_end\n ub[2*N-1] = v_end\n\n bounds = optimize.Bounds(lb, ub)\n\n # Setting dynamics constraints\n\n constraints = []\n\n # Setting Simpson collocation constraints\n\n for i in range(0, N-2, 2):\n\n def fun_position_collocation(\n X, i=i): return X[i+2] - X[i] - ((h[i]+h[i+1])/6) * (X[N+i+2] + 4 * X[N+i+1] + X[N+i])\n\n def fun_velocity_collocation(X, i=i): return X[N+i+2] - X[N+i] - (\n (h[i]+h[i+1])/6) * (X[2*N+i+2] + 4 * X[2*N+i+2] + X[2*N+i])\n\n def fun_position_interpolation(\n X, i=i): return X[i+1] - 0.5 * (X[i] + X[i+2]) - ((h[i]+h[i+1])/8) * (X[N+i] - X[N+i+2])\n def fun_velocity_interpolation(X, i=i): return X[N+i+1] - 0.5 * (\n X[N+i] + X[N+i+2]) - ((h[i]+h[i+1])/8) * (X[2*N+i] - X[2*N+i+2])\n\n const_position_collocation = {\n \"fun\": fun_position_collocation, \"type\": \"eq\"}\n const_velocity_collocation = {\n \"fun\": fun_velocity_collocation, \"type\": \"eq\"}\n const_position_interpolation = {\n \"fun\": fun_position_interpolation, \"type\": \"eq\"}\n const_velocity_interpolation = {\n \"fun\": fun_velocity_interpolation, \"type\": \"eq\"}\n\n constraints.append(const_position_collocation)\n constraints.append(const_velocity_collocation)\n constraints.append(const_position_interpolation)\n constraints.append(const_velocity_interpolation)\n\n # Setting an initial guess\n initial_guess = np.zeros(3*N)\n initial_guess[0:N] = np.linspace(x_start, x_end, num=N)\n initial_guess[N:2*N] = 1\n\n # Utility function called at the end of each iteration of the optimizer\n\n def callback(a):\n b = a\n #plt.plot(t, X[0:N])\n\n # Solving the optimization problem\n\n res = optimize.minimize(\n objective_function,\n initial_guess,\n method=\"SLSQP\",\n bounds=bounds,\n constraints=constraints,\n callback=callback\n )\n\n # Returning optimization results\n\n x = res.x[0:N]\n v = res.x[N:2*N]\n u = res.x[2*N:3*N]\n\n print(res.message)\n\n return t, x, v, u\n\n\nif __name__ == '__main__':\n t, x, v, u = solve_NLP(40)\n t_smoothed, x_smoothed, v_smoothed, u_smoothed = solve_NLP(\n 40, smoothing=True)\n\n z = np.linspace(0, 1, num=1000)\n\n # Plotting position s(t)\n p = figure(x_axis_label='Time t', y_axis_label='Position x(t)')\n p.toolbar_location = None\n p.line(z, 3*z**2 - 2*z**3, line_width=2, line_color=\"orange\")\n p.line(t, x, line_width=2, line_color=\"red\")\n p.line(t_smoothed, x_smoothed, line_width=2, line_color=\"green\")\n if export:\n export_png(p, filename=\"smoothing_problema_test_position.png\",\n width=400, height=250)\n else:\n show(p)\n\n # Plotting velocity v(t)\n p = figure(x_axis_label='Time t', y_axis_label='Velocity v(t)')\n p.toolbar_location = None\n p.line(z, 6*z - 6*z**2, line_width=2, line_color=\"orange\")\n p.line(t, v, line_width=2, line_color=\"red\")\n p.line(t_smoothed, v_smoothed, line_width=2, line_color=\"green\")\n if export:\n export_png(p, filename=\"smoothing_problema_test_velocity.png\",\n width=400, height=250)\n else:\n show(p)\n\n # Plotting control function u(t)\n p = figure(x_axis_label='Time t', y_axis_label='Control u(t)')\n p.toolbar_location = None\n p.line(t, 6-12*t, line_width=2, line_color=\"orange\")\n p.line(t, u, line_width=2, line_color=\"red\")\n p.line(t, u_smoothed, line_width=2, line_color=\"green\")\n if export:\n export_png(p, filename=\"smoothing_problema_test_control.png\",\n width=400, height=250)\n else:\n show(p)\n" ]
[ [ "numpy.linspace", "numpy.power", "numpy.full", "scipy.optimize.Bounds", "scipy.optimize.minimize", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "1.5", "1.7", "1.2", "1.8" ], "tensorflow": [] } ]
yupeng-zglue/tensorflow
[ "2eb1429580a15af5de0751da1ab9b750eb07bea8" ]
[ "tensorflow/python/keras/engine/base_layer_v1.py" ]
[ "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# pylint: disable=protected-access\n\"\"\"Contains the base Layer class, from which all layers inherit.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport functools\nimport itertools\nimport threading\n\nimport numpy as np\nimport six\nfrom six.moves import zip # pylint: disable=redefined-builtin\n\nfrom tensorflow.python.autograph.core import ag_ctx\nfrom tensorflow.python.autograph.impl import api as autograph\nfrom tensorflow.python.distribute import distribution_strategy_context as ds_context\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import func_graph\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend\nfrom tensorflow.python.keras import constraints\nfrom tensorflow.python.keras import initializers\nfrom tensorflow.python.keras import regularizers\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import base_layer_utils\nfrom tensorflow.python.keras.engine import input_spec\nfrom tensorflow.python.keras.engine import node as node_module\nfrom tensorflow.python.keras.mixed_precision.experimental import autocast_variable\nfrom tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer\nfrom tensorflow.python.keras.mixed_precision.experimental import policy\nfrom tensorflow.python.keras.saving.saved_model import layer_serialization\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils import layer_utils\nfrom tensorflow.python.keras.utils import tf_utils\n# A module that only depends on `keras.layers` import these from here.\nfrom tensorflow.python.keras.utils.generic_utils import to_snake_case # pylint: disable=unused-import\nfrom tensorflow.python.keras.utils.tf_utils import is_tensor_or_tensor_list # pylint: disable=unused-import\nfrom tensorflow.python.module import module\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.platform import tf_logging\nfrom tensorflow.python.training.tracking import base as trackable\nfrom tensorflow.python.training.tracking import data_structures\nfrom tensorflow.python.training.tracking import layer_utils as trackable_layer_utils\nfrom tensorflow.python.training.tracking import tracking\nfrom tensorflow.python.util import deprecation\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util import object_identity\nfrom tensorflow.python.util import tf_inspect\nfrom tensorflow.tools.docs import doc_controls\n\n\n# pylint: disable=g-classes-have-attributes\nclass Layer(base_layer.Layer):\n \"\"\"Base layer class.\n\n This is the class from which all layers inherit.\n\n A layer is a class implementing common neural networks operations, such\n as convolution, batch norm, etc. These operations require managing weights,\n losses, updates, and inter-layer connectivity.\n\n Users will just instantiate a layer and then treat it as a callable.\n\n We recommend that descendants of `Layer` implement the following methods:\n\n * `__init__()`: Save configuration in member variables\n * `build()`: Called once from `__call__`, when we know the shapes of inputs\n and `dtype`. Should have the calls to `add_weight()`, and then\n call the super's `build()` (which sets `self.built = True`, which is\n nice in case the user wants to call `build()` manually before the\n first `__call__`).\n * `call()`: Called in `__call__` after making sure `build()` has been called\n once. Should actually perform the logic of applying the layer to the\n input tensors (which should be passed in as the first argument).\n\n Arguments:\n trainable: Boolean, whether the layer's variables should be trainable.\n name: String name of the layer.\n dtype: The dtype of the layer's computations and weights (default of\n `None` means use `tf.keras.backend.floatx` in TensorFlow 2, or the type\n of the first input in TensorFlow 1).\n dynamic: Set this to `True` if your layer should only be run eagerly, and\n should not be used to generate a static computation graph.\n This would be the case for a Tree-RNN or a recursive network,\n for example, or generally for any layer that manipulates tensors\n using Python control flow. If `False`, we assume that the layer can\n safely be used to generate a static computation graph.\n\n Attributes:\n name: The name of the layer (string).\n dtype: The dtype of the layer's computations and weights. If mixed\n precision is used with a `tf.keras.mixed_precision.experimental.Policy`,\n this is instead just the dtype of the layer's weights, as the computations\n are done in a different dtype.\n updates: List of update ops of this layer.\n losses: List of losses added by this layer.\n trainable_weights: List of variables to be included in backprop.\n non_trainable_weights: List of variables that should not be\n included in backprop.\n weights: The concatenation of the lists trainable_weights and\n non_trainable_weights (in this order).\n trainable: Whether the layer should be trained (boolean).\n input_spec: Optional (list of) `InputSpec` object(s) specifying the\n constraints on inputs that can be accepted by the layer.\n\n Each layer has a dtype, which is typically the dtype of the layer's\n computations and variables. A layer's dtype can be queried via the\n `Layer.dtype` property. The dtype is specified with the `dtype` constructor\n argument. In TensorFlow 2, the dtype defaults to `tf.keras.backend.floatx()`\n if no dtype is passed. `floatx()` itself defaults to \"float32\". Additionally,\n layers will cast their inputs to the layer's dtype in TensorFlow 2. When mixed\n precision is used, layers may have different computation and variable dtypes.\n See `tf.keras.mixed_precision.experimental.Policy` for details on layer\n dtypes.\n \"\"\"\n\n # See tf.Module for the usage of this property.\n # The key for _obj_reference_counts_dict is a Trackable, which could be a\n # variable or layer etc. tf.Module._flatten will fail to flatten the key\n # since it is trying to convert Trackable to a string. This attribute can be\n # ignored even after the fix of nest lib, since the trackable object should\n # already been available as individual attributes. _obj_reference_counts_dict\n # just contains a copy of them.\n _TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(\n ('_obj_reference_counts_dict',),\n module.Module._TF_MODULE_IGNORED_PROPERTIES\n ))\n\n @trackable.no_automatic_dependency_tracking\n def __init__(self, trainable=True, name=None, dtype=None, dynamic=False,\n **kwargs):\n # These properties should be set by the user via keyword arguments.\n # note that 'dtype', 'input_shape' and 'batch_input_shape'\n # are only applicable to input layers: do not pass these keywords\n # to non-input layers.\n allowed_kwargs = {\n 'input_shape',\n 'batch_input_shape',\n 'batch_size',\n 'weights',\n 'activity_regularizer',\n 'autocast'\n }\n # Validate optional keyword arguments.\n generic_utils.validate_kwargs(kwargs, allowed_kwargs)\n\n # Mutable properties\n # Indicates whether the layer's weights are updated during training\n # and whether the layer's updates are run during training.\n self._trainable = trainable\n # A stateful layer is a layer whose updates are run during inference too,\n # for instance stateful RNNs.\n self._stateful = False\n # Indicates whether `build` needs to be called upon layer call, to create\n # the layer's weights.\n self.built = False\n self._build_input_shape = None\n # Provides information about which inputs are compatible with the layer.\n self._input_spec = None\n self.supports_masking = False\n self._supports_ragged_inputs = False\n\n self._init_set_name(name)\n self._activity_regularizer = kwargs.pop('activity_regularizer', None)\n self._maybe_create_attribute('_trainable_weights', [])\n self._maybe_create_attribute('_non_trainable_weights', [])\n self._updates = []\n # Object to store all thread local layer properties.\n self._thread_local = threading.local()\n # A list of zero-argument lambdas which return Tensors, used for variable\n # regularizers.\n self._callable_losses = []\n # A list of symbolic Tensors containing activity regularizers and losses\n # manually added through `add_loss` in graph-building mode.\n self._losses = []\n # A list of metric instances corresponding to the symbolic metric tensors\n # added using the `add_metric` API.\n self._metrics = []\n\n # Both graph and subclassed networks have a dtype policy. For graph\n # networks, the policy's compute and variable dtypes are ignored, but other\n # fields, like the loss scale, are used by Models. For subclassed networks,\n # the compute and variable dtypes are used as like any ordinary layer.\n self._set_dtype_policy(dtype)\n # Boolean indicating whether the layer automatically casts its inputs to the\n # layer's compute_dtype.\n self._autocast = kwargs.get('autocast',\n base_layer_utils.v2_dtype_behavior_enabled())\n\n # Dependencies tracked via attribute assignment.\n # All layers in order of horizontal graph traversal.\n # Entries are unique. For models includes input and output layers.\n self._maybe_create_attribute('_layers', [])\n\n # These lists will be filled via successive calls\n # to self._add_inbound_node().\n # Used in symbolic mode only, only in conjunction with graph-networks\n self._inbound_nodes = []\n self._outbound_nodes = []\n\n self._init_call_fn_args()\n\n # Whether the `call` method can be used to build a TF graph without issues.\n # This attribute has no effect if the model is created using the Functional\n # API. Instead, `model.dynamic` is determined based on the internal layers.\n self._dynamic = dynamic\n\n # Manage input shape information if passed.\n if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:\n # In this case we will later create an input layer\n # to insert before the current layer\n if 'batch_input_shape' in kwargs:\n batch_input_shape = tuple(kwargs['batch_input_shape'])\n elif 'input_shape' in kwargs:\n if 'batch_size' in kwargs:\n batch_size = kwargs['batch_size']\n else:\n batch_size = None\n batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])\n self._batch_input_shape = batch_input_shape\n\n # Manage initial weight values if passed.\n self._initial_weights = kwargs.get('weights', None)\n\n # Whether the layer will track any layers that is set as attribute on itself\n # as sub-layers, the weights from the sub-layers will be included in the\n # parent layer's variables() as well.\n # Default to True, which means auto tracking is turned on. Certain subclass\n # might want to turn it off, like Sequential model.\n self._auto_track_sub_layers = True\n\n @trackable.no_automatic_dependency_tracking\n @generic_utils.default\n def build(self, input_shape):\n \"\"\"Creates the variables of the layer (optional, for subclass implementers).\n\n This is a method that implementers of subclasses of `Layer` or `Model`\n can override if they need a state-creation step in-between\n layer instantiation and layer call.\n\n This is typically used to create the weights of `Layer` subclasses.\n\n Arguments:\n input_shape: Instance of `TensorShape`, or list of instances of\n `TensorShape` if the layer expects a list of inputs\n (one instance per input).\n \"\"\"\n if not hasattr(self.build, '_is_default'):\n self._build_input_shape = input_shape\n self.built = True\n\n @doc_controls.for_subclass_implementers\n def call(self, inputs, **kwargs): # pylint: disable=unused-argument\n \"\"\"This is where the layer's logic lives.\n\n Arguments:\n inputs: Input tensor, or list/tuple of input tensors.\n **kwargs: Additional keyword arguments.\n\n Returns:\n A tensor or list/tuple of tensors.\n \"\"\"\n return inputs\n\n @doc_controls.for_subclass_implementers\n def _add_trackable(self, trackable_object, trainable):\n \"\"\"Adds a Trackable object to this layer's state.\n\n Arguments:\n trackable_object: The tf.tracking.Trackable object to add.\n trainable: Boolean, whether the variable should be part of the layer's\n \"trainable_variables\" (e.g. variables, biases) or\n \"non_trainable_variables\" (e.g. BatchNorm mean and variance).\n\n Returns:\n The TrackableWeightHandler used to track this object.\n \"\"\"\n handler = base_layer_utils.TrackableWeightHandler(trackable_object)\n if trainable:\n self._trainable_weights.append(handler)\n else:\n self._non_trainable_weights.append(handler)\n return handler\n\n @doc_controls.for_subclass_implementers\n def add_weight(self,\n name=None,\n shape=None,\n dtype=None,\n initializer=None,\n regularizer=None,\n trainable=None,\n constraint=None,\n partitioner=None,\n use_resource=None,\n synchronization=tf_variables.VariableSynchronization.AUTO,\n aggregation=tf_variables.VariableAggregation.NONE,\n **kwargs):\n \"\"\"Adds a new variable to the layer.\n\n Arguments:\n name: Variable name.\n shape: Variable shape. Defaults to scalar if unspecified.\n dtype: The type of the variable. Defaults to `self.dtype` or `float32`.\n initializer: Initializer instance (callable).\n regularizer: Regularizer instance (callable).\n trainable: Boolean, whether the variable should be part of the layer's\n \"trainable_variables\" (e.g. variables, biases)\n or \"non_trainable_variables\" (e.g. BatchNorm mean and variance).\n Note that `trainable` cannot be `True` if `synchronization`\n is set to `ON_READ`.\n constraint: Constraint instance (callable).\n partitioner: Partitioner to be passed to the `Trackable` API.\n use_resource: Whether to use `ResourceVariable`.\n synchronization: Indicates when a distributed a variable will be\n aggregated. Accepted values are constants defined in the class\n `tf.VariableSynchronization`. By default the synchronization is set to\n `AUTO` and the current `DistributionStrategy` chooses\n when to synchronize. If `synchronization` is set to `ON_READ`,\n `trainable` must not be set to `True`.\n aggregation: Indicates how a distributed variable will be aggregated.\n Accepted values are constants defined in the class\n `tf.VariableAggregation`.\n **kwargs: Additional keyword arguments. Accepted values are `getter`,\n `collections`, `experimental_autocast` and `caching_device`.\n\n Returns:\n The created variable. Usually either a `Variable` or `ResourceVariable`\n instance. If `partitioner` is not `None`, a `PartitionedVariable`\n instance is returned.\n\n Raises:\n RuntimeError: If called with partitioned variable regularization and\n eager execution is enabled.\n ValueError: When giving unsupported dtype and no initializer or when\n trainable has been set to True with synchronization set as `ON_READ`.\n \"\"\"\n if shape is None:\n shape = ()\n # Validate optional keyword arguments.\n for kwarg in kwargs:\n if kwarg not in ['getter', 'collections', 'experimental_autocast',\n 'caching_device']:\n raise TypeError('Unknown keyword argument:', kwarg)\n getter = kwargs.pop('getter', base_layer_utils.make_variable)\n collections_arg = kwargs.pop('collections', None)\n # 'experimental_autocast' can be set to False by the caller to indicate an\n # AutoCastVariable should never be created.\n autocast = kwargs.pop('experimental_autocast', True)\n # See the docstring for tf.Variable about the details for caching_device.\n caching_device = kwargs.pop('caching_device', None)\n\n if dtype is None:\n dtype = self.dtype or backend.floatx()\n dtype = dtypes.as_dtype(dtype)\n if self._dtype_policy.variable_dtype is None:\n # The policy is \"_infer\", so we infer the policy from the variable dtype.\n self._dtype_policy = policy.Policy(dtype.base_dtype.name)\n initializer = initializers.get(initializer)\n regularizer = regularizers.get(regularizer)\n constraint = constraints.get(constraint)\n\n if synchronization == tf_variables.VariableSynchronization.ON_READ:\n if trainable:\n raise ValueError(\n 'Synchronization value can be set to '\n 'VariableSynchronization.ON_READ only for non-trainable variables. '\n 'You have specified trainable=True and '\n 'synchronization=VariableSynchronization.ON_READ.')\n else:\n # Set trainable to be false when variable is to be synced on read.\n trainable = False\n elif trainable is None:\n trainable = True\n\n # Initialize variable when no initializer provided\n if initializer is None:\n # If dtype is DT_FLOAT, provide a uniform unit scaling initializer\n if dtype.is_floating:\n initializer = initializers.glorot_uniform()\n # If dtype is DT_INT/DT_UINT, provide a default value `zero`\n # If dtype is DT_BOOL, provide a default value `FALSE`\n elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:\n initializer = initializers.zeros()\n # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?\n else:\n raise ValueError('An initializer for variable %s of type %s is required'\n ' for layer %s' % (name, dtype.base_dtype, self.name))\n\n if (autocast and self._dtype_policy.should_cast_variables and\n dtype.is_floating):\n # Wrap 'getter' with a version that returns an AutoCastVariable.\n old_getter = getter\n def getter(*args, **kwargs): # pylint: disable=function-redefined\n variable = old_getter(*args, **kwargs)\n return autocast_variable.create_autocast_variable(variable)\n # Also the caching_device does not work with the mixed precision API,\n # disable it if it is specified.\n # TODO(b/142020079): Reenable it once the bug is fixed.\n if caching_device is not None:\n tf_logging.warn('`caching_device` does not work with mixed precision '\n 'API. Ignoring user specified `caching_device`.')\n caching_device = None\n\n variable = self._add_variable_with_custom_getter(\n name=name,\n shape=shape,\n # TODO(allenl): a `make_variable` equivalent should be added as a\n # `Trackable` method.\n getter=getter,\n # Manage errors in Layer rather than Trackable.\n overwrite=True,\n initializer=initializer,\n dtype=dtype,\n constraint=constraint,\n trainable=trainable,\n partitioner=partitioner,\n use_resource=use_resource,\n collections=collections_arg,\n synchronization=synchronization,\n aggregation=aggregation,\n caching_device=caching_device)\n if regularizer is not None:\n # TODO(fchollet): in the future, this should be handled at the\n # level of variable creation, and weight regularization losses\n # should be variable attributes.\n name_in_scope = variable.name[:variable.name.find(':')]\n self._handle_weight_regularization(name_in_scope,\n variable,\n regularizer)\n if isinstance(variable, tf_variables.PartitionedVariable):\n for v in variable:\n backend.track_variable(v)\n if trainable:\n self._trainable_weights.append(v)\n else:\n self._non_trainable_weights.append(v)\n else:\n backend.track_variable(variable)\n if trainable:\n self._trainable_weights.append(variable)\n else:\n self._non_trainable_weights.append(variable)\n return variable\n\n @generic_utils.default\n def get_config(self):\n \"\"\"Returns the config of the layer.\n\n A layer config is a Python dictionary (serializable)\n containing the configuration of a layer.\n The same layer can be reinstantiated later\n (without its trained weights) from this configuration.\n\n The config of a layer does not include connectivity\n information, nor the layer class name. These are handled\n by `Network` (one layer of abstraction above).\n\n Returns:\n Python dictionary.\n \"\"\"\n all_args = tf_inspect.getfullargspec(self.__init__).args\n config = {'name': self.name, 'trainable': self.trainable}\n if hasattr(self, '_batch_input_shape'):\n config['batch_input_shape'] = self._batch_input_shape\n config['dtype'] = policy.serialize(self._dtype_policy)\n if hasattr(self, 'dynamic'):\n # Only include `dynamic` in the `config` if it is `True`\n if self.dynamic:\n config['dynamic'] = self.dynamic\n elif 'dynamic' in all_args:\n all_args.remove('dynamic')\n expected_args = config.keys()\n # Finds all arguments in the `__init__` that are not in the config:\n extra_args = [arg for arg in all_args if arg not in expected_args]\n # Check that either the only argument in the `__init__` is `self`,\n # or that `get_config` has been overridden:\n if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):\n raise NotImplementedError('Layers with arguments in `__init__` must '\n 'override `get_config`.')\n return config\n\n @classmethod\n def from_config(cls, config):\n \"\"\"Creates a layer from its config.\n\n This method is the reverse of `get_config`,\n capable of instantiating the same layer from the config\n dictionary. It does not handle layer connectivity\n (handled by Network), nor weights (handled by `set_weights`).\n\n Arguments:\n config: A Python dictionary, typically the\n output of get_config.\n\n Returns:\n A layer instance.\n \"\"\"\n return cls(**config)\n\n def compute_output_shape(self, input_shape):\n \"\"\"Computes the output shape of the layer.\n\n If the layer has not been built, this method will call `build` on the\n layer. This assumes that the layer will later be used with inputs that\n match the input shape provided here.\n\n Arguments:\n input_shape: Shape tuple (tuple of integers)\n or list of shape tuples (one per output tensor of the layer).\n Shape tuples can include None for free dimensions,\n instead of an integer.\n\n Returns:\n An input shape tuple.\n \"\"\"\n if context.executing_eagerly():\n # In this case we build the model first in order to do shape inference.\n # This is acceptable because the framework only calls\n # `compute_output_shape` on shape values that the layer would later be\n # built for. It would however cause issues in case a user attempts to\n # use `compute_output_shape` manually with shapes that are incompatible\n # with the shape the Layer will be called on (these users will have to\n # implement `compute_output_shape` themselves).\n self._maybe_build(input_shape)\n with ops.get_default_graph().as_default():\n graph = func_graph.FuncGraph('graph')\n with graph.as_default():\n input_shape = tf_utils.convert_shapes(input_shape, to_tuples=False)\n inputs = nest.map_structure(\n base_layer_utils.generate_placeholders_from_shape, input_shape)\n try:\n outputs = self(inputs, training=False)\n except TypeError as e:\n six.raise_from(\n NotImplementedError(\n 'We could not automatically infer the static shape of the '\n 'layer\\'s output. Please implement the '\n '`compute_output_shape` method on your layer (%s).' %\n self.__class__.__name__), e)\n return nest.map_structure(lambda t: t.shape, outputs)\n raise NotImplementedError\n\n @doc_controls.for_subclass_implementers\n def compute_output_signature(self, input_signature):\n \"\"\"Compute the output tensor signature of the layer based on the inputs.\n\n Unlike a TensorShape object, a TensorSpec object contains both shape\n and dtype information for a tensor. This method allows layers to provide\n output dtype information if it is different from the input dtype.\n For any layer that doesn't implement this function,\n the framework will fall back to use `compute_output_shape`, and will\n assume that the output dtype matches the input dtype.\n\n Args:\n input_signature: Single TensorSpec or nested structure of TensorSpec\n objects, describing a candidate input for the layer.\n\n Returns:\n Single TensorSpec or nested structure of TensorSpec objects, describing\n how the layer would transform the provided input.\n\n Raises:\n TypeError: If input_signature contains a non-TensorSpec object.\n \"\"\"\n def check_type_return_shape(s):\n if not isinstance(s, tensor_spec.TensorSpec):\n raise TypeError(\n 'Only TensorSpec signature types are supported, '\n 'but saw signature signature entry: {}.'.format(s))\n return s.shape\n input_shape = nest.map_structure(check_type_return_shape, input_signature)\n output_shape = self.compute_output_shape(input_shape)\n dtype = self._compute_dtype\n if dtype is None:\n input_dtypes = [s.dtype for s in nest.flatten(input_signature)]\n # Default behavior when self.dtype is None, is to use the first input's\n # dtype.\n dtype = input_dtypes[0]\n return nest.map_structure(\n lambda s: tensor_spec.TensorSpec(dtype=dtype, shape=s),\n output_shape)\n\n @generic_utils.default\n def compute_mask(self, inputs, mask=None): # pylint: disable=unused-argument\n \"\"\"Computes an output mask tensor.\n\n Arguments:\n inputs: Tensor or list of tensors.\n mask: Tensor or list of tensors.\n\n Returns:\n None or a tensor (or list of tensors,\n one per output tensor of the layer).\n \"\"\"\n if not self.supports_masking:\n if any(m is not None for m in nest.flatten(mask)):\n raise TypeError('Layer ' + self.name + ' does not support masking, '\n 'but was passed an input_mask: ' + str(mask))\n # masking not explicitly supported: return None as mask.\n return None\n # if masking is explicitly supported, by default\n # carry over the input mask\n return mask\n\n def __call__(self, *args, **kwargs):\n \"\"\"Wraps `call`, applying pre- and post-processing steps.\n\n Arguments:\n *args: Positional arguments to be passed to `self.call`.\n **kwargs: Keyword arguments to be passed to `self.call`.\n\n Returns:\n Output tensor(s).\n\n Note:\n - The following optional keyword arguments are reserved for specific uses:\n * `training`: Boolean scalar tensor of Python boolean indicating\n whether the `call` is meant for training or inference.\n * `mask`: Boolean input mask.\n - If the layer's `call` method takes a `mask` argument (as some Keras\n layers do), its default value will be set to the mask generated\n for `inputs` by the previous layer (if `input` did come from\n a layer that generated a corresponding mask, i.e. if it came from\n a Keras layer with masking support.\n\n Raises:\n ValueError: if the layer's `call` method returns None (an invalid value).\n RuntimeError: if `super().__init__()` was not called in the constructor.\n \"\"\"\n if not hasattr(self, '_thread_local'):\n raise RuntimeError(\n 'You must call `super().__init__()` in the layer constructor.')\n\n # Grab the first positional or keyword argument.\n if args:\n inputs = args[0]\n args = args[1:]\n elif self._call_fn_args[0] in kwargs:\n inputs = kwargs.pop(self._call_fn_args[0])\n else:\n raise ValueError(\n 'The first argument to `Layer.call` must always be passed.')\n\n call_context = base_layer_utils.call_context()\n input_list = nest.flatten(inputs)\n\n # We will attempt to build a TF graph if & only if all inputs are symbolic.\n # This is always the case in graph mode. It can also be the case in eager\n # mode when all inputs can be traced back to `keras.Input()` (when building\n # models using the functional API).\n build_graph = tf_utils.are_all_symbolic_tensors(input_list)\n\n # Accept NumPy and scalar inputs by converting to Tensors.\n if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):\n def _convert_non_tensor(x):\n # Don't call `ops.convert_to_tensor_v2` on all `inputs` because\n # `SparseTensors` can't be converted to `Tensor`.\n if isinstance(x, (np.ndarray, float, int)):\n return ops.convert_to_tensor_v2(x)\n return x\n inputs = nest.map_structure(_convert_non_tensor, inputs)\n input_list = nest.flatten(inputs)\n\n # Handle `mask` propagation from previous layer to current layer. Masks can\n # be propagated explicitly via the `mask` argument, or implicitly via\n # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed\n # explicitly take priority.\n mask_arg_passed_by_framework = False\n input_masks = self._collect_input_masks(inputs, args, kwargs)\n if (self._expects_mask_arg and input_masks is not None and\n not self._call_arg_was_passed('mask', args, kwargs)):\n mask_arg_passed_by_framework = True\n kwargs['mask'] = input_masks\n\n # If `training` argument was not explicitly passed, propagate `training`\n # value from this layer's calling layer.\n training_arg_passed_by_framework = False\n # Priority 1: `training` was explicitly passed.\n if self._call_arg_was_passed('training', args, kwargs):\n training_value = self._get_call_arg_value('training', args, kwargs)\n if not self._expects_training_arg:\n kwargs.pop('training')\n else:\n training_value = None\n # Priority 2: `training` was passed to a parent layer.\n if call_context.training is not None:\n training_value = call_context.training\n # Priority 3a: `learning_phase()` has been set.\n elif backend.global_learning_phase_is_set():\n training_value = backend.learning_phase()\n # Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.\n elif build_graph:\n with backend.get_graph().as_default():\n if base_layer_utils.is_in_keras_graph():\n training_value = backend.learning_phase()\n\n if self._expects_training_arg and training_value is not None:\n # Force the training_value to be bool type which matches to the contract\n # for layer/model call args.\n if tensor_util.is_tensor(training_value):\n training_value = math_ops.cast(training_value, dtypes.bool)\n else:\n training_value = bool(training_value)\n kwargs['training'] = training_value\n training_arg_passed_by_framework = True\n\n # Only create Keras history if at least one tensor originates from a\n # `keras.Input`. Otherwise this Layer may be being used outside the Keras\n # framework.\n if build_graph and base_layer_utils.needs_keras_history(inputs):\n base_layer_utils.create_keras_history(inputs)\n\n with call_context.enter(self, inputs, build_graph, training_value):\n # Check input assumptions set after layer building, e.g. input shape.\n if build_graph:\n # Symbolic execution on symbolic tensors. We will attempt to build\n # the corresponding TF subgraph inside `backend.get_graph()`\n # TODO(reedwm): We should assert input compatibility after the inputs\n # are casted, not before.\n input_spec.assert_input_compatibility(self.input_spec, inputs,\n self.name)\n if (any(isinstance(x, ragged_tensor.RaggedTensor) for x in input_list)\n and self._supports_ragged_inputs is False): # pylint: disable=g-bool-id-comparison\n raise ValueError('Layer %s does not support RaggedTensors as input. '\n 'Inputs received: %s. You can try converting your '\n 'input to an uniform tensor.' % (self.name, inputs))\n\n graph = backend.get_graph()\n with graph.as_default(), backend.name_scope(self._name_scope()):\n # Build layer if applicable (if the `build` method has been\n # overridden).\n self._maybe_build(inputs)\n cast_inputs = self._maybe_cast_inputs(inputs)\n\n # Wrapping `call` function in autograph to allow for dynamic control\n # flow and control dependencies in call. We are limiting this to\n # subclassed layers as autograph is strictly needed only for\n # subclassed layers and models.\n # tf_convert will respect the value of autograph setting in the\n # enclosing tf.function, if any.\n if (base_layer_utils.is_subclassed(self) and\n not base_layer_utils.from_saved_model(self)):\n call_fn = autograph.tf_convert(\n self.call, ag_ctx.control_status_ctx())\n else:\n call_fn = self.call\n\n if not self.dynamic:\n try:\n with base_layer_utils.autocast_context_manager(\n self._compute_dtype):\n outputs = call_fn(cast_inputs, *args, **kwargs)\n\n except errors.OperatorNotAllowedInGraphError as e:\n raise TypeError('You are attempting to use Python control '\n 'flow in a layer that was not declared to be '\n 'dynamic. Pass `dynamic=True` to the class '\n 'constructor.\\nEncountered error:\\n\"\"\"\\n' +\n str(e) + '\\n\"\"\"')\n else:\n # We will use static shape inference to return symbolic tensors\n # matching the specifications of the layer outputs.\n # Since `self.dynamic` is True, we will never attempt to\n # run the underlying TF graph (which is disconnected).\n # TODO(fchollet): consider py_func as an alternative, which\n # would enable us to run the underlying graph if needed.\n outputs = self._symbolic_call(inputs)\n\n if outputs is None:\n raise ValueError('A layer\\'s `call` method should return a '\n 'Tensor or a list of Tensors, not None '\n '(layer: ' + self.name + ').')\n if base_layer_utils.have_all_keras_metadata(inputs):\n if training_arg_passed_by_framework:\n kwargs.pop('training')\n if mask_arg_passed_by_framework:\n kwargs.pop('mask')\n inputs, outputs = self._set_connectivity_metadata_(\n inputs, outputs, args, kwargs)\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, input_masks)\n if hasattr(self, '_set_inputs') and not self.inputs:\n # Subclassed network: explicitly set metadata normally set by\n # a call to self._set_inputs().\n # TODO(b/120997007): This should be done in Eager as well, but\n # causes garbage collection issues because of the placeholders\n # created on the default Keras graph.\n self._set_inputs(inputs, outputs)\n else:\n # Eager execution on data tensors.\n with backend.name_scope(self._name_scope()):\n self._maybe_build(inputs)\n cast_inputs = self._maybe_cast_inputs(inputs)\n with base_layer_utils.autocast_context_manager(\n self._compute_dtype):\n outputs = self.call(cast_inputs, *args, **kwargs)\n self._handle_activity_regularization(inputs, outputs)\n self._set_mask_metadata(inputs, outputs, input_masks)\n\n return outputs\n\n @property\n def dtype(self):\n return self._dtype_policy.variable_dtype\n\n @property\n def name(self):\n return self._name\n\n @property\n @trackable_layer_utils.cache_recursive_attribute('dynamic')\n def dynamic(self):\n # NOTE(taylorrobie): Currently self._dynamic is read-only. If that changes\n # then this cache logic must be updated.\n return self._dynamic\n\n @property\n @doc_controls.do_not_generate_docs\n @trackable_layer_utils.cache_recursive_attribute('stateful')\n def stateful(self):\n return self._stateful\n\n @stateful.setter\n @trackable_layer_utils.invalidate_recursive_cache('stateful')\n def stateful(self, value):\n self._stateful = value\n\n @property\n def trainable(self):\n return self._trainable\n\n @trainable.setter\n def trainable(self, value):\n self._trainable = value\n for layer in getattr(self, '_layers', []):\n layer.trainable = value\n\n @property\n def activity_regularizer(self):\n \"\"\"Optional regularizer function for the output of this layer.\"\"\"\n return self._activity_regularizer\n\n @activity_regularizer.setter\n def activity_regularizer(self, regularizer):\n \"\"\"Optional regularizer function for the output of this layer.\"\"\"\n self._activity_regularizer = regularizer\n\n @property\n def input_spec(self):\n return self._input_spec\n\n @input_spec.setter\n # Must be decorated to prevent tracking, since the input_spec can be nested\n # InputSpec objects.\n @trackable.no_automatic_dependency_tracking\n def input_spec(self, value):\n for v in nest.flatten(value):\n if v is not None and not isinstance(v, base_layer.InputSpec):\n raise TypeError('Layer input_spec must be an instance of InputSpec. '\n 'Got: {}'.format(v))\n self._input_spec = value\n\n @property\n def trainable_weights(self):\n if self.trainable:\n children_weights = self._gather_children_attribute('trainable_weights')\n return self._dedup_weights(self._trainable_weights + children_weights)\n else:\n return []\n\n @property\n def non_trainable_weights(self):\n if self.trainable:\n children_weights = self._gather_children_attribute(\n 'non_trainable_weights')\n non_trainable_weights = self._non_trainable_weights + children_weights\n else:\n children_weights = self._gather_children_attribute('weights')\n non_trainable_weights = (\n self._trainable_weights + self._non_trainable_weights +\n children_weights)\n return self._dedup_weights(non_trainable_weights)\n\n @property\n def weights(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Returns:\n A list of variables.\n \"\"\"\n return self.trainable_weights + self.non_trainable_weights\n\n @property\n def updates(self):\n collected_updates = []\n all_layers = self._gather_unique_layers()\n with backend.get_graph().as_default():\n for layer in all_layers:\n if not layer.trainable and not layer.stateful:\n continue\n for u in layer._updates:\n if callable(u):\n try:\n u = u()\n except errors.InaccessibleTensorError:\n base_layer_utils.check_graph_consistency(\n method='add_update', force_raise=True)\n raise # check_graph_consistency may not always raise.\n base_layer_utils.check_graph_consistency(u, method='add_update')\n collected_updates.append(u)\n return collected_updates\n\n @property\n def losses(self):\n \"\"\"Losses which are associated with this `Layer`.\n\n Variable regularization tensors are created when this property is accessed,\n so it is eager safe: accessing `losses` under a `tf.GradientTape` will\n propagate gradients back to the corresponding variables.\n\n Returns:\n A list of tensors.\n \"\"\"\n collected_losses = []\n all_layers = self._gather_unique_layers()\n for layer in all_layers:\n # If any eager losses are present, we assume the model to be part of an\n # eager training loop (either a custom one or the one used when\n # `run_eagerly=True`) and so we always return just the eager losses.\n collected_losses.extend(layer._losses)\n for regularizer in layer._callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not None:\n collected_losses.append(loss_tensor)\n return collected_losses\n\n @doc_controls.for_subclass_implementers\n def add_loss(self, losses, inputs=None):\n \"\"\"Add loss tensor(s), potentially dependent on layer inputs.\n\n Some losses (for instance, activity regularization losses) may be dependent\n on the inputs passed when calling a layer. Hence, when reusing the same\n layer on different inputs `a` and `b`, some entries in `layer.losses` may\n be dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n This method can be used inside a subclassed layer or model's `call`\n function, in which case `losses` should be a Tensor or list of Tensors.\n\n Example:\n\n ```python\n class MyLayer(tf.keras.layers.Layer):\n def call(inputs, self):\n self.add_loss(tf.abs(tf.reduce_mean(inputs)), inputs=True)\n return inputs\n ```\n\n This method can also be called directly on a Functional Model during\n construction. In this case, any loss Tensors passed to this Model must\n be symbolic and be able to be traced back to the model's `Input`s. These\n losses become part of the model's topology and are tracked in `get_config`.\n\n Example:\n\n ```python\n inputs = tf.keras.Input(shape=(10,))\n x = tf.keras.layers.Dense(10)(inputs)\n outputs = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs, outputs)\n # Actvity regularization.\n model.add_loss(tf.abs(tf.reduce_mean(x)))\n ```\n\n If this is not the case for your loss (if, for example, your loss references\n a `Variable` of one of the model's layers), you can wrap your loss in a\n zero-argument lambda. These losses are not tracked as part of the model's\n topology since they can't be serialized.\n\n Example:\n\n ```python\n inputs = tf.keras.Input(shape=(10,))\n x = tf.keras.layers.Dense(10)(inputs)\n outputs = tf.keras.layers.Dense(1)(x)\n model = tf.keras.Model(inputs, outputs)\n # Weight regularization.\n model.add_loss(lambda: tf.reduce_mean(x.kernel))\n ```\n\n The `get_losses_for` method allows to retrieve the losses relevant to a\n specific set of inputs.\n\n Arguments:\n losses: Loss tensor, or list/tuple of tensors. Rather than tensors, losses\n may also be zero-argument callables which create a loss tensor.\n inputs: Ignored when executing eagerly. If anything other than None is\n passed, it signals the losses are conditional on some of the layer's\n inputs, and thus they should only be run where these inputs are\n available. This is the case for activity regularization losses, for\n instance. If `None` is passed, the losses are assumed\n to be unconditional, and will apply across all dataflows of the layer\n (e.g. weight regularization losses).\n \"\"\"\n def _tag_unconditional(loss):\n \"\"\"Process the loss and tag it by setting loss._unconditional_loss.\"\"\"\n if callable(loss):\n # We run the loss without autocasting, as regularizers are often\n # numerically unstable in float16.\n with base_layer_utils.autocast_context_manager(None):\n loss = loss()\n if loss is None:\n return None # Will be filtered out when computing the .losses property\n if not tensor_util.is_tensor(loss):\n loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())\n loss._unconditional_loss = (inputs is None) # pylint: disable=protected-access\n return loss\n\n losses = nest.flatten(losses)\n\n callable_losses = []\n symbolic_losses = []\n for loss in losses:\n if callable(loss):\n callable_losses.append(functools.partial(_tag_unconditional, loss))\n continue\n if loss is None:\n continue\n if not tensor_util.is_tensor(loss):\n loss = ops.convert_to_tensor_v2(loss, dtype=backend.floatx())\n # TF Functions should take the eager path.\n if (tf_utils.is_symbolic_tensor(loss) and\n not base_layer_utils.is_in_tf_function()):\n symbolic_losses.append(_tag_unconditional(loss))\n base_layer_utils.check_graph_consistency(loss, method='add_loss')\n\n self._callable_losses.extend(callable_losses)\n\n in_call_context = base_layer_utils.call_context().in_call\n\n if in_call_context:\n for symbolic_loss in symbolic_losses:\n self._losses.append(symbolic_loss)\n else:\n for symbolic_loss in symbolic_losses:\n if getattr(self, '_is_graph_network', False):\n self._graph_network_add_loss(symbolic_loss)\n else:\n # Possible a loss was added in a Layer's `build`.\n self._losses.append(symbolic_loss)\n\n @property\n def metrics(self):\n collected_metrics = []\n all_layers = self._gather_unique_layers()\n for layer in all_layers:\n collected_metrics.extend(layer._metrics)\n return collected_metrics\n\n @doc_controls.for_subclass_implementers\n def add_metric(self, value, aggregation=None, name=None):\n \"\"\"Adds metric tensor to the layer.\n\n Args:\n value: Metric tensor.\n aggregation: Sample-wise metric reduction function. If `aggregation=None`,\n it indicates that the metric tensor provided has been aggregated\n already. eg, `bin_acc = BinaryAccuracy(name='acc')` followed by\n `model.add_metric(bin_acc(y_true, y_pred))`. If aggregation='mean', the\n given metric tensor will be sample-wise reduced using `mean` function.\n eg, `model.add_metric(tf.reduce_sum(outputs), name='output_mean',\n aggregation='mean')`.\n name: String metric name.\n\n Raises:\n ValueError: If `aggregation` is anything other than None or `mean`.\n \"\"\"\n if aggregation is not None and aggregation != 'mean':\n raise ValueError(\n 'We currently support only `mean` sample-wise metric aggregation. '\n 'You provided aggregation=`%s`' % aggregation)\n\n from_metric_obj = hasattr(value, '_metric_obj')\n is_symbolic = tf_utils.is_symbolic_tensor(value)\n in_call_context = base_layer_utils.call_context().in_call\n\n if name is None and not from_metric_obj:\n # Eg. `self.add_metric(math_ops.reduce_sum(x), aggregation='mean')`\n # In eager mode, we use metric name to lookup a metric. Without a name,\n # a new Mean metric wrapper will be created on every model/layer call.\n # So, we raise an error when no name is provided.\n # We will do the same for symbolic mode for consistency although a name\n # will be generated if no name is provided.\n\n # We will not raise this error in the foll use case for the sake of\n # consistency as name in provided in the metric constructor.\n # mean = metrics.Mean(name='my_metric')\n # model.add_metric(mean(outputs))\n raise ValueError('Please provide a name for your metric like '\n '`self.add_metric(tf.reduce_sum(inputs), '\n 'name=\\'mean_activation\\', aggregation=\\'mean\\')`')\n elif from_metric_obj:\n name = value._metric_obj.name\n\n if in_call_context:\n # TF Function path should take the eager path.\n self._symbolic_add_metric(value, aggregation, name)\n else:\n if not is_symbolic:\n raise ValueError('Expected a symbolic Tensor for the metric value, '\n 'received: ' + str(value))\n\n # Possible a metric was added in a Layer's `build`.\n if not getattr(self, '_is_graph_network', False):\n with backend.get_graph().as_default():\n self._symbolic_add_metric(value, aggregation, name)\n return\n\n if from_metric_obj:\n raise ValueError('Using the result of calling a `Metric` object '\n 'when calling `add_metric` on a Functional '\n 'Model is not supported. Please pass the '\n 'Tensor to monitor directly.')\n\n # Insert layers into the Keras Graph Network.\n self._graph_network_add_metric(value, aggregation, name)\n\n @deprecation.deprecated_args(None, '`inputs` is now automatically inferred',\n 'inputs')\n @doc_controls.for_subclass_implementers\n def add_update(self, updates, inputs=None):\n \"\"\"Add update op(s), potentially dependent on layer inputs.\n\n Weight updates (for instance, the updates of the moving mean and variance\n in a BatchNormalization layer) may be dependent on the inputs passed\n when calling a layer. Hence, when reusing the same layer on\n different inputs `a` and `b`, some entries in `layer.updates` may be\n dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n The `get_updates_for` method allows to retrieve the updates relevant to a\n specific set of inputs.\n\n This call is ignored when eager execution is enabled (in that case, variable\n updates are run on the fly and thus do not need to be tracked for later\n execution).\n\n Arguments:\n updates: Update op, or list/tuple of update ops, or zero-arg callable\n that returns an update op. A zero-arg callable should be passed in\n order to disable running the updates by setting `trainable=False`\n on this Layer, when executing in Eager mode.\n inputs: Deprecated, will be automatically inferred.\n \"\"\"\n call_context = base_layer_utils.call_context()\n\n if (ds_context.has_strategy() and\n ds_context.in_cross_replica_context() and\n # When saving the model, the distribution strategy context should be\n # ignored, following the default path for adding updates.\n not call_context.saving):\n # Updates don't need to be run in a cross-replica context.\n return\n\n updates = generic_utils.to_list(updates)\n\n if call_context.in_call:\n relevant_inputs = call_context.inputs\n else:\n inbound_nodes = getattr(self, '_inbound_nodes', [])\n relevant_inputs = [node.input_tensors for node in inbound_nodes]\n\n def process_update(x):\n \"\"\"Standardize update ops.\n\n Arguments:\n x: Tensor, op, or callable.\n\n Returns:\n An update op.\n \"\"\"\n if callable(x):\n update = lambda: process_update(x())\n return update()\n elif isinstance(x, ops.Operation):\n update = x\n elif hasattr(x, 'op'):\n update = x.op\n else:\n update = ops.convert_to_tensor_v2(x)\n\n reachable = tf_utils.get_reachable_from_inputs(relevant_inputs, [update])\n update._unconditional_update = update not in reachable\n return update\n\n updates = [process_update(x) for x in updates]\n self._updates.extend(updates)\n\n def set_weights(self, weights):\n \"\"\"Sets the weights of the layer, from Numpy arrays.\n\n The weights of a layer represent the state of the layer. This function\n sets the weight values from numpy arrays. The weight values should be\n passed in the order they are created by the layer. Note that the layer's\n weights must be instantiated before calling this function by calling\n the layer.\n\n For example, a Dense layer returns a list of two values-- per-output\n weights and the bias value. These can be used to set the weights of another\n Dense layer:\n\n >>> a = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(1.))\n >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))\n >>> a.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n >>> b = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(2.))\n >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))\n >>> b.get_weights()\n [array([[2.],\n [2.],\n [2.]], dtype=float32), array([0.], dtype=float32)]\n >>> b.set_weights(a.get_weights())\n >>> b.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n\n Arguments:\n weights: a list of Numpy arrays. The number\n of arrays and their shape must match\n number of the dimensions of the weights\n of the layer (i.e. it should match the\n output of `get_weights`).\n\n Raises:\n ValueError: If the provided weights list does not match the\n layer's specifications.\n \"\"\"\n params = self.weights\n\n expected_num_weights = 0\n for param in params:\n if isinstance(param, base_layer_utils.TrackableWeightHandler):\n expected_num_weights += param.num_tensors\n else:\n expected_num_weights += 1\n\n if expected_num_weights != len(weights):\n raise ValueError(\n 'You called `set_weights(weights)` on layer \"%s\" '\n 'with a weight list of length %s, but the layer was '\n 'expecting %s weights. Provided weights: %s...' %\n (self.name, len(weights), expected_num_weights, str(weights)[:50]))\n\n weight_index = 0\n weight_value_tuples = []\n for param in params:\n if isinstance(param, base_layer_utils.TrackableWeightHandler):\n num_tensors = param.num_tensors\n tensors = weights[weight_index:weight_index + num_tensors]\n param.set_weights(tensors)\n weight_index += num_tensors\n else:\n weight = weights[weight_index]\n ref_shape = param.shape\n if not ref_shape.is_compatible_with(weight.shape):\n raise ValueError(\n 'Layer weight shape %s not compatible with provided weight '\n 'shape %s' % (ref_shape, weight.shape))\n weight_value_tuples.append((param, weight))\n weight_index += 1\n\n backend.batch_set_value(weight_value_tuples)\n\n def get_weights(self):\n \"\"\"Returns the current weights of the layer.\n\n The weights of a layer represent the state of the layer. This function\n returns both trainable and non-trainable weight values associated with this\n layer as a list of Numpy arrays, which can in turn be used to load state\n into similarly parameterized layers.\n\n For example, a Dense layer returns a list of two values-- per-output\n weights and the bias value. These can be used to set the weights of another\n Dense layer:\n\n >>> a = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(1.))\n >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]]))\n >>> a.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n >>> b = tf.keras.layers.Dense(1,\n ... kernel_initializer=tf.constant_initializer(2.))\n >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]]))\n >>> b.get_weights()\n [array([[2.],\n [2.],\n [2.]], dtype=float32), array([0.], dtype=float32)]\n >>> b.set_weights(a.get_weights())\n >>> b.get_weights()\n [array([[1.],\n [1.],\n [1.]], dtype=float32), array([0.], dtype=float32)]\n\n Returns:\n Weights values as a list of numpy arrays.\n \"\"\"\n weights = self.weights\n output_weights = []\n for weight in weights:\n if isinstance(weight, base_layer_utils.TrackableWeightHandler):\n output_weights.extend(weight.get_tensors())\n else:\n output_weights.append(weight)\n return backend.batch_get_value(output_weights)\n\n def get_updates_for(self, inputs):\n \"\"\"Retrieves updates relevant to a specific set of inputs.\n\n Arguments:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of update ops of the layer that depend on `inputs`.\n \"\"\"\n if inputs is None:\n # Requesting unconditional updates.\n return [u for u in self.updates if u._unconditional_update]\n\n # Requesting input-conditional updates.\n updates = [u for u in self.updates if not u._unconditional_update]\n inputs = nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, updates)\n return [u for u in updates if u in reachable]\n\n def get_losses_for(self, inputs):\n \"\"\"Retrieves losses relevant to a specific set of inputs.\n\n Arguments:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of loss tensors of the layer that depend on `inputs`.\n \"\"\"\n if inputs is None:\n # Requesting unconditional losses.\n return [l for l in self.losses if l._unconditional_loss]\n\n # Requesting input-conditional losses.\n losses = [l for l in self.losses if not l._unconditional_loss]\n inputs = nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, losses)\n return [l for l in losses if l in reachable]\n\n def get_input_mask_at(self, node_index):\n \"\"\"Retrieves the input mask tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple inputs).\n \"\"\"\n inputs = self.get_input_at(node_index)\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)\n\n def get_output_mask_at(self, node_index):\n \"\"\"Retrieves the output mask tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A mask tensor\n (or list of tensors if the layer has multiple outputs).\n \"\"\"\n output = self.get_output_at(node_index)\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)\n\n @property\n def input_mask(self):\n \"\"\"Retrieves the input mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Input mask tensor (potentially None) or list of input\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)\n\n @property\n def output_mask(self):\n \"\"\"Retrieves the output mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Output mask tensor (potentially None) or list of output\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n \"\"\"\n output = self.output\n if isinstance(output, list):\n return [getattr(x, '_keras_mask', None) for x in output]\n else:\n return getattr(output, '_keras_mask', None)\n\n def get_input_shape_at(self, node_index):\n \"\"\"Retrieves the input shape(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A shape tuple\n (or list of shape tuples if the layer has multiple inputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'input_shapes',\n 'input shape')\n\n def get_output_shape_at(self, node_index):\n \"\"\"Retrieves the output shape(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A shape tuple\n (or list of shape tuples if the layer has multiple outputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'output_shapes',\n 'output shape')\n\n def get_input_at(self, node_index):\n \"\"\"Retrieves the input tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A tensor (or list of tensors if the layer has multiple inputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'input_tensors',\n 'input')\n\n def get_output_at(self, node_index):\n \"\"\"Retrieves the output tensor(s) of a layer at a given node.\n\n Arguments:\n node_index: Integer, index of the node\n from which to retrieve the attribute.\n E.g. `node_index=0` will correspond to the\n first time the layer was called.\n\n Returns:\n A tensor (or list of tensors if the layer has multiple outputs).\n\n Raises:\n RuntimeError: If called in Eager mode.\n \"\"\"\n return self._get_node_attribute_at_index(node_index, 'output_tensors',\n 'output')\n\n @property\n def input(self):\n \"\"\"Retrieves the input tensor(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Input tensor or list of input tensors.\n\n Raises:\n RuntimeError: If called in Eager mode.\n AttributeError: If no inbound nodes are found.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('Layer ' + self.name +\n ' is not connected, no input to return.')\n return self._get_node_attribute_at_index(0, 'input_tensors', 'input')\n\n @property\n def output(self):\n \"\"\"Retrieves the output tensor(s) of a layer.\n\n Only applicable if the layer has exactly one output,\n i.e. if it is connected to one incoming layer.\n\n Returns:\n Output tensor or list of output tensors.\n\n Raises:\n AttributeError: if the layer is connected to more than one incoming\n layers.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('Layer ' + self.name + ' has no inbound nodes.')\n return self._get_node_attribute_at_index(0, 'output_tensors', 'output')\n\n @property\n def input_shape(self):\n \"\"\"Retrieves the input shape(s) of a layer.\n\n Only applicable if the layer has exactly one input,\n i.e. if it is connected to one incoming layer, or if all inputs\n have the same shape.\n\n Returns:\n Input shape, as an integer shape tuple\n (or list of shape tuples, one tuple per input tensor).\n\n Raises:\n AttributeError: if the layer has no defined input_shape.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined input shape.')\n all_input_shapes = set(\n [str(node.input_shapes) for node in self._inbound_nodes])\n if len(all_input_shapes) == 1:\n return self._inbound_nodes[0].input_shapes\n else:\n raise AttributeError('The layer \"' + str(self.name) +\n ' has multiple inbound nodes, '\n 'with different input shapes. Hence '\n 'the notion of \"input shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_input_shape_at(node_index)` '\n 'instead.')\n\n def count_params(self):\n \"\"\"Count the total number of scalars composing the weights.\n\n Returns:\n An integer count.\n\n Raises:\n ValueError: if the layer isn't yet built\n (in which case its weights aren't yet defined).\n \"\"\"\n if not self.built:\n if getattr(self, '_is_graph_network', False):\n with tf_utils.maybe_init_scope(self):\n self._maybe_build(self.inputs)\n else:\n raise ValueError('You tried to call `count_params` on ' + self.name +\n ', but the layer isn\\'t built. '\n 'You can build it manually via: `' + self.name +\n '.build(batch_input_shape)`.')\n return layer_utils.count_params(self.weights)\n\n @property\n def output_shape(self):\n \"\"\"Retrieves the output shape(s) of a layer.\n\n Only applicable if the layer has one output,\n or if all outputs have the same shape.\n\n Returns:\n Output shape, as an integer shape tuple\n (or list of shape tuples, one tuple per output tensor).\n\n Raises:\n AttributeError: if the layer has no defined output shape.\n RuntimeError: if called in Eager mode.\n \"\"\"\n if not self._inbound_nodes:\n raise AttributeError('The layer has never been called '\n 'and thus has no defined output shape.')\n all_output_shapes = set(\n [str(node.output_shapes) for node in self._inbound_nodes])\n if len(all_output_shapes) == 1:\n return self._inbound_nodes[0].output_shapes\n else:\n raise AttributeError('The layer \"%s\"'\n ' has multiple inbound nodes, '\n 'with different output shapes. Hence '\n 'the notion of \"output shape\" is '\n 'ill-defined for the layer. '\n 'Use `get_output_shape_at(node_index)` '\n 'instead.' % self.name)\n\n @property\n @doc_controls.do_not_doc_inheritable\n def inbound_nodes(self):\n \"\"\"Deprecated, do NOT use! Only for compatibility with external Keras.\"\"\"\n return self._inbound_nodes\n\n @property\n @doc_controls.do_not_doc_inheritable\n def outbound_nodes(self):\n \"\"\"Deprecated, do NOT use! Only for compatibility with external Keras.\"\"\"\n return self._outbound_nodes\n\n ##############################################################################\n # Methods & attributes below are public aliases of other methods. #\n ##############################################################################\n\n @deprecation.deprecated(\n date=None, instructions='Please use `layer.__call__` method instead.')\n @doc_controls.do_not_doc_inheritable\n def apply(self, inputs, *args, **kwargs):\n \"\"\"Deprecated, do NOT use!\n\n This is an alias of `self.__call__`.\n\n Arguments:\n inputs: Input tensor(s).\n *args: additional positional arguments to be passed to `self.call`.\n **kwargs: additional keyword arguments to be passed to `self.call`.\n\n Returns:\n Output tensor(s).\n \"\"\"\n return self.__call__(inputs, *args, **kwargs)\n\n @deprecation.deprecated(\n date=None, instructions='Please use `layer.add_weight` method instead.')\n @doc_controls.do_not_doc_inheritable\n def add_variable(self, *args, **kwargs):\n \"\"\"Deprecated, do NOT use! Alias for `add_weight`.\"\"\"\n return self.add_weight(*args, **kwargs)\n\n @property\n def variables(self):\n \"\"\"Returns the list of all layer variables/weights.\n\n Alias of `self.weights`.\n\n Returns:\n A list of variables.\n \"\"\"\n return self.weights\n\n @property\n def trainable_variables(self):\n return self.trainable_weights\n\n @property\n def non_trainable_variables(self):\n return self.non_trainable_weights\n\n ##############################################################################\n # Methods & attributes below are all private and only used by the framework. #\n ##############################################################################\n\n def _set_dtype_policy(self, dtype):\n \"\"\"Sets self._dtype_policy.\"\"\"\n if isinstance(dtype, policy.Policy):\n self._dtype_policy = dtype\n elif isinstance(dtype, dict):\n self._dtype_policy = policy.deserialize(dtype)\n elif dtype:\n self._dtype_policy = policy.Policy(dtypes.as_dtype(dtype).name)\n else:\n self._dtype_policy = policy.global_policy()\n if (self._dtype_policy.name == 'mixed_float16' and\n not loss_scale_optimizer.strategy_supports_loss_scaling()):\n # Although only loss scaling doesn't support certain strategies, to avoid\n # confusion, we disallow the 'mixed_float16' policy with unsupported\n # strategies. This is because 'mixed_float16' requires loss scaling for\n # numeric stability.\n strategy = ds_context.get_strategy()\n raise ValueError('Mixed precision is not supported with the '\n 'tf.distribute.Strategy: %s. Either stop using mixed '\n 'precision by removing the use of the \"%s\" policy or '\n 'use a different Strategy, e.g. a MirroredStrategy.' %\n (strategy.__class__.__name__, self._dtype_policy.name))\n\n # This has no impact on the layer behavior, and is only used for printing\n # warnings.\n self._dtype_defaulted_to_floatx = (not dtype and\n policy.policy_defaults_to_floatx())\n\n # TODO(reedwm): Expose this property?\n @property\n def _compute_dtype(self):\n \"\"\"The layer's compute dtype.\n\n Unless mixed-precision is used, this is the same as `Layer.dtype`.\n\n If self._autocast is True, layer's will cast floating-point inputs to this.\n\n Returns:\n The layer's compute dtype.\n \"\"\"\n return self._dtype_policy.compute_dtype\n\n def _maybe_cast_inputs(self, inputs):\n \"\"\"Maybe casts the inputs to the compute dtype.\n\n If self._compute_dtype is floating-point, and self_autocast is True,\n floating-point inputs are casted to self._compute_dtype.\n\n Args:\n inputs: Input tensor, or structure of input tensors.\n\n Returns:\n `inputs`, but tensors may have been casted to self._compute_dtype\n \"\"\"\n compute_dtype = self._compute_dtype\n if (self._autocast and compute_dtype and\n dtypes.as_dtype(compute_dtype).is_floating):\n def f(x):\n \"\"\"Cast a single Tensor or TensorSpec to the compute dtype.\"\"\"\n cast_types = (ops.Tensor, sparse_tensor.SparseTensor,\n ragged_tensor.RaggedTensor)\n if (isinstance(x, cast_types) and x.dtype.is_floating and\n x.dtype.base_dtype.name != compute_dtype):\n if self._dtype_defaulted_to_floatx:\n self._warn_about_input_casting(x.dtype.base_dtype)\n return math_ops.cast(x, compute_dtype)\n elif isinstance(x, tensor_spec.TensorSpec) and x.dtype.is_floating:\n # Inputs may be TensorSpecs when this function is called from\n # model._set_inputs.\n return tensor_spec.TensorSpec(x.shape, compute_dtype, x.name)\n else:\n return x\n return nest.map_structure(f, inputs)\n else:\n return inputs\n\n def _warn_about_input_casting(self, input_dtype):\n # self._already_warned_about_input_casting is only retrieved or set in this\n # function.\n already_warned = getattr(self, '_already_warned_about_input_casting', False)\n if not already_warned:\n tf_logging.warn(\n \"Layer {self.name} is casting an input tensor from dtype \"\n \"{input_dtype} to the layer's dtype of {layer_dtype}, which is new \"\n \"behavior in TensorFlow 2. The layer has dtype {layer_dtype} \"\n 'because its dtype defaults to floatx.\\n\\n'\n \"\"\n \"If you intended to run this layer in {layer_dtype}, you can safely \"\n \"ignore this warning. If in doubt, this warning is likely only an \"\n \"issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\\n\\n\"\n \"\"\n \"To change all layers to have dtype {input_dtype} by default, call \"\n \"`tf.keras.backend.set_floatx('{input_dtype}')`. To change just this \"\n \"layer, pass dtype='{input_dtype}' to the layer constructor. If you \"\n \"are the author of this layer, you can disable autocasting by \"\n \"passing autocast=False to the base Layer constructor.\\n\".format(\n self=self,\n input_dtype=input_dtype.name,\n layer_dtype=self._compute_dtype))\n self._already_warned_about_input_casting = True\n\n # _dtype used to be an attribute set in the constructor. We still expose it\n # because some clients still use it.\n # TODO(reedwm): Deprecate, then remove the _dtype property.\n @property\n def _dtype(self):\n # This is equivalent to returning self.dtype . We do not return self.dtype\n # as it would cause infinite recursion in a few subclasses, which override\n # \"dtype\" to return self._dtype.\n return self._dtype_policy.variable_dtype\n\n @_dtype.setter\n def _dtype(self, value):\n value = dtypes.as_dtype(value).name\n self._dtype_policy = policy.Policy(value)\n\n def _name_scope(self):\n return self.name\n\n def _init_set_name(self, name, zero_based=True):\n if not name:\n self._name = backend.unique_object_name(\n generic_utils.to_snake_case(self.__class__.__name__),\n zero_based=zero_based)\n else:\n self._name = name\n\n def _get_existing_metric(self, name=None):\n match = [m for m in self._metrics if m.name == name]\n if not match:\n return\n if len(match) > 1:\n raise ValueError(\n 'Please provide different names for the metrics you have added. '\n 'We found {} metrics with the name: \"{}\"'.format(len(match), name))\n return match[0]\n\n def _symbolic_add_metric(self, value, aggregation=None, name=None):\n base_layer_utils.check_graph_consistency(value, method='add_metric')\n match = self._get_existing_metric(name)\n if aggregation is None:\n # Iterate over the metrics and check if the given metric exists already.\n # This can happen when a metric instance is created in subclassed model\n # layer `__init__` and we have tracked that instance already in\n # model.__setattr__.\n if match:\n result_tensor = value\n metric_obj = match\n elif hasattr(value, '_metric_obj'):\n # We track the instance using the metadata on the result tensor.\n result_tensor = value\n metric_obj = result_tensor._metric_obj\n self._metrics.append(metric_obj)\n else:\n raise ValueError(\n 'We do not support adding an aggregated metric result tensor that '\n 'is not the output of a `tf.keras.metrics.Metric` metric instance. '\n 'Without having access to the metric instance we cannot reset the '\n 'state of a metric after every epoch during training. You can '\n 'create a `tf.keras.metrics.Metric` instance and pass the result '\n 'here or pass an un-aggregated result with `aggregation` parameter '\n 'set as `mean`. For example: `self.add_metric(tf.reduce_sum(inputs)'\n ', name=\\'mean_activation\\', aggregation=\\'mean\\')`')\n else:\n # If a non-aggregated tensor is given as input (ie. `aggregation` is\n # explicitly set to `mean`), we wrap the tensor in `Mean` metric.\n if match:\n result_tensor = match(value)\n metric_obj = match\n else:\n metric_obj, result_tensor = base_layer_utils.create_mean_metric(\n value, name)\n self._metrics.append(metric_obj)\n\n def _handle_weight_regularization(self, name, variable, regularizer):\n \"\"\"Create lambdas which compute regularization losses.\"\"\"\n\n def _loss_for_variable(v):\n \"\"\"Creates a regularization loss `Tensor` for variable `v`.\"\"\"\n with backend.name_scope(name + '/Regularizer'):\n regularization = regularizer(v)\n return regularization\n\n if isinstance(variable, tf_variables.PartitionedVariable):\n for v in variable:\n self.add_loss(functools.partial(_loss_for_variable, v))\n else:\n self.add_loss(functools.partial(_loss_for_variable, variable))\n\n def _handle_activity_regularization(self, inputs, outputs):\n # Apply activity regularization.\n # Note that it should be applied every time the layer creates a new\n # output, since it is output-specific.\n if self._activity_regularizer:\n output_list = nest.flatten(outputs)\n with backend.name_scope('ActivityRegularizer'):\n for output in output_list:\n activity_loss = self._activity_regularizer(output)\n batch_size = math_ops.cast(\n array_ops.shape(output)[0], activity_loss.dtype)\n # Make activity regularization strength batch-agnostic.\n mean_activity_loss = activity_loss / batch_size\n base_layer_utils.check_graph_consistency(\n mean_activity_loss, method='activity_regularizer')\n self.add_loss(mean_activity_loss, inputs=inputs)\n\n def _set_mask_metadata(self, inputs, outputs, previous_mask):\n flat_outputs = nest.flatten(outputs)\n\n mask_already_computed = (\n getattr(self, '_compute_output_and_mask_jointly', False) or\n all(getattr(x, '_keras_mask', None) is not None for x in flat_outputs))\n\n # Only compute the mask if the Layer explicitly supports masking or has\n # overridden `compute_mask`.\n should_compute_mask = (\n hasattr(self, 'compute_mask') and\n (self.supports_masking or\n not getattr(self.compute_mask, '_is_default', False)))\n\n if mask_already_computed:\n flat_masks = [getattr(x, '_keras_mask', None) for x in flat_outputs]\n elif not should_compute_mask:\n flat_masks = [None for _ in flat_outputs]\n else:\n output_masks = self.compute_mask(inputs, previous_mask)\n # `compute_mask` can return a single `None` even when a Layer\n # has multiple outputs.\n if output_masks is None:\n flat_masks = [None for _ in flat_outputs]\n else:\n flat_masks = nest.flatten(output_masks)\n\n for output, mask in zip(flat_outputs, flat_masks):\n try:\n output._keras_mask = mask\n except AttributeError:\n # C Type such as np.ndarray.\n pass\n\n if tf_utils.are_all_symbolic_tensors(flat_outputs):\n for output in flat_outputs:\n if getattr(output, '_keras_mask', None) is not None:\n # Do not track masks for `TensorFlowOpLayer` construction.\n output._keras_mask._keras_history_checked = True\n\n def _collect_input_masks(self, inputs, args, kwargs):\n \"\"\"Checks if `mask` argument was passed, else gathers mask from inputs.\"\"\"\n if self._call_arg_was_passed('mask', args, kwargs):\n return self._get_call_arg_value('mask', args, kwargs)\n\n if not self._should_compute_mask:\n return None\n\n input_masks = nest.map_structure(lambda t: getattr(t, '_keras_mask', None),\n inputs)\n if generic_utils.is_all_none(input_masks):\n return None\n return input_masks\n\n def _call_arg_was_passed(self, arg_name, args, kwargs, inputs_in_args=False):\n if arg_name in kwargs:\n return True\n call_fn_args = self._call_fn_args\n if not inputs_in_args:\n # Ignore `inputs` arg.\n call_fn_args = call_fn_args[1:]\n if arg_name in dict(zip(call_fn_args, args)):\n return True\n return False\n\n def _get_call_arg_value(self, arg_name, args, kwargs, inputs_in_args=False):\n if arg_name in kwargs:\n return kwargs[arg_name]\n call_fn_args = self._call_fn_args\n if not inputs_in_args:\n # Ignore `inputs` arg.\n call_fn_args = call_fn_args[1:]\n args_dict = dict(zip(call_fn_args, args))\n return args_dict[arg_name]\n\n def _set_connectivity_metadata_(self, inputs, outputs, args, kwargs):\n\n # If the layer returns tensors from its inputs, unmodified,\n # we copy them to avoid loss of tensor metadata.\n output_ls = nest.flatten(outputs)\n inputs_ls = object_identity.ObjectIdentitySet(nest.flatten(inputs))\n output_ls_copy = []\n for x in output_ls:\n if x in inputs_ls:\n with backend.name_scope(self.name):\n x = array_ops.identity(x)\n output_ls_copy.append(x)\n outputs = nest.pack_sequence_as(outputs, output_ls_copy)\n\n # Ignore `inputs` arg.\n arguments = dict(zip(self._call_fn_args[1:], args))\n arguments.update(kwargs)\n\n # Add an inbound node to the layer, so it can keep track of this call.\n # This updates the layer history of the output tensor(s).\n self._add_inbound_node(\n input_tensors=inputs, output_tensors=outputs, arguments=arguments)\n return inputs, outputs\n\n def _add_inbound_node(self,\n input_tensors,\n output_tensors,\n arguments=None):\n \"\"\"Internal method to create an inbound node for the layer.\n\n Arguments:\n input_tensors: list of input tensors.\n output_tensors: list of output tensors.\n arguments: dictionary of keyword arguments that were passed to the\n `call` method of the layer at the call that created the node.\n \"\"\"\n inbound_layers = nest.map_structure(lambda t: t._keras_history.layer,\n input_tensors)\n node_indices = nest.map_structure(lambda t: t._keras_history.node_index,\n input_tensors)\n tensor_indices = nest.map_structure(lambda t: t._keras_history.tensor_index,\n input_tensors)\n\n # Create node, add it to inbound nodes.\n node_module.Node(\n self,\n inbound_layers=inbound_layers,\n node_indices=node_indices,\n tensor_indices=tensor_indices,\n input_tensors=input_tensors,\n output_tensors=output_tensors,\n arguments=arguments)\n\n # Update tensor history metadata.\n # The metadata attribute consists of\n # 1) a layer instance\n # 2) a node index for the layer\n # 3) a tensor index for the node.\n # The allows layer reuse (multiple nodes per layer) and multi-output\n # or multi-input layers (e.g. a layer can return multiple tensors,\n # and each can be sent to a different layer).\n for i, tensor in enumerate(nest.flatten(output_tensors)):\n tensor._keras_history = KerasHistory(self,\n len(self._inbound_nodes) - 1, i) # pylint: disable=protected-access\n\n def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n \"\"\"Private utility to retrieves an attribute (e.g. inputs) from a node.\n\n This is used to implement the methods:\n - get_input_shape_at\n - get_output_shape_at\n - get_input_at\n etc...\n\n Arguments:\n node_index: Integer index of the node from which\n to retrieve the attribute.\n attr: Exact node attribute name.\n attr_name: Human-readable attribute name, for error messages.\n\n Returns:\n The layer's attribute `attr` at the node of index `node_index`.\n\n Raises:\n RuntimeError: If the layer has no inbound nodes, or if called in Eager\n mode.\n ValueError: If the index provided does not match any node.\n \"\"\"\n if not self._inbound_nodes:\n raise RuntimeError('The layer has never been called '\n 'and thus has no defined ' + attr_name + '.')\n if not len(self._inbound_nodes) > node_index:\n raise ValueError('Asked to get ' + attr_name + ' at node ' +\n str(node_index) + ', but the layer has only ' +\n str(len(self._inbound_nodes)) + ' inbound nodes.')\n values = getattr(self._inbound_nodes[node_index], attr)\n if isinstance(values, list) and len(values) == 1:\n return values[0]\n else:\n return values\n\n def _maybe_build(self, inputs):\n # Check input assumptions set before layer building, e.g. input rank.\n if not self.built:\n input_spec.assert_input_compatibility(\n self.input_spec, inputs, self.name)\n input_list = nest.flatten(inputs)\n if input_list and self._dtype_policy.compute_dtype is None:\n try:\n dtype = input_list[0].dtype.base_dtype.name\n except AttributeError:\n pass\n else:\n self._dtype_policy = policy.Policy(dtype)\n input_shapes = None\n if all(hasattr(x, 'shape') for x in input_list):\n input_shapes = nest.map_structure(lambda x: x.shape, inputs)\n # Only call `build` if the user has manually overridden the build method.\n if not hasattr(self.build, '_is_default'):\n # Any setup work performed only once should happen in an `init_scope`\n # to avoid creating symbolic Tensors that will later pollute any eager\n # operations.\n with tf_utils.maybe_init_scope(self):\n self.build(input_shapes)\n # We must set self.built since user defined build functions are not\n # constrained to set self.built.\n self.built = True\n\n # Optionally load weight values specified at layer instantiation.\n if self._initial_weights is not None:\n self.set_weights(self._initial_weights)\n self._initial_weights = None\n\n def _symbolic_call(self, inputs):\n input_shapes = nest.map_structure(lambda x: x.shape, inputs)\n output_shapes = self.compute_output_shape(input_shapes)\n\n def _make_placeholder_like(shape):\n ph = backend.placeholder(shape=shape, dtype=self.dtype)\n ph._keras_mask = None\n return ph\n\n return nest.map_structure(_make_placeholder_like, output_shapes)\n\n def _get_trainable_state(self):\n \"\"\"Get the `trainable` state of each sublayer.\n\n Returns:\n A dict mapping all sublayers to their `trainable` value.\n \"\"\"\n layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)\n # Keep track of each top-level layers' `trainable` as well as the\n # state of all of its sublayers.\n trainable_state = {self: self.trainable}\n for layer in layers:\n trainable_state.update(layer._get_trainable_state())\n return trainable_state\n\n def _set_trainable_state(self, trainable_state):\n \"\"\"Set `trainable` state for each sublayer.\"\"\"\n layers = trackable_layer_utils.filter_empty_layer_containers(self._layers)\n if self in trainable_state:\n self.trainable = trainable_state[self]\n for layer in layers:\n layer._set_trainable_state(trainable_state)\n\n @property\n def _obj_reference_counts(self):\n \"\"\"A dictionary counting the number of attributes referencing an object.\"\"\"\n self._maybe_create_attribute('_obj_reference_counts_dict',\n object_identity.ObjectIdentityDictionary())\n return self._obj_reference_counts_dict\n\n @trackable.no_automatic_dependency_tracking\n def _maybe_create_attribute(self, name, default_value):\n \"\"\"Create the attribute with the default value if it hasn't been created.\n\n This is useful for fields that is used for tracking purpose,\n _trainable_weights, or _layers. Note that user could create a layer subclass\n and assign an internal field before invoking the Layer.__init__(), the\n __setattr__() need to create the tracking fields and __init__() need to not\n override them.\n\n Args:\n name: String, the name of the attribute.\n default_value: Object, the default value of the attribute.\n \"\"\"\n if not hasattr(self, name):\n super(Layer, self).__setattr__(name, default_value)\n\n def __delattr__(self, name):\n # For any super.__delattr__() call, we will directly use the implementation\n # in Trackable and skip the behavior in AutoTrackable. The Layer was\n # originally use Trackable as base class, the change of using Module as base\n # class forced us to have AutoTrackable in the class hierarchy. Skipping\n # the __delattr__ and __setattr__ in AutoTrackable will keep the status quo.\n existing_value = getattr(self, name, None)\n\n # If this value is replacing an existing object assigned to an attribute, we\n # should clean it out to avoid leaking memory. First we check if there are\n # other attributes referencing it.\n reference_counts = self._obj_reference_counts\n if existing_value not in reference_counts:\n super(tracking.AutoTrackable, self).__delattr__(name)\n return\n\n reference_count = reference_counts[existing_value]\n if reference_count > 1:\n # There are other remaining references. We can't remove this object from\n # _layers etc.\n reference_counts[existing_value] = reference_count - 1\n super(tracking.AutoTrackable, self).__delattr__(name)\n return\n else:\n # This is the last remaining reference.\n del reference_counts[existing_value]\n\n super(tracking.AutoTrackable, self).__delattr__(name)\n\n if (isinstance(existing_value, Layer)\n or trackable_layer_utils.has_weights(existing_value)):\n super(tracking.AutoTrackable, self).__setattr__(\n '_layers',\n [l for l in self._layers if l is not existing_value])\n self._attribute_sentinel.invalidate_all()\n if isinstance(existing_value, tf_variables.Variable):\n super(tracking.AutoTrackable, self).__setattr__(\n '_trainable_weights',\n [w for w in self._trainable_weights if w is not existing_value])\n super(tracking.AutoTrackable, self).__setattr__(\n '_non_trainable_weights',\n [w for w in self._non_trainable_weights if w is not existing_value])\n\n # Any time we change `_layers` (either by deleting the attribute or by\n # reassigning it which will call __delattr__ from __setattr__) the topology\n # of the subgraph of Layers may change. In that case we will need to\n # recompute any attribute which depends on that subgraph.\n if name == '_layers':\n self._attribute_sentinel.invalidate_all()\n\n def __setattr__(self, name, value):\n if (name == '_self_setattr_tracking' or\n not getattr(self, '_self_setattr_tracking', True) or\n # Exclude @property.setters from tracking\n hasattr(self.__class__, name)):\n try:\n super(tracking.AutoTrackable, self).__setattr__(name, value)\n except AttributeError:\n raise AttributeError(\n ('Can\\'t set the attribute \"{}\", likely because it conflicts with '\n 'an existing read-only @property of the object. Please choose a '\n 'different name.').format(name))\n return\n\n # Keep track of trackable objects, for the needs of `Network.save_weights`.\n value = data_structures.sticky_attribute_assignment(\n trackable=self, value=value, name=name)\n\n reference_counts = self._obj_reference_counts\n reference_counts[value] = reference_counts.get(value, 0) + 1\n\n # Clean out the old attribute, which clears _layers and _trainable_weights\n # if necessary.\n try:\n self.__delattr__(name)\n except AttributeError:\n pass\n\n # TODO(scottzhu): Need to track Module object as well for weight tracking.\n # Be careful about metric if it becomes a Module in future.\n # Append value to self._layers if relevant\n if (getattr(self, '_auto_track_sub_layers', True) and\n (isinstance(value, Layer) or trackable_layer_utils.has_weights(value))):\n self._maybe_create_attribute('_layers', [])\n # We need to check object identity to avoid de-duplicating empty\n # container types which compare equal.\n if not any((layer is value for layer in self._layers)):\n self._layers.append(value)\n if hasattr(value, '_attribute_sentinel'):\n value._attribute_sentinel.add_parent(self._attribute_sentinel)\n if hasattr(value, '_use_resource_variables'):\n # Legacy layers (V1 tf.layers) must always use\n # resource variables.\n value._use_resource_variables = True\n\n # Append value to list of trainable / non-trainable weights if relevant\n # TODO(b/125122625): This won't pick up on any variables added to a\n # list/dict after creation.\n for val in nest.flatten(value):\n # TODO(b/126450014): Remove `_UnreadVariable` check here when assign ops\n # no longer return True for isinstance Variable checks.\n if not isinstance(val, tf_variables.Variable):\n continue\n if isinstance(val, resource_variable_ops._UnreadVariable): # pylint: disable=protected-access\n continue\n\n # Users may add extra weights/variables\n # simply by assigning them to attributes (invalid for graph networks)\n self._maybe_create_attribute('_trainable_weights', [])\n self._maybe_create_attribute('_non_trainable_weights', [])\n if val.trainable:\n if any(val is w for w in self._trainable_weights):\n continue\n self._trainable_weights.append(val)\n else:\n if any(val is w for w in self._non_trainable_weights):\n continue\n self._non_trainable_weights.append(val)\n\n backend.track_variable(val)\n\n # Skip the auto trackable from tf.Module to keep status quo. See the comment\n # at __delattr__.\n super(tracking.AutoTrackable, self).__setattr__(name, value)\n\n def _gather_children_attribute(self, attribute):\n assert attribute in {\n 'weights', 'trainable_weights', 'non_trainable_weights'\n }\n if hasattr(self, '_layers'):\n nested_layers = trackable_layer_utils.filter_empty_layer_containers(\n self._layers)\n return list(\n itertools.chain.from_iterable(\n getattr(layer, attribute) for layer in nested_layers))\n return []\n\n def _gather_unique_layers(self):\n \"\"\"Returns the current layer and all its children depth first deduped.\n\n We are deduping after getting the layers to maintain the order.\n \"\"\"\n all_layers = self._gather_layers()\n unique_layers, seen_layers = [], object_identity.ObjectIdentitySet()\n for layer in all_layers:\n if layer not in seen_layers:\n unique_layers.append(layer)\n # Track the Variable's identity to avoid __eq__ issues.\n seen_layers.add(layer)\n return unique_layers\n\n def _gather_layers(self):\n \"\"\"Returns the current layer and all its children depth first.\"\"\"\n all_layers = [self]\n if hasattr(self, '_layers'):\n child_layers = trackable_layer_utils.filter_empty_layer_containers(\n self._layers)\n for child_layer in child_layers:\n all_layers.extend(child_layer._gather_layers())\n return all_layers\n\n @property\n @tracking.cached_per_instance\n def _attribute_sentinel(self):\n return trackable_layer_utils.AttributeSentinel()\n\n # This is a hack so that the is_layer (within\n # training/trackable/layer_utils.py) check doesn't get the weights attr.\n # TODO(b/110718070): Remove when fixed.\n def _is_layer(self):\n return True\n\n def _init_call_fn_args(self):\n # Clear cached call function arguments.\n self.__class__._call_full_argspec.fget.cache.pop(self, None)\n self.__class__._call_fn_args.fget.cache.pop(self, None)\n self.__class__._call_accepts_kwargs.fget.cache.pop(self, None)\n\n call_fn_args = self._call_fn_args\n self._expects_training_arg = ('training' in call_fn_args or\n self._call_accepts_kwargs)\n self._expects_mask_arg = ('mask' in call_fn_args or\n self._call_accepts_kwargs)\n\n @property\n @tracking.cached_per_instance\n def _call_full_argspec(self):\n # Argspec inspection is expensive and the call spec is used often, so it\n # makes sense to cache the result.\n return tf_inspect.getfullargspec(self.call)\n\n @property\n @tracking.cached_per_instance\n def _call_fn_args(self):\n all_args = self._call_full_argspec.args\n # Scrub `self` that appears if a decorator was applied.\n if all_args and all_args[0] == 'self':\n return all_args[1:]\n return all_args\n\n @property\n @tracking.cached_per_instance\n def _call_accepts_kwargs(self):\n return self._call_full_argspec.varkw is not None\n\n @property\n @tracking.cached_per_instance\n def _should_compute_mask(self):\n return ('mask' in self._call_fn_args or\n getattr(self, 'compute_mask', None) is not None)\n\n def _dedup_weights(self, weights):\n \"\"\"Dedupe weights while maintaining order as much as possible.\"\"\"\n output, seen_weights = [], object_identity.ObjectIdentitySet()\n for w in weights:\n if w not in seen_weights:\n output.append(w)\n # Track the Variable's identity to avoid __eq__ issues.\n seen_weights.add(w)\n return output\n\n # SavedModel properties. Please see keras/saving/saved_model for details.\n\n @property\n def _trackable_saved_model_saver(self):\n return layer_serialization.LayerSavedModelSaver(self)\n\n @property\n def _object_identifier(self):\n return self._trackable_saved_model_saver.object_identifier\n\n @property\n def _tracking_metadata(self):\n return self._trackable_saved_model_saver.tracking_metadata\n\n def _list_extra_dependencies_for_serialization(self, serialization_cache):\n return (self._trackable_saved_model_saver\n .list_extra_dependencies_for_serialization(serialization_cache))\n\n def _list_functions_for_serialization(self, serialization_cache):\n return (self._trackable_saved_model_saver\n .list_functions_for_serialization(serialization_cache))\n\n def __getstate__(self):\n # Override to support `copy.deepcopy` and pickling.\n # Thread-local objects cannot be copied in Python 3, so pop these.\n # Thread-local objects are used to cache losses in MirroredStrategy, and\n # so shouldn't be copied.\n state = self.__dict__.copy()\n state.pop('_thread_local', None)\n return state\n\n def __setstate__(self, state):\n state['_thread_local'] = threading.local()\n # Bypass Trackable logic as `__dict__` already contains this info.\n object.__setattr__(self, '__dict__', state)\n\n\nclass KerasHistory(\n collections.namedtuple('KerasHistory',\n ['layer', 'node_index', 'tensor_index'])):\n \"\"\"Tracks the Layer call that created a Tensor, for Keras Graph Networks.\n\n During construction of Keras Graph Networks, this metadata is added to\n each Tensor produced as the output of a Layer, starting with an\n `InputLayer`. This allows Keras to track how each Tensor was produced, and\n this information is later retraced by the `keras.engine.Network` class to\n reconstruct the Keras Graph Network.\n\n Attributes:\n layer: The Layer that produced the Tensor.\n node_index: The specific call to the Layer that produced this Tensor. Layers\n can be called multiple times in order to share weights. A new node is\n created every time a Tensor is called.\n tensor_index: The output index for this Tensor. Always zero if the Layer\n that produced this Tensor only has one output. Nested structures of\n Tensors are deterministically assigned an index via `nest.flatten`.\n \"\"\"\n # Added to maintain memory and performance characteristics of `namedtuple`\n # while subclassing.\n __slots__ = ()\n\n\n# Avoid breaking users who directly import this symbol from this file.\n# TODO(fchollet): remove this.\nInputSpec = input_spec.InputSpec # pylint:disable=invalid-name\n" ]
[ [ "tensorflow.python.keras.mixed_precision.experimental.policy.serialize", "tensorflow.python.training.tracking.layer_utils.filter_empty_layer_containers", "tensorflow.python.keras.backend.name_scope", "tensorflow.python.keras.backend.batch_get_value", "tensorflow.python.util.tf_inspect.getfullargspec", "tensorflow.python.keras.utils.generic_utils.to_list", "tensorflow.python.keras.mixed_precision.experimental.policy.policy_defaults_to_floatx", "tensorflow.python.keras.backend.placeholder", "tensorflow.python.keras.backend.global_learning_phase_is_set", "tensorflow.python.framework.ops.convert_to_tensor_v2", "tensorflow.python.keras.engine.base_layer_utils.autocast_context_manager", "tensorflow.python.keras.mixed_precision.experimental.loss_scale_optimizer.strategy_supports_loss_scaling", "tensorflow.python.keras.utils.generic_utils.validate_kwargs", "tensorflow.python.keras.regularizers.get", "tensorflow.python.distribute.distribution_strategy_context.has_strategy", "tensorflow.python.keras.engine.base_layer_utils.needs_keras_history", "tensorflow.python.training.tracking.layer_utils.has_weights", "tensorflow.python.keras.initializers.glorot_uniform", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.python.ops.array_ops.identity", "tensorflow.python.keras.initializers.zeros", "tensorflow.python.keras.backend.track_variable", "tensorflow.python.keras.engine.base_layer_utils.create_mean_metric", "tensorflow.python.keras.mixed_precision.experimental.policy.global_policy", "tensorflow.python.keras.engine.input_spec.assert_input_compatibility", "tensorflow.python.keras.utils.tf_utils.convert_shapes", "tensorflow.python.keras.engine.base_layer_utils.is_in_keras_graph", "tensorflow.python.training.tracking.layer_utils.invalidate_recursive_cache", "tensorflow.python.keras.backend.floatx", "tensorflow.python.training.tracking.layer_utils.AttributeSentinel", "tensorflow.python.keras.mixed_precision.experimental.autocast_variable.create_autocast_variable", "tensorflow.python.keras.engine.base_layer_utils.is_in_tf_function", "tensorflow.python.keras.mixed_precision.experimental.policy.Policy", "tensorflow.python.util.nest.map_structure", "tensorflow.python.keras.saving.saved_model.layer_serialization.LayerSavedModelSaver", "tensorflow.python.keras.utils.tf_utils.maybe_init_scope", "tensorflow.python.util.deprecation.deprecated", "tensorflow.python.ops.math_ops.cast", "tensorflow.python.keras.constraints.get", "tensorflow.python.training.tracking.data_structures.sticky_attribute_assignment", "tensorflow.python.distribute.distribution_strategy_context.in_cross_replica_context", "tensorflow.python.framework.func_graph.FuncGraph", "tensorflow.python.keras.utils.tf_utils.get_reachable_from_inputs", "tensorflow.python.keras.engine.base_layer_utils.call_context", "tensorflow.python.keras.utils.layer_utils.count_params", "tensorflow.python.framework.tensor_util.is_tensor", "tensorflow.python.training.tracking.layer_utils.cache_recursive_attribute", "tensorflow.python.util.deprecation.deprecated_args", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.python.distribute.distribution_strategy_context.get_strategy", "tensorflow.python.ops.array_ops.shape", "tensorflow.python.keras.utils.tf_utils.is_symbolic_tensor", "tensorflow.python.keras.engine.base_layer_utils.check_graph_consistency", "tensorflow.python.keras.backend.get_graph", "tensorflow.python.keras.mixed_precision.experimental.policy.deserialize", "tensorflow.python.keras.engine.base_layer_utils.have_all_keras_metadata", "tensorflow.python.util.nest.pack_sequence_as", "tensorflow.python.util.object_identity.ObjectIdentityDictionary", "tensorflow.python.keras.backend.batch_set_value", "tensorflow.python.keras.utils.generic_utils.to_snake_case", "tensorflow.python.platform.tf_logging.warn", "tensorflow.python.framework.tensor_spec.TensorSpec", "tensorflow.python.keras.engine.node.Node", "tensorflow.python.util.object_identity.ObjectIdentitySet", "tensorflow.python.keras.engine.base_layer_utils.create_keras_history", "tensorflow.python.keras.engine.base_layer_utils.from_saved_model", "tensorflow.python.keras.engine.base_layer_utils.v2_dtype_behavior_enabled", "tensorflow.python.keras.utils.tf_utils.are_all_symbolic_tensors", "tensorflow.python.keras.utils.generic_utils.is_all_none", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.keras.backend.learning_phase", "tensorflow.python.autograph.core.ag_ctx.control_status_ctx", "tensorflow.python.keras.initializers.get", "tensorflow.python.keras.engine.base_layer_utils.is_subclassed", "tensorflow.python.keras.engine.base_layer_utils.TrackableWeightHandler", "tensorflow.python.util.nest.flatten" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.3", "2.2" ] } ]
star936/python-learning
[ "02fe35a3944cb75184f1c9196618202ccf02c210" ]
[ "examples/read_city.py" ]
[ "# coding: utf-8\n\nimport pandas as pd\nimport sqlite3\nimport psycopg2\n\nHOST = 'localhost'\nPORT = 5432\nDATABASE = 'test'\nUSER = 'root'\nPASSWORD = 'root'\n\npg_conn = psycopg2.connect(host=HOST,\n port=PORT,\n user=USER,\n password=PASSWORD,\n dbname=DATABASE)\npg_cur = pg_conn.cursor()\n\npg_sql = \"INSERT INTO city(id, country, state, city, zip, latitude, longitude) VALUES (%s, %s, %s, %s, %s, %s, %s)\"\n\nconn = sqlite3.connect('../data/City.sqlite')\n\ncursor = conn.cursor()\n\n\ndef executemany(cur):\n values = cur.fetchall()\n data = []\n num = 0\n for v in values:\n data.append([v[0], v[1], v[2], v[3], v[4], v[5], v[6]])\n if len(data) == 500:\n num += 1\n print(num)\n print(data)\n pg_cur.executemany(pg_sql, data)\n pg_conn.commit()\n data = []\n\n if len(data) > 0:\n pg_cur.executemany(pg_sql, data)\n pg_conn.commit()\n\n cur.executemany(pg_sql, data)\n\n\ndef copy_from(filename):\n values = cursor.fetchall()\n df = pd.DataFrame(values,\n columns=[\n 'id', 'country', 'state', 'city', 'zip', 'latitude',\n 'longitude'\n ])\n df.to_csv(filename, index=False)\n\n with open(filename, 'r') as f:\n next(f) # 跳过header\n pg_cur.copy_from(f, 'city', sep=',')\n\n\nif __name__ == '__main__':\n cursor.execute(\"SELECT * FROM city\")\n # 第一种方法\n # executemany(cursor)\n\n # 第二种方法\n copy_from('../data/city.csv')\n conn.close()\n pg_conn.commit()\n pg_conn.close()\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
mmlanger/pyrbfpu
[ "005ac2778c913c4d3a7d836da9ecdf27afb44c74" ]
[ "error_local.py" ]
[ "import time\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom pyrbfpu.common import *\n\n\nnp.random.seed(12351)\npoints = np.random.uniform(0, 3, (200, 2))\n\n\ndef test_func(point):\n x, y = point\n r = np.sqrt(x ** 2 + y ** 2)\n return np.sin(x) * np.cos(y) * (10 - r)\n\n\nkernel = generate_kernel(inverse_multiquadric)\nvals = np.array([test_func(x) for x in points])\n\ntargets = [RBFInterpolationRational, RBFInterpolationFastLinear]\n\nfig = plt.figure()\n\nfor RBFInterpolation in targets:\n rbf = RBFInterpolation(points, vals, kernel, 1.0, tol=1e-12)\n rbf.estimate_error(0.1)\n\n start = time.perf_counter()\n rbf.optimize_param()\n end = time.perf_counter()\n print(\"TIME {}\".format(end - start))\n # print(\"TIME {} with EVALS {}\".format(end - start, rbf.counter))\n\n # start = time.perf_counter()\n # for i in range(50):\n # rbf.estimate_error(0.1)\n # end = time.perf_counter()\n # print(\"TIME \", end - start)\n\n eps_space = np.linspace(0.01, 3.5, 200)\n errors = np.array([rbf.estimate_error(eps) for eps in eps_space])\n\n axes = fig.add_subplot(111)\n axes.axvline(rbf.param)\n axes.plot(eps_space, errors, \"+\", label=repr(RBFInterpolation))\n\nplt.legend()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.sqrt", "numpy.random.seed", "numpy.linspace", "numpy.cos", "numpy.sin", "numpy.random.uniform", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pourabkarchaudhuri/unsupervised-clustering-faces-tensorflow
[ "1267df0e32e8f7bbdc7b6072bf1383c40d35163d" ]
[ "face_detect_demo.py" ]
[ "import tensorflow as tf\nfrom align import detect_face\nimport cv2\nimport imutils\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--img\", type = str, required=True)\nargs = parser.parse_args()\n\n# some constants kept as default from facenet\nminsize = 20\nthreshold = [0.6, 0.7, 0.7]\nfactor = 0.709\nmargin = 44\ninput_image_size = 160\n\nsess = tf.Session()\n# read pnet, rnet, onet models from align directory and files are det1.npy, det2.npy, det3.npy\npnet, rnet, onet = detect_face.create_mtcnn(sess, 'align')\n\ndef getFace(img):\n faces = []\n img_size = np.asarray(img.shape)[0:2]\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n if not len(bounding_boxes) == 0:\n for face in bounding_boxes:\n if face[4] > 0.50:\n det = np.squeeze(face[0:4])\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0] - margin / 2, 0)\n bb[1] = np.maximum(det[1] - margin / 2, 0)\n bb[2] = np.minimum(det[2] + margin / 2, img_size[1])\n bb[3] = np.minimum(det[3] + margin / 2, img_size[0])\n cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]\n resized = cv2.resize(cropped, (input_image_size,input_image_size),interpolation=cv2.INTER_AREA)\n faces.append({'face':resized,'rect':[bb[0],bb[1],bb[2],bb[3]]})\n return faces\n\nimg = cv2.imread(args.img)\nimg = imutils.resize(img,width=1000)\nfaces = getFace(img)\nfor face in faces:\n cv2.rectangle(img, (face['rect'][0], face['rect'][1]), (face['rect'][2], face['rect'][3]), (0, 255, 0), 2)\ncv2.imshow(\"faces\", img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.asarray", "numpy.squeeze", "tensorflow.Session", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
syahrulhamdani/disaster-response-pipeline
[ "b5ce11d0c29972120e500e8effecaa9c44f765d7" ]
[ "apps/routes.py" ]
[ "import json\n\nimport joblib\nimport pandas as pd\nimport plotly\nfrom flask import Flask, render_template, request, jsonify\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n# from plotly.graph_objs import Bar\nfrom plotly.graph_objects import Bar\nfrom sqlalchemy import create_engine\n\nfrom models import tokenize, StartingWithVerb\n\n\napp = Flask(__name__)\n\n# load data\nengine = create_engine('sqlite:///data/disaster.db')\ndf = pd.read_sql_table('disaster', engine)\n\n# load model\nmodel = joblib.load(\"models/model.joblib\")\n\n\n# index webpage displays cool visuals and receives user input text for model\[email protected]('/')\[email protected]('/index')\ndef index():\n\n # extract data needed for visuals\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n category_columns = df.drop(\n columns=[\"message\", \"original\", \"genre\", \"id\"]\n ).columns\n category_names = category_columns.str.replace(\"_\", \" \") \\\n .str.title()\n category_sum = df[category_columns].sum(axis=0)\n\n # create visuals\n graphs = [\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n },\n {\n 'data': [\n Bar(\n x=category_names,\n y=category_sum\n )\n ],\n 'layout': {\n 'title': 'Distribution of Message Categories',\n 'yaxis': {\n 'title': \"Count\",\n },\n }\n }\n ]\n\n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\[email protected]('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '')\n\n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n\n # This will render the go.html Please see that file.\n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results\n )\n" ]
[ [ "pandas.read_sql_table" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Panaetius/python-profiling-presentation
[ "0ed7a20fcefbb3bb5afc2fe7f99b603e458a0575" ]
[ "examples/example2/example_better.py" ]
[ "import numpy as np\n\n\ndef main():\n x = np.array(range(10**7))\n y = np.random.uniform(0, 100, size=(10**8))\n\n\nmain()\n" ]
[ [ "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
takumihonda/TC202010
[ "e661288cba8f2bffea0712c0fb23901eec509ae6" ]
[ "src/tseries_besttrack.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nimport matplotlib.dates as mdates\n\nfrom datetime import datetime, timedelta\n\nfrom tools_TC202010 import get_besttrack, plot_or_save\n\nquick = False\n\ndef main( stime=datetime( 2020, 8, 26, 0 ), etime=datetime( 2020, 9, 6, 0, )):\n tlons, tlats, tslps, ttimes = get_besttrack()\n\n fig, ax1 = plt.subplots( 1, 1, figsize=( 8, 6.5 ) ) \n\n ymin = 900\n ymax = 1010\n\n yticks = np.arange( ymin, ymax+10, 10 )\n\n ax1.plot( ttimes, tslps, )\n\n ax1.xaxis.set_major_locator( mdates.HourLocator(interval=24) )\n ax1.xaxis.set_major_formatter( mdates.DateFormatter('%H%M\\n%m/%d') )\n\n ax1.set_xlim( stime, etime )\n ax1.set_ylim( ymin, ymax )\n\n time_l = []\n time_ = stime\n while time_ <= etime:\n time_l.append( time_ )\n\n time_ += timedelta( days=1 )\n\n ax1.vlines( x=time_l, ymin=ymin, ymax=ymax, \n color='gray', lw=1.0, ls='dashed' )\n\n ax1.set_yticks( yticks, minor=False )\n ax1.hlines( y=yticks, xmin=stime, xmax=etime, \n color='gray', lw=1.0, ls='dashed' )\n\n # SLP reduction per day\n rat_slp = 24\n\n s_slp = 1000.0\n sdate_slp = datetime( 2020, 8, 28, 0 )\n dday = 4\n\n edate_slp = sdate_slp + timedelta( days=dday )\n e_slp = s_slp - dday * rat_slp\n\n ax1.plot( [ sdate_slp, edate_slp ], [ s_slp, e_slp ] )\n\n ofig = \"1p_MSLP_tseries_besttrac\"\n plot_or_save( quick=quick, opath=\"png/1p_MSLP_tseries\", ofig=ofig ) \n\nmain()\n" ]
[ [ "numpy.arange", "matplotlib.dates.HourLocator", "matplotlib.pyplot.subplots", "matplotlib.dates.DateFormatter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kongwilson/kaggle-feedback-prize
[ "475cbc88fea6618cdd2281e051c9cb69dd8bc2d7" ]
[ "main-longformer_tutorial.py" ]
[ "\"\"\"\nbase on\nhttps://www.kaggle.com/abhishek/two-longformers-are-better-than-1\n\nCopyright (C) Weicong Kong, 17/02/2022\n\"\"\"\n# %% [code] {\"jupyter\":{\"outputs_hidden\":false},\"execution\":{\"iopub.status.busy\":\"2022-01-05T18:19:34.765636Z\",\"iopub.execute_input\":\"2022-01-05T18:19:34.766525Z\",\"iopub.status.idle\":\"2022-01-05T18:19:43.074135Z\",\"shell.execute_reply.started\":\"2022-01-05T18:19:34.76641Z\",\"shell.execute_reply\":\"2022-01-05T18:19:43.072934Z\"}}\nimport gc\n\ngc.enable()\n\nimport sys\n\nsys.path.append(\"tez\")\n\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport tez\nimport torch\nimport torch.nn as nn\nfrom joblib import Parallel, delayed\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer\n\n# %% [code] {\"execution\":{\"iopub.status.busy\":\"2022-01-05T18:20:25.812387Z\",\"iopub.execute_input\":\"2022-01-05T18:20:25.812686Z\",\"iopub.status.idle\":\"2022-01-05T18:20:25.822348Z\",\"shell.execute_reply.started\":\"2022-01-05T18:20:25.812656Z\",\"shell.execute_reply\":\"2022-01-05T18:20:25.821316Z\"}}\ntarget_id_map = {\n\t\"B-Lead\": 0,\n\t\"I-Lead\": 1,\n\t\"B-Position\": 2,\n\t\"I-Position\": 3,\n\t\"B-Evidence\": 4,\n\t\"I-Evidence\": 5,\n\t\"B-Claim\": 6,\n\t\"I-Claim\": 7,\n\t\"B-Concluding Statement\": 8,\n\t\"I-Concluding Statement\": 9,\n\t\"B-Counterclaim\": 10,\n\t\"I-Counterclaim\": 11,\n\t\"B-Rebuttal\": 12,\n\t\"I-Rebuttal\": 13,\n\t\"O\": 14,\n\t\"PAD\": -100,\n}\n\nid_target_map = {v: k for k, v in target_id_map.items()}\n\n\nclass args1:\n\tinput_path = r\"C:\\Users\\wkong\\IdeaProjects\\kaggle_data\\feedback-prize-2021\"\n\tmodel = os.path.join('model_stores', 'longformer-large-4096')\n\ttez_model = os.path.join('model_stores', 'fblongformerlarge1536')\n\toutput = \".\"\n\tbatch_size = 8\n\tmax_len = 4096\n\n\nclass args2:\n\tinput_path = r\"C:\\Users\\wkong\\IdeaProjects\\kaggle_data\\feedback-prize-2021\"\n\tmodel = os.path.join('model_stores', 'longformer-large-4096')\n\ttez_model = os.path.join('model_stores', 'tez-fb-large')\n\toutput = \".\"\n\tbatch_size = 8\n\tmax_len = 4096\n\n\n# %% [code] {\"execution\":{\"iopub.status.busy\":\"2022-01-05T18:20:26.277132Z\",\"iopub.execute_input\":\"2022-01-05T18:20:26.277591Z\",\"iopub.status.idle\":\"2022-01-05T18:20:26.28869Z\",\"shell.execute_reply.started\":\"2022-01-05T18:20:26.277556Z\",\"shell.execute_reply\":\"2022-01-05T18:20:26.287697Z\"}}\nclass FeedbackDataset:\n\tdef __init__(self, samples, max_len, tokenizer):\n\t\tself.samples = samples\n\t\tself.max_len = max_len\n\t\tself.tokenizer = tokenizer\n\t\tself.length = len(samples)\n\n\tdef __len__(self):\n\t\treturn self.length\n\n\tdef __getitem__(self, idx):\n\t\tinput_ids = self.samples[idx][\"input_ids\"]\n\t\t# print(input_ids)\n\t\t# print(input_labels)\n\n\t\t# add start token id to the input_ids\n\t\tinput_ids = [self.tokenizer.cls_token_id] + input_ids\n\n\t\tif len(input_ids) > self.max_len - 1:\n\t\t\tinput_ids = input_ids[: self.max_len - 1]\n\n\t\t# add end token id to the input_ids\n\t\tinput_ids = input_ids + [self.tokenizer.sep_token_id]\n\t\tattention_mask = [1] * len(input_ids)\n\n\t\t# padding_length = self.max_len - len(input_ids)\n\t\t# if padding_length > 0:\n\t\t# if self.tokenizer.padding_side == \"right\":\n\t\t# input_ids = input_ids + [self.tokenizer.pad_token_id] * padding_length\n\t\t# attention_mask = attention_mask + [0] * padding_length\n\t\t# else:\n\t\t# input_ids = [self.tokenizer.pad_token_id] * padding_length + input_ids\n\t\t# attention_mask = [0] * padding_length + attention_mask\n\n\t\t# return {\n\t\t# \"ids\": torch.tensor(input_ids, dtype=torch.long),\n\t\t# \"mask\": torch.tensor(attention_mask, dtype=torch.long),\n\t\t# }\n\n\t\treturn {\n\t\t\t\"ids\": input_ids,\n\t\t\t\"mask\": attention_mask,\n\t\t}\n\n\n# %% [code]\nclass Collate:\n\tdef __init__(self, tokenizer):\n\t\tself.tokenizer = tokenizer\n\n\tdef __call__(self, batch):\n\t\toutput = dict()\n\t\toutput[\"ids\"] = [sample[\"ids\"] for sample in batch]\n\t\toutput[\"mask\"] = [sample[\"mask\"] for sample in batch]\n\n\t\t# calculate max token length of this batch\n\t\tbatch_max = max([len(ids) for ids in output[\"ids\"]])\n\n\t\t# add padding\n\t\tif self.tokenizer.padding_side == \"right\":\n\t\t\toutput[\"ids\"] = [s + (batch_max - len(s)) * [self.tokenizer.pad_token_id] for s in output[\"ids\"]]\n\t\t\toutput[\"mask\"] = [s + (batch_max - len(s)) * [0] for s in output[\"mask\"]]\n\t\telse:\n\t\t\toutput[\"ids\"] = [(batch_max - len(s)) * [self.tokenizer.pad_token_id] + s for s in output[\"ids\"]]\n\t\t\toutput[\"mask\"] = [(batch_max - len(s)) * [0] + s for s in output[\"mask\"]]\n\n\t\t# convert to tensors\n\t\toutput[\"ids\"] = torch.tensor(output[\"ids\"], dtype=torch.long)\n\t\toutput[\"mask\"] = torch.tensor(output[\"mask\"], dtype=torch.long)\n\n\t\treturn output\n\n\n# %% [code] {\"execution\":{\"iopub.status.busy\":\"2022-01-05T18:20:26.502033Z\",\"iopub.execute_input\":\"2022-01-05T18:20:26.50234Z\",\"iopub.status.idle\":\"2022-01-05T18:20:26.511671Z\",\"shell.execute_reply.started\":\"2022-01-05T18:20:26.502308Z\",\"shell.execute_reply\":\"2022-01-05T18:20:26.510519Z\"}}\nclass FeedbackModel(tez.Model):\n\tdef __init__(self, model_name, num_labels):\n\t\tsuper().__init__()\n\t\tself.model_name = model_name\n\t\tself.num_labels = num_labels\n\t\tconfig = AutoConfig.from_pretrained(model_name)\n\n\t\thidden_dropout_prob: float = 0.1\n\t\tlayer_norm_eps: float = 1e-7\n\t\tconfig.update(\n\t\t\t{\n\t\t\t\t\"output_hidden_states\": True,\n\t\t\t\t\"hidden_dropout_prob\": hidden_dropout_prob,\n\t\t\t\t\"layer_norm_eps\": layer_norm_eps,\n\t\t\t\t\"add_pooling_layer\": False,\n\t\t\t}\n\t\t)\n\t\tself.transformer = AutoModel.from_config(config)\n\t\tself.output = nn.Linear(config.hidden_size, self.num_labels)\n\n\tdef forward(self, ids, mask):\n\t\ttransformer_out = self.transformer(ids, mask)\n\t\tsequence_output = transformer_out.last_hidden_state\n\t\tlogits = self.output(sequence_output)\n\t\tlogits = torch.softmax(logits, dim=-1)\n\t\treturn logits, 0, {}\n\n\n# %% [code] {\"execution\":{\"iopub.status.busy\":\"2022-01-05T18:20:26.715129Z\",\"iopub.execute_input\":\"2022-01-05T18:20:26.715408Z\",\"iopub.status.idle\":\"2022-01-05T18:20:26.725686Z\",\"shell.execute_reply.started\":\"2022-01-05T18:20:26.715365Z\",\"shell.execute_reply\":\"2022-01-05T18:20:26.724696Z\"}}\ndef _prepare_test_data_helper(args, tokenizer, ids):\n\ttest_samples = []\n\tfor idx in ids:\n\t\tfilename = os.path.join(args.input_path, \"test\", idx + \".txt\")\n\t\twith open(filename, \"r\") as f:\n\t\t\ttext = f.read()\n\n\t\tencoded_text = tokenizer.encode_plus(\n\t\t\ttext,\n\t\t\tadd_special_tokens=False,\n\t\t\treturn_offsets_mapping=True,\n\t\t)\n\t\tinput_ids = encoded_text[\"input_ids\"]\n\t\toffset_mapping = encoded_text[\"offset_mapping\"]\n\n\t\tsample = {\n\t\t\t\"id\": idx,\n\t\t\t\"input_ids\": input_ids,\n\t\t\t\"text\": text,\n\t\t\t\"offset_mapping\": offset_mapping,\n\t\t}\n\n\t\ttest_samples.append(sample)\n\treturn test_samples\n\n\ndef prepare_test_data(df, tokenizer, args):\n\ttest_samples = []\n\tids = df[\"id\"].unique()\n\t# ids_splits = np.array_split(ids, 4)\n\t#\n\t# results = Parallel(n_jobs=4, backend=\"multiprocessing\")(\n\t# \tdelayed(_prepare_test_data_helper)(args, tokenizer, idx) for idx in ids_splits\n\t# )\n\ttest_samples = _prepare_test_data_helper(args, tokenizer, ids)\n\t# for result in results:\n\t# \ttest_samples.extend(result)\n\n\treturn test_samples\n\n\n# %% [code]\nDATA_ROOT = r\"C:\\Users\\wkong\\IdeaProjects\\kaggle_data\\feedback-prize-2021\"\ndf = pd.read_csv(os.path.join(DATA_ROOT, \"sample_submission.csv\"))\ndf_ids = df[\"id\"].unique()\n\ntokenizer = AutoTokenizer.from_pretrained(args1.model)\ntest_samples = prepare_test_data(df, tokenizer, args1)\ncollate = Collate(tokenizer=tokenizer)\n\nraw_preds = []\nfor fold_ in range(10):\n\tcurrent_idx = 0\n\ttest_dataset = FeedbackDataset(test_samples, args1.max_len, tokenizer)\n\n\tif fold_ < 5:\n\t\tmodel = FeedbackModel(model_name=args1.model, num_labels=len(target_id_map) - 1)\n\t\tmodel.load(os.path.join(args1.tez_model, f\"model_{fold_}.bin\"), weights_only=True)\n\t\tpreds_iter = model.predict(test_dataset, batch_size=args1.batch_size, n_jobs=-1, collate_fn=collate)\n\telse:\n\t\tmodel = FeedbackModel(model_name=args2.model, num_labels=len(target_id_map) - 1)\n\t\tmodel.load(os.path.join(args2.tez_model, f\"model_{fold_ - 5}.bin\"), weights_only=True)\n\t\tpreds_iter = model.predict(test_dataset, batch_size=args2.batch_size, n_jobs=-1, collate_fn=collate)\n\n\tcurrent_idx = 0\n\n\tfor preds in preds_iter:\n\t\tpreds = preds.astype(np.float16)\n\t\tpreds = preds / 10\n\t\tif fold_ == 0:\n\t\t\traw_preds.append(preds)\n\t\telse:\n\t\t\traw_preds[current_idx] += preds\n\t\t\tcurrent_idx += 1\n\ttorch.cuda.empty_cache()\n\tgc.collect()\n\n# %% [code]\nfinal_preds = []\nfinal_scores = []\n\nfor rp in raw_preds:\n\tpred_class = np.argmax(rp, axis=2)\n\tpred_scrs = np.max(rp, axis=2)\n\tfor pred, pred_scr in zip(pred_class, pred_scrs):\n\t\tpred = pred.tolist()\n\t\tpred_scr = pred_scr.tolist()\n\t\tfinal_preds.append(pred)\n\t\tfinal_scores.append(pred_scr)\n\nfor j in range(len(test_samples)):\n\ttt = [id_target_map[p] for p in final_preds[j][1:]]\n\ttt_score = final_scores[j][1:]\n\ttest_samples[j][\"preds\"] = tt\n\ttest_samples[j][\"pred_scores\"] = tt_score\n\n\n# %% [code]\ndef jn(pst, start, end):\n\treturn \" \".join([str(x) for x in pst[start:end]])\n\n\ndef link_evidence(oof):\n\tthresh = 1\n\tidu = oof['id'].unique()\n\tidc = idu[1]\n\teoof = oof[oof['class'] == \"Evidence\"]\n\tneoof = oof[oof['class'] != \"Evidence\"]\n\tfor thresh2 in range(26, 27, 1):\n\t\tretval = []\n\t\tfor idv in idu:\n\t\t\tfor c in ['Lead', 'Position', 'Evidence', 'Claim', 'Concluding Statement',\n\t\t\t\t'Counterclaim', 'Rebuttal']:\n\t\t\t\tq = eoof[(eoof['id'] == idv) & (eoof['class'] == c)]\n\t\t\t\tif len(q) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tpst = []\n\t\t\t\tfor i, r in q.iterrows():\n\t\t\t\t\tpst = pst + [-1] + [int(x) for x in r['predictionstring'].split()]\n\t\t\t\tstart = 1\n\t\t\t\tend = 1\n\t\t\t\tfor i in range(2, len(pst)):\n\t\t\t\t\tcur = pst[i]\n\t\t\t\t\tend = i\n\t\t\t\t\t# if pst[start] == 205:\n\t\t\t\t\t# print(cur, pst[start], cur - pst[start])\n\t\t\t\t\tif (cur == -1 and c != 'Evidence') or ((cur == -1) and (\n\t\t\t\t\t\t\t(pst[i + 1] > pst[end - 1] + thresh) or (pst[i + 1] - pst[start] > thresh2))):\n\t\t\t\t\t\tretval.append((idv, c, jn(pst, start, end)))\n\t\t\t\t\t\tstart = i + 1\n\t\t\t\tv = (idv, c, jn(pst, start, end + 1))\n\t\t\t\t# print(v)\n\t\t\t\tretval.append(v)\n\t\troof = pd.DataFrame(retval, columns=['id', 'class', 'predictionstring'])\n\t\troof = roof.merge(neoof, how='outer')\n\t\treturn roof\n\n\n# %% [code]\nproba_thresh = {\n\t\"Lead\": 0.7,\n\t\"Position\": 0.55,\n\t\"Evidence\": 0.65,\n\t\"Claim\": 0.55,\n\t\"Concluding Statement\": 0.7,\n\t\"Counterclaim\": 0.5,\n\t\"Rebuttal\": 0.55,\n}\n\nmin_thresh = {\n\t\"Lead\": 9,\n\t\"Position\": 5,\n\t\"Evidence\": 14,\n\t\"Claim\": 3,\n\t\"Concluding Statement\": 11,\n\t\"Counterclaim\": 6,\n\t\"Rebuttal\": 4,\n}\n\nsubmission = []\nfor sample_idx, sample in enumerate(test_samples):\n\tpreds = sample[\"preds\"]\n\toffset_mapping = sample[\"offset_mapping\"]\n\tsample_id = sample[\"id\"]\n\tsample_text = sample[\"text\"]\n\tsample_input_ids = sample[\"input_ids\"]\n\tsample_pred_scores = sample[\"pred_scores\"]\n\tsample_preds = []\n\n\tif len(preds) < len(offset_mapping):\n\t\tpreds = preds + [\"O\"] * (len(offset_mapping) - len(preds))\n\t\tsample_pred_scores = sample_pred_scores + [0] * (len(offset_mapping) - len(sample_pred_scores))\n\n\tidx = 0\n\tphrase_preds = []\n\twhile idx < len(offset_mapping):\n\t\tstart, _ = offset_mapping[idx]\n\t\tif preds[idx] != \"O\":\n\t\t\tlabel = preds[idx][2:]\n\t\telse:\n\t\t\tlabel = \"O\"\n\t\tphrase_scores = []\n\t\tphrase_scores.append(sample_pred_scores[idx])\n\t\tidx += 1\n\t\twhile idx < len(offset_mapping):\n\t\t\tif label == \"O\":\n\t\t\t\tmatching_label = \"O\"\n\t\t\telse:\n\t\t\t\tmatching_label = f\"I-{label}\"\n\t\t\tif preds[idx] == matching_label:\n\t\t\t\t_, end = offset_mapping[idx]\n\t\t\t\tphrase_scores.append(sample_pred_scores[idx])\n\t\t\t\tidx += 1\n\t\t\telse:\n\t\t\t\tbreak\n\t\tif \"end\" in locals():\n\t\t\tphrase = sample_text[start:end]\n\t\t\tphrase_preds.append((phrase, start, end, label, phrase_scores))\n\n\ttemp_df = []\n\tfor phrase_idx, (phrase, start, end, label, phrase_scores) in enumerate(phrase_preds):\n\t\tword_start = len(sample_text[:start].split())\n\t\tword_end = word_start + len(sample_text[start:end].split())\n\t\tword_end = min(word_end, len(sample_text.split()))\n\t\tps = \" \".join([str(x) for x in range(word_start, word_end)])\n\t\tif label != \"O\":\n\t\t\tif sum(phrase_scores) / len(phrase_scores) >= proba_thresh[label]:\n\t\t\t\tif len(ps.split()) >= min_thresh[label]:\n\t\t\t\t\ttemp_df.append((sample_id, label, ps))\n\n\ttemp_df = pd.DataFrame(temp_df, columns=[\"id\", \"class\", \"predictionstring\"])\n\tsubmission.append(temp_df)\n\nsubmission = pd.concat(submission).reset_index(drop=True)\nsubmission = link_evidence(submission)\nsubmission.to_csv(\"submission.csv\", index=False)\n\n# %% [code] {\"execution\":{\"iopub.status.busy\":\"2022-01-05T18:26:47.003019Z\",\"iopub.status.idle\":\"2022-01-05T18:26:47.004284Z\",\"shell.execute_reply.started\":\"2022-01-05T18:26:47.003972Z\",\"shell.execute_reply\":\"2022-01-05T18:26:47.004003Z\"}}\nsubmission.head()" ]
[ [ "torch.softmax", "pandas.concat", "torch.cuda.empty_cache", "pandas.DataFrame", "torch.tensor", "numpy.max", "torch.nn.Linear", "numpy.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
real-tesco/Multi-Step-Reasoning
[ "12c57714c490c5ef5aea9ecabce2143ca89d90b2" ]
[ "msr/data/datasets/rankingdataset.py" ]
[ "import json\nimport logging\nimport numpy as np\nfrom typing import List\n\nimport torch\nfrom torch.utils.data import Dataset\n\n\nlogger = logging.getLogger()\n\n\nclass RankingDataset(Dataset):\n def __init__(\n self,\n doc_embedding_files: List,\n doc_ids_files: List,\n query_embedding_files: List,\n query_ids_files: List,\n dataset: str,\n mode: str = 'train',\n model: str = 'reformulator'\n ) -> None:\n self._mode = mode\n self._model = model\n\n if model == 'ranker':\n # Load documents and convert to tensors\n tmp_docids = []\n tmp_docids.extend(np.load(x) for x in doc_ids_files)\n tmp_docids = np.concatenate(tmp_docids, axis=0)\n\n tmp_docs = []\n tmp_docs.extend(torch.tensor(np.load(x)) for x in doc_embedding_files)\n tmp_docs = torch.cat(tmp_docs, dim=0)\n\n self._docs = {idx: embed for idx, embed in zip(tmp_docids, tmp_docs)}\n logger.info(f\"len of docs: {len(self._docs)}\")\n\n tmp_query_ids = []\n tmp_query_ids.extend(np.load(x) for x in query_ids_files)\n tmp_query_ids = np.concatenate(tmp_query_ids, axis=0)\n tmp_queries = []\n tmp_queries.extend(torch.tensor(np.load(x)) for x in query_embedding_files)\n tmp_queries = torch.cat(tmp_queries, dim=0)\n\n self._queries = {idx: embed for idx, embed in zip(tmp_query_ids, tmp_queries)}\n logger.info(f\"len of queries: {len(self._queries)}\")\n\n self._dataset = dataset\n\n if self._dataset.split('.')[-1] == 'tsv' or self._dataset.split('.')[-2] == 'trec':\n if isinstance(self._dataset, str):\n with open(self._dataset, 'r') as f:\n self._examples = []\n for i, line in enumerate(f):\n line = line.strip().split()\n self._examples.append(line)\n elif self._dataset.split('.')[-1] == 'jsonl':\n if isinstance(self._dataset, str):\n with open(self._dataset, 'r') as f:\n self._examples = []\n for i, line in enumerate(f):\n line = json.loads(line)\n self._examples.append(line)\n else:\n logger.info(\"unknown dataset name..\")\n self._count = len(self._examples)\n logger.info(f\"len of examples: {self._count}\")\n\n def __getitem__(self, idx):\n example = self._examples[idx]\n if self._mode == 'train':\n if self._model == 'ranker':\n return {'query': self._queries[example[0]],\n 'positive_doc': self._docs[example[1]],\n 'negative_doc': self._docs[example[2]]}\n elif self._model == 'reformulator':\n return {'query': self._queries[example[0]], 'query_id': example[0]}\n\n elif self._mode == 'dev':\n if self._model == 'ranker':\n query_id = example['query_id']\n doc_id = example['doc_id']\n retrieval_score = example['retrieval_score']\n label = example['label']\n return {'query_id': query_id, 'doc_id': doc_id, 'label': label, 'retrieval_score': retrieval_score,\n 'query': self._queries[query_id], 'doc': self._docs[doc_id]}\n elif self._model == 'reformulator':\n qid = example[0]\n return {'query_id': qid, 'query': self._queries[qid]}\n\n elif self._mode == 'test':\n query_id = example[0]\n did = example[2]\n return {'query_id': query_id, 'doc_id': did, 'query': self._queries[query_id], 'doc': self._docs[did]}\n\n def collate(self, batch):\n if self._mode == 'train':\n if self._model == 'ranker':\n queries = torch.stack([item['query'] for item in batch])\n positive_docs = torch.stack([item['positive_doc'] for item in batch])\n negative_docs = torch.stack([item['negative_doc'] for item in batch])\n return {'query': queries, 'positive_doc': positive_docs, 'negative_doc': negative_docs}\n elif self._model == 'reformulator':\n queries = torch.stack([item['query'] for item in batch])\n qids = [item['query_id'] for item in batch]\n return {'query_id': qids, 'query': queries}\n\n elif self._mode == 'dev':\n if self._model == 'ranker':\n query_id = [item['query_id'] for item in batch]\n doc_id = [item['doc_id']for item in batch]\n retrieval_score = [item['retrieval_score'] for item in batch]\n labels = [item['label'] for item in batch]\n queries = torch.stack([item['query'] for item in batch])\n docs = torch.stack([item['doc'] for item in batch])\n return {'query_id': query_id, 'doc_id': doc_id, 'label': labels, 'retrieval_score': retrieval_score,\n 'doc': docs, 'query': queries}\n elif self._model == 'reformulator':\n query_id = [item['query_id'] for item in batch]\n queries = torch.stack([item['query'] for item in batch])\n return {'query_id': query_id, 'query': queries}\n\n elif self._mode == 'test':\n query_id = [item['query_id'] for item in batch]\n queries = torch.stack([item['query'] for item in batch])\n doc_id = [item['doc_id'] for item in batch]\n doc = torch.stack([item['doc'] for item in batch])\n return {'query_id': query_id, 'query': queries, 'doc_id': doc_id, 'doc': doc}\n\n def __len__(self):\n return self._count\n" ]
[ [ "numpy.concatenate", "numpy.load", "torch.stack", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
bobmshannon/ReliableTransportProtocols
[ "5f06534684a445a030c33840aaf5d0103f45846a" ]
[ "scripts/Experiment 3 Results/exp3-graphs.py" ]
[ "import matplotlib.pyplot as plt\n\n\"\"\"\nLoss=0,Corruption=0,t=0.1,m=10000\nX-axis: Window size;\nY-axis: Throughput (ABT, GBN and SR) in one graph/plot.\n\"\"\"\ndef graph_one():\n\t# X-axis (ABT, GBN, SR)\n\twsize = [10, 50, 100, 200, 500]\n\t# Y-axis (ABT)\n\tabt_throughput = [0.0696589, 0.0696589, 0.0696589, 0.0696589, 0.0696589]\n\tplt.plot(wsize, abt_throughput, marker='D', color='r', label='ABT')\n\t# Y-axis (GBN)\n\tgbn_throughput = [0.1412745, 0.0967342, 0.1264804, 0.1380641, 0.1381571]\n\tplt.plot(wsize, gbn_throughput, marker='D', color='g', label='GBN')\n\t# Y-axis (SR)\n\tsr_throughput = [0.0110078, 0.0510701, 0.1015294, 0.1580224, 0.1579091]\n\tplt.plot(wsize, sr_throughput, marker='D', color='b', label='SR')\n\t# Axis labels\n\tplt.xlabel('Window Size (packets)')\n\tplt.ylabel('Throughput (packets/time unit)')\n\t# Set Y-axis range\n\tplt.ylim([0,.2])\n\t# Legend\n\tplt.legend()\n\t# Title\n\tplt.title('Throughput vs. Window Size\\n(with 0.1 time units between each packet sent, \\ncorruption probability 0.0, loss probability 0.0, and 10,000 total messages sent)')\n\t# Show plot\n\tplt.show()\n\ngraph_one()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SimonBlanke/derivative-free-optimizers
[ "e7a95503195ea7c0b3590241caf499ea307085f2" ]
[ "gradient_free_optimizers/optimizers/local_opt/downhill_simplex.py" ]
[ "# Author: Simon Blanke\n# Email: [email protected]\n# License: MIT License\n\n\nimport random\nimport numpy as np\n\nfrom ..base_optimizer import BaseOptimizer\nfrom ...search import Search\n\n\ndef sort_list_idx(list_):\n list_np = np.array(list_)\n idx_sorted = list(list_np.argsort()[::-1])\n return idx_sorted\n\n\ndef centeroid(array_list):\n centeroid = []\n for idx in range(array_list[0].shape[0]):\n center_dim_pos = []\n for array in array_list:\n center_dim_pos.append(array[idx])\n\n center_dim_mean = np.array(center_dim_pos).mean()\n centeroid.append(center_dim_mean)\n\n return centeroid\n\n\nclass DownhillSimplexOptimizer(BaseOptimizer, Search):\n name = \"Downhill Simplex\"\n _name_ = \"downhill_simplex\"\n\n def __init__(self, *args, alpha=1, gamma=2, beta=0.5, sigma=0.5, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.alpha = alpha\n self.gamma = gamma\n self.beta = beta\n self.sigma = sigma\n\n self.n_simp_positions = len(self.conv.search_space) + 1\n self.simp_positions = []\n\n self.simplex_step = 0\n\n def finish_initialization(self):\n idx_sorted = sort_list_idx(self.scores_valid)\n self.simplex_pos = [self.positions_valid[idx] for idx in idx_sorted]\n self.simplex_scores = [self.scores_valid[idx] for idx in idx_sorted]\n\n n_inits = len(self.positions_valid)\n if n_inits < self.n_simp_positions:\n print(\"\\n Error: Not enough initial positions to form simplex\")\n print(\"\\n Increase number of initial positions\")\n\n self.simplex_step = 1\n\n self.i_x_0 = 0\n self.i_x_N_1 = -2\n self.i_x_N = -1\n\n @BaseOptimizer.track_nth_iter\n def iterate(self):\n simplex_stale = all(\n [np.array_equal(self.simplex_pos[0], array) for array in self.simplex_pos]\n )\n\n if simplex_stale:\n idx_sorted = sort_list_idx(self.scores_valid)\n self.simplex_pos = [self.positions_valid[idx] for idx in idx_sorted]\n self.simplex_scores = [self.scores_valid[idx] for idx in idx_sorted]\n\n self.simplex_step = 1\n\n if self.simplex_step == 1:\n idx_sorted = sort_list_idx(self.simplex_scores)\n self.simplex_pos = [self.simplex_pos[idx] for idx in idx_sorted]\n self.simplex_scores = [self.simplex_scores[idx] for idx in idx_sorted]\n\n self.center_array = centeroid(self.simplex_pos[:-1])\n\n r_pos = self.center_array + self.alpha * (\n self.center_array - self.simplex_pos[-1]\n )\n self.r_pos = self.conv2pos(r_pos)\n return self.r_pos\n\n elif self.simplex_step == 2:\n e_pos = self.center_array + self.gamma * (\n self.center_array - self.simplex_pos[-1]\n )\n self.e_pos = self.conv2pos(e_pos)\n self.simplex_step = 1\n\n return self.e_pos\n\n elif self.simplex_step == 3:\n # iter Contraction\n c_pos = self.h_pos + self.beta * (self.center_array - self.h_pos)\n c_pos = self.conv2pos(c_pos)\n\n return c_pos\n\n elif self.simplex_step == 4:\n # iter Shrink\n pos = self.simplex_pos[self.compress_idx]\n pos = pos + self.sigma * (self.simplex_pos[0] - pos)\n\n return self.conv2pos(pos)\n\n def evaluate(self, score_new):\n self.score_new = score_new\n\n if self.simplex_step != 0:\n self.prev_pos = self.positions_valid[-1]\n\n if self.simplex_step == 1:\n # self.r_pos = self.prev_pos\n self.r_score = score_new\n\n if self.r_score > self.simplex_scores[0]:\n self.simplex_step = 2\n\n elif self.r_score > self.simplex_scores[-2]:\n # if r is better than x N-1\n self.simplex_pos[-1] = self.r_pos\n self.simplex_scores[-1] = self.r_score\n self.simplex_step = 1\n\n if self.simplex_scores[-1] > self.r_score:\n self.h_pos = self.simplex_pos[-1]\n self.h_score = self.simplex_scores[-1]\n else:\n self.h_pos = self.r_pos\n self.h_score = self.r_score\n\n self.simplex_step = 3\n\n elif self.simplex_step == 2:\n self.e_score = score_new\n\n if self.e_score > self.r_score:\n self.simplex_scores[-1] = self.e_pos\n elif self.r_score > self.e_score:\n self.simplex_scores[-1] = self.r_pos\n else:\n self.simplex_scores[-1] = random.choice([self.e_pos, self.r_pos])[0]\n\n elif self.simplex_step == 3:\n # eval Contraction\n self.c_pos = self.prev_pos\n self.c_score = score_new\n\n if self.c_score > self.simplex_scores[-1]:\n self.simplex_scores[-1] = self.c_score\n self.simplex_pos[-1] = self.c_pos\n\n self.simplex_step = 1\n\n else:\n # start Shrink\n self.simplex_step = 4\n self.compress_idx = 0\n\n elif self.simplex_step == 4:\n # eval Shrink\n self.simplex_scores[self.compress_idx] = score_new\n self.simplex_pos[self.compress_idx] = self.prev_pos\n\n self.compress_idx += 1\n\n if self.compress_idx == self.n_simp_positions:\n self.simplex_step = 1\n" ]
[ [ "numpy.array", "numpy.array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
isLinXu/Yolov5_Efficient
[ "2d9ff5552bb6e608a810b063fc68e192d9264924" ]
[ "yolov5_master/utils/loggers/__init__.py" ]
[ "# YOLOv5 🚀 by Ultralytics, GPL-3.0 license\n\"\"\"\nLogging utils\n\"\"\"\n\nimport os\nimport warnings\nfrom threading import Thread\n\nimport pkg_resources as pkg\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom yolov5_master.utils.general import colorstr, emojis\nfrom yolov5_master.utils.loggers.wandb.wandb_utils import WandbLogger\nfrom yolov5_master.utils.plots import plot_images, plot_results\nfrom yolov5_master.utils.torch_utils import de_parallel\n\nLOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases\nRANK = int(os.getenv('RANK', -1))\n\ntry:\n import wandb\n\n assert hasattr(wandb, '__version__') # verify package import not local dir\n if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]:\n try:\n wandb_login_success = wandb.login(timeout=30)\n except wandb.errors.UsageError: # known non-TTY terminal issue\n wandb_login_success = False\n if not wandb_login_success:\n wandb = None\nexcept (ImportError, AssertionError):\n wandb = None\n\n\nclass Loggers():\n # YOLOv5 Loggers class\n def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS):\n self.save_dir = save_dir\n self.weights = weights\n self.opt = opt\n self.hyp = hyp\n self.logger = logger # for printing results to console\n self.include = include\n self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss\n 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics\n 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss\n 'x/lr0', 'x/lr1', 'x/lr2'] # params\n self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95',]\n for k in LOGGERS:\n setattr(self, k, None) # init empty logger dictionary\n self.csv = True # always log to csv\n\n # Message\n if not wandb:\n prefix = colorstr('Weights & Biases: ')\n s = f\"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\"\n print(emojis(s))\n\n # TensorBoard\n s = self.save_dir\n if 'tb' in self.include and not self.opt.evolve:\n prefix = colorstr('TensorBoard: ')\n self.logger.info(f\"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/\")\n self.tb = SummaryWriter(str(s))\n\n # W&B\n if wandb and 'wandb' in self.include:\n wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://')\n run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None\n self.opt.hyp = self.hyp # add hyperparameters\n self.wandb = WandbLogger(self.opt, run_id)\n else:\n self.wandb = None\n\n def on_pretrain_routine_end(self):\n # Callback runs on pre-train routine end\n paths = self.save_dir.glob('*labels*.jpg') # training labels\n if self.wandb:\n self.wandb.log({\"Labels\": [wandb.Image(str(x), caption=x.name) for x in paths]})\n\n def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn):\n # Callback runs on train batch end\n if plots:\n if ni == 0:\n if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress jit trace warning\n self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), [])\n if ni < 3:\n f = self.save_dir / f'train_batch{ni}.jpg' # filename\n Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()\n if self.wandb and ni == 10:\n files = sorted(self.save_dir.glob('train*.jpg'))\n self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]})\n\n def on_train_epoch_end(self, epoch):\n # Callback runs on train epoch end\n if self.wandb:\n self.wandb.current_epoch = epoch + 1\n\n def on_val_image_end(self, pred, predn, path, names, im):\n # Callback runs on val image end\n if self.wandb:\n self.wandb.val_one_image(pred, predn, path, names, im)\n\n def on_val_end(self):\n # Callback runs on val end\n if self.wandb:\n files = sorted(self.save_dir.glob('val*.jpg'))\n self.wandb.log({\"Validation\": [wandb.Image(str(f), caption=f.name) for f in files]})\n\n def on_fit_epoch_end(self, vals, epoch, best_fitness, fi):\n # Callback runs at the end of each fit (train+val) epoch\n x = {k: v for k, v in zip(self.keys, vals)} # dict\n if self.csv:\n file = self.save_dir / 'results.csv'\n n = len(x) + 1 # number of cols\n s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\\n') # add header\n with open(file, 'a') as f:\n f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\\n')\n\n if self.tb:\n for k, v in x.items():\n self.tb.add_scalar(k, v, epoch)\n\n if self.wandb:\n if best_fitness == fi:\n best_results = [epoch] + vals[3:7]\n for i, name in enumerate(self.best_keys):\n self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary\n self.wandb.log(x)\n self.wandb.end_epoch(best_result=best_fitness == fi)\n\n def on_model_save(self, last, epoch, final_epoch, best_fitness, fi):\n # Callback runs on model save event\n if self.wandb:\n if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1:\n self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi)\n\n def on_train_end(self, last, best, plots, epoch, results):\n # Callback runs on training end\n if plots:\n plot_results(file=self.save_dir / 'results.csv') # save results.png\n files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]\n files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter\n\n if self.tb:\n import cv2\n for f in files:\n self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC')\n\n if self.wandb:\n self.wandb.log({k: v for k, v in zip(self.keys[3:10], results)}) # log best.pt val results\n self.wandb.log({\"Results\": [wandb.Image(str(f), caption=f.name) for f in files]})\n # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model\n if not self.opt.evolve:\n wandb.log_artifact(str(best if best.exists() else last), type='model',\n name='run_' + self.wandb.wandb_run.id + '_model',\n aliases=['latest', 'best', 'stripped'])\n self.wandb.finish_run()\n\n def on_params_update(self, params):\n # Update hyperparams or configs of the experiment\n # params: A dict containing {param: value} pairs\n if self.wandb:\n self.wandb.wandb_run.config.update(params, allow_val_change=True)\n" ]
[ [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BanafshehKhaki/pHandDOprediction-models
[ "5164559f22e770ef3d60867380e1f367ec3d8a83" ]
[ "python_Scripts/univariate_models_train.py" ]
[ "import numpy as np\nimport pandas as pd\nimport re\nimport matplotlib.pyplot as plt\nimport os\nimport gc\nimport joblib\nfrom sklearn.metrics import r2_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import f1_score\nimport sklearn.metrics as skm\nfrom sklearn.metrics import confusion_matrix\nimport time\nimport functions as func\nimport datetime\nimport univariatefunctions as ufunc\nfrom multiprocessing import cpu_count\nfrom joblib import Parallel\nfrom joblib import delayed\n\n\ndef main():\n method = 'OrgData'\n\n # 'DOcategory', 'pHcategory',ysi_blue_green_algae (has negative values for leavon... what does negative mean!?)\n # 'ysi_blue_green_algae'] # , 'dissolved_oxygen', 'ph']\n targets = ['ph']\n# 'ARIMA', 'SARIMA', 'ETS', 'AR', 'MA'\n models = ['SARIMA']\n path = 'Sondes_data/train_Summer/'\n files = [f for f in os.listdir(path) if f.endswith(\n \".csv\") and f.startswith('leavon')] # leavon bgsusd_all\n\n for model_name in models:\n for target in targets:\n if target.find('category') > 0:\n cat = 1\n directory = 'Results/bookThree/output_Cat_' + \\\n model_name+'/oversampling_cv_models/'\n data = {'CV': 'CV', 'target_names': 'target_names', 'method_names': 'method_names', 'temporalhorizons': 'temporalhorizons', 'window_nuggets': 'window_nuggets', 'config': 'config',\n 'file_names': 'file_names', 'F1_0': 'F1_0', 'F1_1': 'F1_1', 'P_0': 'P_0', 'P_1': 'P_1', 'R_0': 'R_0', 'R_1': 'R_1', 'acc0_1': 'acc0_1', 'F1_0_1': 'F1_0_1', 'F1_all': 'F1_all', 'fbeta': 'fbeta'}\n else:\n cat = 0\n directory = 'Results/bookThree/output_Reg_' + \\\n model_name+'/oversampling_cv_models/'\n data = {'CV': 'CV', 'target_names': 'target_names', 'method_names': 'method_names', 'temporalhorizons': 'temporalhorizons', 'window_nuggets': 'window_nuggets', 'config': 'config',\n 'file_names': 'file_names', 'mape': 'mape', 'me': 'me', 'mae': 'mae', 'mpe': 'mpe', 'rmse': 'rmse', 'R2': 'R2'}\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n for file in files:\n print(file)\n result_filename = 'results_'+target + \\\n '_'+file + '_'+str(time.time())+'.csv'\n dfheader = pd.DataFrame(data=data, index=[0])\n dfheader.to_csv(directory+result_filename, index=False)\n n_steps = 1\n\n for PrH_index in [1, 3, 6, 12, 24, 36]:\n\n dataset = pd.read_csv(path+file)\n\n # Only the Target\n dataset = dataset[[\n 'year', 'month', 'day', 'hour', target]]\n\n print('Window: '+str(n_steps) + ' TH: ' +\n str(PrH_index)+' '+method+' '+target)\n\n i = 1\n\n if model_name == 'MA':\n train_X_grid, train_y_grid, input_dim, features = func.preparedata(\n dataset, PrH_index, n_steps, target, cat)\n\n start_time = time.time()\n # For Train files:\n custom_cv = func.custom_cv_2folds(train_X_grid, 3)\n for train_index, test_index in custom_cv:\n train_X = train_X_grid[train_index]\n train_y = train_y_grid[train_index]\n train_X_uni = train_X[:, -1]\n\n test_X = train_X_grid[test_index]\n # actual future values\n test_X_uni = test_X[:, -1]\n test_y = train_y_grid[test_index]\n\n predictions = ufunc.movingAverage(\n train_X_uni, train_y, test_X_uni, test_y)\n\n df_time = pd.DataFrame({\n 'year': np.array(test_X[:, 0]).astype(int), 'month': np.array(test_X[:, 1]).astype(int),\n 'day': np.array(test_X[:, 2]).astype(int), 'hour': np.array(test_X[:, 3]).astype(int),\n })\n\n timeline = pd.to_datetime(\n df_time, format='%Y%m%d %H')\n\n if cat == 1:\n predictions = np.array(predictions).astype(int)\n test_y = np.array(test_y).astype(int)\n\n # test_y = test_y.reshape(len(test_y),)\n # predictions = predictions.reshape(\n # len(predictions),)\n\n cm0 = func.forecast_accuracy(\n predictions, test_y, cat)\n\n filename = file + '_' + \\\n target+'_TH' + \\\n str(PrH_index)+'_lag' + \\\n str(n_steps)+'_'+str(i)\n\n plt.scatter(timeline.values,\n test_y, s=1)\n plt.scatter(timeline.values,\n predictions, s=1)\n plt.legend(['actual', 'predictions'],\n loc='upper right')\n plt.xticks(rotation=45)\n\n directorydeeper = directory+'more/'\n if not os.path.exists(directorydeeper):\n os.makedirs(directorydeeper)\n plt.savefig(directorydeeper+filename+'.jpg')\n\n plt.close()\n data = {'time': timeline,\n 'Actual': test_y,\n 'Predictions': predictions}\n df = pd.DataFrame(data=data)\n\n df.to_csv(directorydeeper+filename +\n '.csv', index=False)\n\n if cat == 1:\n data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,\n 'file_names': filename, 'F1_0': cm0[0], 'F1_1': cm0[1], 'P_0': cm0[2], 'P_1': cm0[3], 'R_0': cm0[4], 'R_1': cm0[5], 'acc0_1': cm0[6], 'F1_0_1': cm0[7], 'F1_all': cm0[8], 'fbeta': [cm0[9]]}\n elif cat == 0:\n data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,\n 'file_names': filename, 'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2], 'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5]}\n\n df = pd.DataFrame(data=data, index=[0])\n df.to_csv(directory+result_filename,\n index=False, mode='a', header=False)\n i = i + 1\n\n elapsed_time = time.time() - start_time\n print(time.strftime(\"%H:%M:%S\",\n time.gmtime(elapsed_time)))\n\n if model_name == 'ARIMA' or model_name == 'AR' or model_name == 'ETS' or model_name == 'SARIMA' or model_name == 'BL':\n start_time = time.time()\n train_X_grid = dataset.values\n custom_cv = ufunc.custom_cv_2folds(\n train_X_grid, 1, PrH_index)\n\n ######################\n # Cross Validation sets\n ######################\n i = 1\n for train_index, test_index in custom_cv:\n train_X = train_X_grid[train_index]\n train_X_uni = train_X[:, -1]\n\n test_X = train_X_grid[test_index]\n # actual future values\n test_X_uni = test_X[:, -1]\n\n df_time = pd.DataFrame({\n 'year': np.array(test_X[:, 0]).astype(int), 'month': np.array(test_X[:, 1]).astype(int),\n 'day': np.array(test_X[:, 2]).astype(int), 'hour': np.array(test_X[:, 3]).astype(int),\n })\n\n timeline = pd.to_datetime(\n df_time, format='%Y%m%d %H')\n\n if model_name == 'BL':\n\n # train_X_uni,test_X_uni\n # make them into dataFrame so below can be done\n\n test_X_uni = pd.DataFrame(test_X_uni)\n target_values = test_X_uni.drop(\n test_X_uni.index[0: 1], axis=0)\n target_values.index = np.arange(\n 0, len(target_values))\n\n # test_X_uni = pd.DataFrame(test_X_uni)\n\n predictions = test_X_uni.drop(\n test_X_uni.index[len(test_X_uni)-1: len(test_X_uni)], axis=0)\n test_X_uni = target_values\n\n timeline = timeline.drop(\n timeline.index[len(timeline)-1: len(timeline)], axis=0)\n\n cm0 = func.forecast_accuracy(\n predictions, test_X_uni, cat)\n\n filename = file + '_' + \\\n target+'_TH' + \\\n str(PrH_index)+'_lag' + \\\n str(n_steps)+'_'+str(i)\n\n plt.scatter(timeline.values,\n test_X_uni, s=1)\n plt.scatter(timeline.values,\n predictions, s=1)\n plt.legend(['actual', 'predictions'],\n loc='upper right')\n plt.xticks(rotation=45)\n\n directorydeeper = directory+'more/'\n if not os.path.exists(directorydeeper):\n os.makedirs(directorydeeper)\n plt.savefig(directorydeeper+filename+'.jpg')\n\n plt.close()\n\n print(predictions.head())\n print(test_X_uni.head())\n print(timeline.head())\n\n # data = {'time': timeline,\n # 'Actual': test_X_uni,\n # 'Predictions': predictions}\n frames = [timeline, test_X_uni, predictions]\n df = pd.concat(frames, axis=1)\n df.to_csv(directorydeeper+filename +\n '.csv', index=False, header=['time', 'Actual', 'Predictions'])\n\n if cat == 1:\n data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,\n 'file_names': filename, 'F1_0': cm0[0], 'F1_1': cm0[1], 'P_0': cm0[2], 'P_1': cm0[3], 'R_0': cm0[4], 'R_1': cm0[5], 'acc0_1': cm0[6], 'F1_0_1': cm0[7], 'F1_all': cm0[8], 'fbeta': [cm0[9]]}\n elif cat == 0:\n data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,\n 'file_names': filename, 'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2], 'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5]}\n\n df = pd.DataFrame(data=data, index=[0])\n df.to_csv(directory+result_filename,\n index=False, mode='a', header=False)\n\n if model_name == 'AR':\n predictions = ufunc.AutoRegression(\n train_X_uni, test_X_uni)\n if cat == 1:\n predictions = np.array(\n predictions).astype(int)\n test_X_uni = np.array(\n test_X_uni).astype(int)\n\n cm0 = func.forecast_accuracy(\n predictions, test_X_uni, cat)\n\n filename = file + '_' + \\\n target+'_TH' + \\\n str(PrH_index)+'_lag' + \\\n str(n_steps)+'_'+str(i)\n\n plt.scatter(timeline.values,\n test_X_uni, s=1)\n plt.scatter(timeline.values,\n predictions, s=1)\n plt.legend(['actual', 'predictions'],\n loc='upper right')\n plt.xticks(rotation=45)\n\n directorydeeper = directory+'more/'\n if not os.path.exists(directorydeeper):\n os.makedirs(directorydeeper)\n plt.savefig(directorydeeper+filename+'.jpg')\n\n plt.close()\n data = {'time': timeline,\n 'Actual': test_X_uni,\n 'Predictions': predictions}\n df = pd.DataFrame(data=data)\n\n df.to_csv(directorydeeper+filename +\n '.csv', index=False)\n\n if cat == 1:\n data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,\n 'file_names': filename, 'F1_0': cm0[0], 'F1_1': cm0[1], 'P_0': cm0[2], 'P_1': cm0[3], 'R_0': cm0[4], 'R_1': cm0[5], 'acc0_1': cm0[6], 'F1_0_1': cm0[7], 'F1_all': cm0[8], 'fbeta': [cm0[9]]}\n elif cat == 0:\n data = {'CV': i, 'target_names': target, 'method_names': method, 'temporalhorizons': PrH_index, 'window_nuggets': 1,\n 'file_names': filename, 'mape': cm0[0], 'me': cm0[1], 'mae': cm0[2], 'mpe': cm0[3], 'rmse': cm0[4], 'R2': cm0[5]}\n\n df = pd.DataFrame(data=data, index=[0])\n df.to_csv(directory+result_filename,\n index=False, mode='a', header=False)\n\n cfg_list = list()\n if model_name == 'ETS':\n cfg_list = ufunc.exp_smoothing_configs()\n scores = [ufunc.score_model('ETS', train_X_uni, test_X_uni, cfg, cat, directory, file,\n target, PrH_index, n_steps, i, result_filename, timeline) for cfg in cfg_list]\n\n if model_name == 'ARIMA':\n cfg_list = ufunc.ARIMA_configs()\n scores = [ufunc.score_model('ARIMA', train_X_uni, test_X_uni, cfg, cat, directory,\n file, target, PrH_index, n_steps, i, result_filename, timeline) for cfg in cfg_list]\n\n if model_name == 'SARIMA':\n cfg_list = ufunc.sarima_configs()\n\n scores = [ufunc.score_model('SARIMA', train_X_uni, test_X_uni, cfg, cat, directory,\n file, target, PrH_index, n_steps, i, result_filename, timeline) for cfg in cfg_list]\n\n i = i + 1\n elapsed_time = time.time() - start_time\n print(time.strftime(\"%H:%M:%S\",\n time.gmtime(elapsed_time)))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.legend", "pandas.concat", "pandas.read_csv", "pandas.to_datetime", "matplotlib.pyplot.scatter", "pandas.DataFrame", "matplotlib.pyplot.savefig", "matplotlib.pyplot.close", "matplotlib.pyplot.xticks", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
openforcefield/beryllium
[ "0870e93c45caa57ff738efa89f2c2cba1680c3cc" ]
[ "arsenic/tests/test_stats.py" ]
[ "import itertools\n\nimport networkx as nx\nimport numpy as np\nfrom arsenic import stats\nfrom arsenic.stats import bootstrap_statistic\n\n\ndef test_mle_easy():\n \"\"\"\n Test that the MLE for a graph with an absolute\n estimate on all nodes will recapitulate it\n \"\"\"\n input_absolutes: list = [-14.0, -13.0, -9.0]\n graph = nx.DiGraph()\n for i, val in enumerate(input_absolutes):\n graph.add_node(i, f_i=val, f_di=0.5)\n\n edges = [(0, 1), (0, 2), (2, 1)]\n for node1, node2 in edges:\n noise = np.random.uniform(low=-1.0, high=1.0)\n diff = input_absolutes[node2] - input_absolutes[node1] + noise\n graph.add_edge(node1, node2, f_ij=diff, f_dij=0.5 + np.abs(noise))\n\n output_absolutes, covar = stats.mle(graph, factor=\"f_ij\", node_factor=\"f_i\")\n\n for i, _ in enumerate(graph.nodes(data=True)):\n diff = np.abs(output_absolutes[i] - input_absolutes[i])\n assert (\n diff < covar[i, i]\n ), f\"MLE error. Output absolute \\\n estimate, {output_absolutes[i]}, is too far from\\\n true value: {input_absolutes[i]}.\"\n\n\ndef test_mle_easy_self_edge():\n \"\"\"\n Test that the MLE for a graph with an absolute\n estimate on all nodes will recapitulate it\n when a self-edge is included\n \"\"\"\n input_absolutes: list = [-14.0, -13.0, -9.0]\n graph = nx.DiGraph()\n for i, val in enumerate(input_absolutes):\n graph.add_node(i, f_i=val, f_di=0.5)\n\n edges = [(0, 1), (0, 2), (2, 1), (0, 0)]\n for node1, node2 in edges:\n noise = np.random.uniform(low=-1.0, high=1.0)\n diff = input_absolutes[node2] - input_absolutes[node1] + noise\n graph.add_edge(node1, node2, f_ij=diff, f_dij=0.5 + np.abs(noise))\n\n output_absolutes, covar = stats.mle(graph, factor=\"f_ij\", node_factor=\"f_i\")\n\n for i, _ in enumerate(graph.nodes(data=True)):\n diff = np.abs(output_absolutes[i] - input_absolutes[i])\n assert (\n diff < covar[i, i]\n ), f\"MLE error. Output absolute \\\n estimate, {output_absolutes[i]}, is too far from\\\n true value: {input_absolutes[i]}.\"\n\n\ndef test_mle_hard():\n \"\"\"\n Test that the MLE for a graph with a node missing an absolute value\n can get it right based on relative results\n \"\"\"\n input_absolutes: list = [-14.0, -13.0, -9.0]\n # make a t\n graph = nx.DiGraph()\n # Don't assign the first absolute value, check that MLE can get close to it\n for i, val in enumerate(input_absolutes):\n if i == 0:\n graph.add_node(i)\n else:\n graph.add_node(i, f_i=val, f_di=0.5)\n\n edges = [(0, 1), (0, 2), (2, 1)]\n for node1, node2 in edges:\n noise = np.random.uniform(low=-1.0, high=1.0)\n diff = input_absolutes[node2] - input_absolutes[node1] + noise\n graph.add_edge(node1, node2, f_ij=diff, f_dij=0.5 + np.abs(noise))\n\n output_absolutes, covar = stats.mle(graph, factor=\"f_ij\", node_factor=\"f_i\")\n\n for i, _ in enumerate(graph.nodes(data=True)):\n diff = np.abs(output_absolutes[i] - input_absolutes[i])\n assert (\n diff < covar[i, i]\n ), f\"MLE error. Output absolute \\\n estimate, {output_absolutes[i]}, is too far from\\\n true value: {input_absolutes[i]}.\"\n\n\ndef test_mle_relative():\n \"\"\"\n Test that the MLE can get the relative differences correct\n when no absolute values are provided\n \"\"\"\n input_absolutes: list = [-14.0, -13.0, -9.0]\n graph = nx.DiGraph()\n # Don't assign any absolute values\n edges = [(0, 1), (0, 2), (2, 1)]\n for node1, node2 in edges:\n noise = np.random.uniform(low=-0.5, high=0.5)\n diff = input_absolutes[node2] - input_absolutes[node1] + noise\n graph.add_edge(node1, node2, f_ij=diff, f_dij=0.5 + np.abs(noise))\n\n output_absolutes, _ = stats.mle(graph, factor=\"f_ij\", node_factor=\"f_i\")\n\n pairs = itertools.combinations(range(len(input_absolutes)), 2)\n\n for i, j in pairs:\n mle_diff = output_absolutes[i] - output_absolutes[j]\n true_diff = input_absolutes[i] - input_absolutes[j]\n\n assert (\n np.abs(true_diff - mle_diff) < 1.0\n ), f\"Relative\\\n difference from MLE: {mle_diff} is too far from the\\\n input difference, {true_diff}\"\n\n\ndef test_correlation_positive(fe_map):\n \"\"\"\n Test that the absolute DG plots have the correct signs,\n and statistics within reasonable agreement to the example data\n in `arsenic/data/example.csv`\n \"\"\"\n\n nodes = fe_map.graph.nodes\n\n x_data = np.asarray([n[1][\"exp_DG\"] for n in nodes(data=True)])\n y_data = np.asarray([n[1][\"calc_DG\"] for n in nodes(data=True)])\n xerr = np.asarray([n[1][\"exp_dDG\"] for n in nodes(data=True)])\n yerr = np.asarray([n[1][\"calc_dDG\"] for n in nodes(data=True)])\n\n bss = bootstrap_statistic(x_data, y_data, xerr, yerr, statistic=\"rho\")\n assert 0 < bss[\"mle\"] < 1, \"Correlation must be positive for this data\"\n\n for stat in [\"R2\", \"rho\"]:\n bss = bootstrap_statistic(x_data, y_data, xerr, yerr, statistic=stat)\n # all of the statistics for this example is between 0.61 and 0.84\n assert (\n 0.5 < bss[\"mle\"] < 0.9\n ), f\"Correlation must be positive for this data. {stat} is {bss['mle']}\"\n" ]
[ [ "numpy.random.uniform", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LBJ-Wade/aplpy_astro_plotting_lib
[ "3edc7f1ddb005ffac2ed79c42c189e991f5a9291" ]
[ "aplpy/angle_util.py" ]
[ "from __future__ import absolute_import, print_function, division\n\nimport math\nimport struct\n\nimport numpy as np\nfrom . import math_util\n\n\ndef almost_equal(a, b):\n c = struct.pack(\"<dd\", a, b)\n d = struct.unpack(\"<qq\", c)\n diff = abs(d[1] - d[0])\n return diff < 100\n\n\nclass Angle(object):\n\n def __init__(self, degrees='none', sexagesimal='none', latitude=False):\n\n if degrees != 'none':\n\n # Find out if the angle is negative\n negative = degrees < 0\n\n # Treat angle as positive\n degrees = np.abs(degrees)\n\n # Decompose angle into degrees, minutes, seconds\n m, d = math.modf(degrees)\n s, m = math.modf(m * 60.)\n s = s * 60.\n\n # Express degrees and minutes as integers\n d, m = int(round(d)), int(round(m))\n\n # Put back minus sign if negative\n if negative:\n d, m, s = -d, -m, -s\n\n # Set angle to tuple of degrees/minutes/seconds\n self.angle = (d, m, s)\n\n elif sexagesimal != 'none':\n self.angle = sexagesimal\n\n # Whether to keep the angle between 0 and 360 or -90 and 90\n self.latitude = latitude\n\n self.negative = False\n\n self._simplify()\n\n def _simplify(self):\n\n # Decompose angle\n d, m, s = self.angle\n\n # Make sure seconds are between 0. (inclusive) and 60. (exclusive)\n r = np.mod(s, 60.)\n m = m + int(round(s - r)) / 60\n s = r\n\n # Make sure minutes are between 0 and 59 (both inclusive)\n r = np.mod(m, 60)\n d = d + (m - r) / 60\n m = r\n\n # Make sure degrees are between 0 and 359 (both inclusive)\n d = np.mod(d, 360)\n\n # If angle is latitude, then:\n # - if degrees are between 90 and 270, angle is invalid\n # - if angle is between 270 and 360, subtract 360 degrees\n\n if self.latitude and d > 90:\n if d >= 270:\n self.negative = True\n d, m, s = 359 - d, 59 - m, 60. - s\n if s == 60.:\n s = s - 60.\n m = m + 1\n if m == 60:\n m = m - 60.\n d = d + 1\n else:\n raise Exception(\"latitude should be between -90 and 90 \\\n degrees\")\n\n # Set new angle\n self.angle = (d, m, s)\n\n def todegrees(self):\n d, m, s = self.angle\n degrees = d + m / 60. + s / 3600.\n if self.negative:\n degrees = - degrees\n return degrees\n\n def tohours(self):\n\n d, m, s = self.angle\n\n rd = np.mod(d, 15)\n h = (d - rd) / 15\n\n rm = np.mod(m, 15)\n m = (m - rm) / 15 + rd * 4\n\n s = s / 15. + rm * 4.\n\n a = Angle(sexagesimal=(h, m, s), latitude=self.latitude)\n a.negative = self.negative\n\n return a\n\n def toround(self, rval=3):\n\n # Decompose angle\n d, m, s = self.angle\n\n # Round numbers:\n # 1: degrees only\n # 2: degrees and minutes\n # 3: degrees, minutes, and seconds\n # 4.n: degrees, minutes, and decimal seconds with n decimal places\n\n if rval < 2:\n n = int(round((rval - 1.) * 100))\n d = round(d + m / 60. + s / 3600., n)\n if n == 0:\n d = int(d)\n return d\n elif rval < 3:\n m = int(round(m + s / 60.))\n if m == 60:\n m = 0\n d = d + 1\n return (d, m)\n elif rval < 4:\n s = int(round(s))\n if s == 60:\n s = 0\n m = m + 1\n if m == 60:\n m = 0\n d = d + 1\n return (d, m, s)\n else:\n n = int(round((rval - 4.) * 100))\n s = round(s, n)\n if s == 60.:\n s = 0.\n m = m + 1\n if m == 60:\n m = 0\n d = d + 1\n return (d, m, s)\n\n def tostringlist(self, format='ddd:mm:ss', sep=(\"d\", \"m\", \"s\")):\n\n format = format.replace('h', 'd')\n\n r = 1\n if '.d' in format:\n r = 1\n pos = format.find('.')\n nd = len(format[pos + 1:])\n r = r + nd / 100.\n if 'mm' in format:\n r = 2\n if 'ss' in format:\n r = 3\n if '.s' in format:\n r = 4\n pos = format.find('.')\n ns = len(format[pos + 1:])\n r = r + ns / 100.\n\n tup = self.toround(rval=r)\n if type(tup) == tuple:\n tup = list(tup)\n else:\n tup = [tup]\n\n string = []\n\n if 'dd' in format:\n if '.d' in format:\n string.append((\"%0\" + str(nd + 3) + \".\" + str(nd) + \"f\") % \\\n tup[0] + sep[0])\n else:\n string.append(\"%i\" % tup[0] + sep[0])\n if 'mm' in format:\n string.append(\"%02i\" % tup[1] + sep[1])\n if 'ss' in format and not '.s' in format:\n string.append(\"%02i\" % tup[2] + sep[2])\n if 'ss.s' in format:\n string.append((\"%0\" + str(ns + 3) + \".\" + str(ns) + \"f\") % tup[2] + sep[2])\n\n # If style is colons, need to remove trailing colon\n if len(string) >= 1 and sep[0] == ':' and not 'mm' in format:\n string[0] = string[0][:-1]\n if len(string) >= 2 and sep[1] == ':' and not 'ss' in format:\n string[1] = string[1][:-1]\n\n if self.latitude:\n if self.negative:\n string[0] = \"-\" + string[0]\n else:\n string[0] = \"+\" + string[0]\n\n return string\n\n def __str__(self):\n return self.angle.__str__()\n\n def __repr__(self):\n return self.angle.__repr__()\n\n def __add__(self, other):\n\n s = self.angle[2] + other.angle[2]\n m = self.angle[1] + other.angle[1]\n d = self.angle[0] + other.angle[0]\n\n s = Angle(sexagesimal=(d, m, s), latitude=self.latitude)\n s._simplify()\n\n return s\n\n def __mul__(self, other):\n\n d, m, s = self.angle\n s = s * other\n m = m * other\n d = d * other\n\n if self.latitude and self.negative:\n d, m, s = -d, -m, -s\n\n s = Angle(sexagesimal=(d, m, s), latitude=self.latitude)\n s._simplify()\n\n return s\n\n def __eq__(self, other):\n return self.angle[0] == other.angle[0] \\\n and self.angle[1] == other.angle[1] \\\n and almost_equal(self.angle[2], other.angle[2])\n\n def __div__(self, other):\n '''\n Divide an angle by another\n\n This method calculates the division using the angles in degrees, and\n then corrects for any rouding errors if the division should be exact.\n '''\n\n # Find division of angles in degrees\n div = self.todegrees() / other.todegrees()\n\n # Find the nearest integer\n divint = int(round(div))\n\n # Check whether the denominator multiplied by this number is exactly\n # the numerator\n if other * divint == self:\n return divint\n else:\n return div\n\n __truediv__ = __div__\n\ndef smart_round_angle_sexagesimal(x, latitude=False, hours=False):\n\n d, m, s = 0, 0, 0.\n\n divisors_360 = math_util.divisors(360)\n divisors_10 = math_util.divisors(10)\n divisors_60 = math_util.divisors(60)\n\n if hours:\n x /= 15.\n\n if x >= 1:\n d = math_util.closest(divisors_360, x)\n else:\n x = x * 60.\n if x >= 1:\n m = math_util.closest(divisors_60, x)\n else:\n x = x * 60.\n if x >= 1:\n s = math_util.closest(divisors_60, x)\n else:\n t = 1.\n while True:\n t = t * 10.\n x = x * 10.\n if x >= 1:\n s = math_util.closest(divisors_10, x) / t\n break\n\n a = Angle(sexagesimal=(d, m, s), latitude=latitude)\n\n if hours:\n a *= 15\n\n return a\n\n\ndef smart_round_angle_decimal(x, latitude=False):\n\n divisors_360 = math_util.divisors(360)\n divisors_10 = math_util.divisors(10)\n\n if x >= 1:\n d = math_util.closest(divisors_360, x)\n else:\n t = 1.\n while True:\n t = t * 10.\n x = x * 10.\n if x >= 1:\n d = math_util.closest(divisors_10, x) / t\n break\n\n a = Angle(degrees=d, latitude=latitude)\n\n return a\n\n\ndef _get_label_precision(format, latitude=False):\n\n # Find base spacing\n if \"mm\" in format:\n if \"ss\" in format:\n if \"ss.s\" in format:\n n_decimal = len(format.split('.')[1])\n label_spacing = Angle(sexagesimal=(0, 0, 10 ** (-n_decimal)), latitude=latitude)\n else:\n label_spacing = Angle(sexagesimal=(0, 0, 1), latitude=latitude)\n else:\n label_spacing = Angle(sexagesimal=(0, 1, 0), latitude=latitude)\n elif \".\" in format:\n ns = len(format.split('.')[1])\n label_spacing = Angle(degrees=10 ** (-ns), latitude=latitude)\n else:\n label_spacing = Angle(sexagesimal=(1, 0, 0), latitude=latitude)\n\n # Check if hours are used instead of degrees\n if \"hh\" in format:\n label_spacing *= 15\n\n return label_spacing\n\n\nclass InconsistentSpacing(Exception):\n pass\n\n\ndef _check_format_spacing_consistency(format, spacing):\n '''\n Check whether the format can correctly show labels with the specified\n spacing.\n\n For example, if the tick spacing is set to 1 arcsecond, but the format is\n set to dd:mm, then the labels cannot be correctly shown. Similarly, if the\n spacing is set to 1/1000 of a degree, or 3.6\", then a format of dd:mm:ss\n will cause rounding errors, because the spacing includes fractional\n arcseconds.\n\n This function will raise a warning if the format and spacing are\n inconsistent.\n '''\n\n label_spacing = _get_label_precision(format)\n\n if type(spacing / label_spacing) != int:\n raise InconsistentSpacing('Label format and tick spacing are inconsistent. Make sure that the tick spacing is a multiple of the smallest angle that can be represented by the specified format (currently %s). For example, if the format is dd:mm:ss.s, then the tick spacing has to be a multiple of 0.1\". Similarly, if the format is hh:mm:ss, then the tick spacing has to be a multiple of 15\". If you got this error as a result of interactively zooming in to a small region, this means that the default display format for the labels is not accurate enough, so you will need to increase the format precision.' % format)\n" ]
[ [ "numpy.mod", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LesterLian/CROWN-IBP
[ "dbe9fa67221940f88da73292298051a5401b3a84" ]
[ "train_ada.py" ]
[ "## Copyright (C) 2019, Huan Zhang <[email protected]>\n## Hongge Chen <[email protected]>\n## Chaowei Xiao <[email protected]>\n## \n## This program is licenced under the BSD 2-Clause License,\n## contained in the LICENCE file in this directory.\n##\nimport copy\nimport sys\nimport time\n\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import Dataset\n\nfrom ada import WeightedDataLoader\nfrom ada.modules import get_weighted_ce, set_seed\n\nfrom argparser import argparser\nfrom bound_layers import BoundSequential, BoundLinear, BoundConv2d, \\\n BoundDataParallel\nfrom config import load_config, get_path, config_modelloader, \\\n config_dataloader, update_dict\nfrom convex_adversarial import DualNetwork\nfrom eps_scheduler import EpsilonScheduler\n\n\n# sys.settrace(gpu_profile)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\nclass Logger(object):\n def __init__(self, log_file = None):\n self.log_file = log_file\n\n def log(self, *args, **kwargs):\n print(*args, **kwargs)\n if self.log_file:\n print(*args, **kwargs, file = self.log_file)\n self.log_file.flush()\n\n\ndef Train(model, t, loader, eps_scheduler, max_eps, norm, logger, verbose, train, opt, method, **kwargs):\n # if train=True, use training mode\n # if train=False, use test mode, no back prop\n \n num_class = 10\n losses = AverageMeter()\n l1_losses = AverageMeter()\n errors = AverageMeter()\n robust_errors = AverageMeter()\n regular_ce_losses = AverageMeter()\n robust_ce_losses = AverageMeter()\n relu_activities = AverageMeter()\n bound_bias = AverageMeter()\n bound_diff = AverageMeter()\n unstable_neurons = AverageMeter()\n dead_neurons = AverageMeter()\n alive_neurons = AverageMeter()\n batch_time = AverageMeter()\n batch_multiplier = kwargs.get(\"batch_multiplier\", 1) \n kappa = 1\n beta = 1\n if train:\n model.train() \n else:\n model.eval()\n # pregenerate the array for specifications, will be used for scatter\n sa = np.zeros((num_class, num_class - 1), dtype = np.int32)\n for i in range(sa.shape[0]):\n for j in range(sa.shape[1]):\n if j < i:\n sa[i][j] = j\n else:\n sa[i][j] = j + 1\n sa = torch.LongTensor(sa) \n batch_size = loader.batch_size * batch_multiplier\n if batch_multiplier > 1 and train:\n logger.log('Warning: Large batch training. The equivalent batch size is {} * {} = {}.'.format(batch_multiplier, loader.batch_size, batch_size))\n # per-channel std and mean\n std = torch.tensor(loader.std).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n mean = torch.tensor(loader.mean).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n \n model_range = 0.0\n end_eps = eps_scheduler.get_eps(t+1, 0)\n if end_eps < np.finfo(np.float32).tiny:\n logger.log('eps {} close to 0, using natural training'.format(end_eps))\n method = \"natural\"\n for i, data in enumerate(loader):\n if len(data) == 4:\n data, labels, dis, idx = data\n CE = get_weighted_ce(dis)\n elif len(data) == 2:\n data, labels = data\n CE = CrossEntropyLoss\n start = time.time()\n eps = eps_scheduler.get_eps(t, int(i//batch_multiplier)) \n if train and i % batch_multiplier == 0: \n opt.zero_grad()\n # generate specifications\n c = torch.eye(num_class).type_as(data)[labels].unsqueeze(1) - torch.eye(num_class).type_as(data).unsqueeze(0) \n # remove specifications to self\n I = (~(labels.data.unsqueeze(1) == torch.arange(num_class).type_as(labels.data).unsqueeze(0)))\n c = (c[I].view(data.size(0),num_class-1,num_class))\n # scatter matrix to avoid compute margin to self\n sa_labels = sa[labels]\n # storing computed lower bounds after scatter\n lb_s = torch.zeros(data.size(0), num_class)\n ub_s = torch.zeros(data.size(0), num_class)\n\n # FIXME: Assume unnormalized data is from range 0 - 1\n if kwargs[\"bounded_input\"]:\n if norm != np.inf:\n raise ValueError(\"bounded input only makes sense for Linf perturbation. \"\n \"Please set the bounded_input option to false.\")\n data_max = torch.reshape((1. - mean) / std, (1, -1, 1, 1))\n data_min = torch.reshape((0. - mean) / std, (1, -1, 1, 1))\n data_ub = torch.min(data + (eps / std), data_max)\n data_lb = torch.max(data - (eps / std), data_min)\n else:\n if norm == np.inf:\n data_ub = data + (eps / std)\n data_lb = data - (eps / std)\n else:\n # For other norms, eps will be used instead.\n data_ub = data_lb = data\n\n if list(model.parameters())[0].is_cuda:\n data = data.cuda()\n data_ub = data_ub.cuda()\n data_lb = data_lb.cuda()\n labels = labels.cuda()\n c = c.cuda()\n sa_labels = sa_labels.cuda()\n lb_s = lb_s.cuda()\n ub_s = ub_s.cuda()\n # convert epsilon to a tensor\n eps_tensor = data.new(1)\n eps_tensor[0] = eps\n\n # omit the regular cross entropy, since we use robust error\n output = model(data, method_opt=\"forward\", disable_multi_gpu = (method == \"natural\"))\n regular_ce = CE()(output, labels)\n regular_ce_losses.update(regular_ce.cpu().detach().numpy(), data.size(0))\n errors.update(torch.sum(torch.argmax(output, dim=1)!=labels).cpu().detach().numpy()/data.size(0), data.size(0))\n # get range statistic\n model_range = output.max().detach().cpu().item() - output.min().detach().cpu().item()\n \n '''\n torch.set_printoptions(threshold=5000)\n print('prediction: ', output)\n ub, lb, _, _, _, _ = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, method_opt=\"interval_range\")\n lb = lb_s.scatter(1, sa_labels, lb)\n ub = ub_s.scatter(1, sa_labels, ub)\n print('interval ub: ', ub)\n print('interval lb: ', lb)\n ub, _, lb, _ = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, upper=True, lower=True, method_opt=\"backward_range\")\n lb = lb_s.scatter(1, sa_labels, lb)\n ub = ub_s.scatter(1, sa_labels, ub)\n print('crown-ibp ub: ', ub)\n print('crown-ibp lb: ', lb) \n ub, _, lb, _ = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, upper=True, lower=True, method_opt=\"full_backward_range\")\n lb = lb_s.scatter(1, sa_labels, lb)\n ub = ub_s.scatter(1, sa_labels, ub)\n print('full-crown ub: ', ub)\n print('full-crown lb: ', lb)\n input()\n '''\n \n\n if verbose or method != \"natural\":\n if kwargs[\"bound_type\"] == \"convex-adv\":\n # Wong and Kolter's bound, or equivalently Fast-Lin\n if kwargs[\"convex-proj\"] is not None:\n proj = kwargs[\"convex-proj\"]\n if norm == np.inf:\n norm_type = \"l1_median\"\n elif norm == 2:\n norm_type = \"l2_normal\"\n else:\n raise(ValueError(\"Unsupported norm {} for convex-adv\".format(norm)))\n else:\n proj = None\n if norm == np.inf:\n norm_type = \"l1\"\n elif norm == 2:\n norm_type = \"l2\"\n else:\n raise(ValueError(\"Unsupported norm {} for convex-adv\".format(norm)))\n if loader.std == [1] or loader.std == [1, 1, 1]:\n convex_eps = eps\n else:\n convex_eps = eps / np.mean(loader.std)\n # for CIFAR we are roughly / 0.2\n # FIXME this is due to a bug in convex_adversarial, we cannot use per-channel eps\n if norm == np.inf:\n # bounded input is only for Linf\n if kwargs[\"bounded_input\"]:\n # FIXME the bounded projection in convex_adversarial has a bug, data range must be positive\n assert loader.std == [1,1,1] or loader.std == [1]\n data_l = 0.0\n data_u = 1.0\n else:\n data_l = -np.inf\n data_u = np.inf\n else:\n data_l = data_u = None\n f = DualNetwork(model, data, convex_eps, proj = proj, norm_type = norm_type, bounded_input = kwargs[\"bounded_input\"], data_l = data_l, data_u = data_u)\n lb = f(c)\n elif kwargs[\"bound_type\"] == \"interval\":\n ub, lb, relu_activity, unstable, dead, alive = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, method_opt=\"interval_range\")\n elif kwargs[\"bound_type\"] == \"crown-full\":\n _, _, lb, _ = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, upper=False, lower=True, method_opt=\"full_backward_range\")\n unstable = dead = alive = relu_activity = torch.tensor([0])\n elif kwargs[\"bound_type\"] == \"crown-interval\":\n # Enable multi-GPU only for the computationally expensive CROWN-IBP bounds, \n # not for regular forward propagation and IBP because the communication overhead can outweigh benefits, giving little speedup. \n ub, ilb, relu_activity, unstable, dead, alive = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, method_opt=\"interval_range\")\n crown_final_beta = kwargs['final-beta']\n beta = (max_eps - eps * (1.0 - crown_final_beta)) / max_eps\n if beta < 1e-5:\n lb = ilb\n else:\n if kwargs[\"runnerup_only\"]:\n # regenerate a smaller c, with just the runner-up prediction\n # mask ground truthlabel output, select the second largest class\n # print(output)\n # torch.set_printoptions(threshold=5000)\n masked_output = output.detach().scatter(1, labels.unsqueeze(-1), -100)\n # print(masked_output)\n # location of the runner up prediction\n runner_up = masked_output.max(1)[1]\n # print(runner_up)\n # print(labels)\n # get margin from the groud-truth to runner-up only\n runnerup_c = torch.eye(num_class).type_as(data)[labels]\n # print(runnerup_c)\n # set the runner up location to -\n runnerup_c.scatter_(1, runner_up.unsqueeze(-1), -1)\n runnerup_c = runnerup_c.unsqueeze(1).detach()\n # print(runnerup_c)\n # get the bound for runnerup_c\n _, _, clb, bias = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, method_opt=\"backward_range\")\n clb = clb.expand(clb.size(0), num_class - 1)\n else:\n # get the CROWN bound using interval bounds \n _, _, clb, bias = model(norm=norm, x_U=data_ub, x_L=data_lb, eps=eps, C=c, method_opt=\"backward_range\")\n bound_bias.update(bias.sum() / data.size(0))\n # how much better is crown-ibp better than ibp?\n diff = (clb - ilb).sum().item()\n bound_diff.update(diff / data.size(0), data.size(0))\n # lb = torch.max(lb, clb)\n lb = clb * beta + ilb * (1 - beta)\n else:\n raise RuntimeError(\"Unknown bound_type \" + kwargs[\"bound_type\"]) \n lb = lb_s.scatter(1, sa_labels, lb)\n robust_ce = CE()(-lb, labels)\n if kwargs[\"bound_type\"] != \"convex-adv\":\n \n relu_activities.update(relu_activity.sum().detach().cpu().item() / data.size(0), data.size(0))\n unstable_neurons.update(unstable.sum().detach().cpu().item() / data.size(0), data.size(0))\n dead_neurons.update(dead.sum().detach().cpu().item() / data.size(0), data.size(0))\n alive_neurons.update(alive.sum().detach().cpu().item() / data.size(0), data.size(0))\n\n if method == \"robust\":\n loss = robust_ce\n elif method == \"robust_activity\":\n loss = robust_ce + kwargs[\"activity_reg\"] * relu_activity.sum()\n elif method == \"natural\":\n loss = regular_ce\n elif method == \"robust_natural\":\n natural_final_factor = kwargs[\"final-kappa\"]\n kappa = (max_eps - eps * (1.0 - natural_final_factor)) / max_eps\n loss = (1-kappa) * robust_ce + kappa * regular_ce\n else:\n raise ValueError(\"Unknown method \" + method)\n\n if train and kwargs[\"l1_reg\"] > np.finfo(np.float32).tiny:\n reg = kwargs[\"l1_reg\"]\n l1_loss = 0.0\n for name, param in model.named_parameters():\n if 'bias' not in name:\n l1_loss = l1_loss + torch.sum(torch.abs(param))\n l1_loss = reg * l1_loss\n loss = loss + l1_loss\n l1_losses.update(l1_loss.cpu().detach().numpy(), data.size(0))\n if train:\n loss.backward()\n if i % batch_multiplier == 0 or i == len(loader) - 1:\n opt.step()\n\n losses.update(loss.cpu().detach().numpy(), data.size(0))\n\n if verbose or method != \"natural\":\n robust_ce_losses.update(robust_ce.cpu().detach().numpy(), data.size(0))\n # robust_ce_losses.update(robust_ce, data.size(0))\n robust_errors.update(torch.sum((lb<0).any(dim=1)).cpu().detach().numpy() / data.size(0), data.size(0))\n\n batch_time.update(time.time() - start)\n if i % 50 == 0 and train:\n logger.log( '[{:2d}:{:4d}]: eps {:4f} '\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Total Loss {loss.val:.4f} ({loss.avg:.4f}) '\n 'L1 Loss {l1_loss.val:.4f} ({l1_loss.avg:.4f}) '\n 'CE {regular_ce_loss.val:.4f} ({regular_ce_loss.avg:.4f}) '\n 'RCE {robust_ce_loss.val:.4f} ({robust_ce_loss.avg:.4f}) '\n 'Err {errors.val:.4f} ({errors.avg:.4f}) '\n 'Rob Err {robust_errors.val:.4f} ({robust_errors.avg:.4f}) '\n 'Uns {unstable.val:.1f} ({unstable.avg:.1f}) '\n 'Dead {dead.val:.1f} ({dead.avg:.1f}) '\n 'Alive {alive.val:.1f} ({alive.avg:.1f}) '\n 'Tightness {tight.val:.5f} ({tight.avg:.5f}) '\n 'Bias {bias.val:.5f} ({bias.avg:.5f}) '\n 'Diff {diff.val:.5f} ({diff.avg:.5f}) '\n 'R {model_range:.3f} '\n 'beta {beta:.3f} ({beta:.3f}) '\n 'kappa {kappa:.3f} ({kappa:.3f}) '.format(\n t, i, eps, batch_time=batch_time,\n loss=losses, errors=errors, robust_errors = robust_errors, l1_loss = l1_losses,\n regular_ce_loss = regular_ce_losses, robust_ce_loss = robust_ce_losses, \n unstable = unstable_neurons, dead = dead_neurons, alive = alive_neurons,\n tight = relu_activities, bias = bound_bias, diff = bound_diff,\n model_range = model_range, \n beta=beta, kappa = kappa))\n \n \n logger.log( '[FINAL RESULT epoch:{:2d} eps:{:.4f}]: '\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f}) '\n 'Total Loss {loss.val:.4f} ({loss.avg:.4f}) '\n 'L1 Loss {l1_loss.val:.4f} ({l1_loss.avg:.4f}) '\n 'CE {regular_ce_loss.val:.4f} ({regular_ce_loss.avg:.4f}) '\n 'RCE {robust_ce_loss.val:.4f} ({robust_ce_loss.avg:.4f}) '\n 'Uns {unstable.val:.3f} ({unstable.avg:.3f}) '\n 'Dead {dead.val:.1f} ({dead.avg:.1f}) '\n 'Alive {alive.val:.1f} ({alive.avg:.1f}) '\n 'Tight {tight.val:.5f} ({tight.avg:.5f}) '\n 'Bias {bias.val:.5f} ({bias.avg:.5f}) '\n 'Diff {diff.val:.5f} ({diff.avg:.5f}) '\n 'Err {errors.val:.4f} ({errors.avg:.4f}) '\n 'Rob Err {robust_errors.val:.4f} ({robust_errors.avg:.4f}) '\n 'R {model_range:.3f} '\n 'beta {beta:.3f} ({beta:.3f}) '\n 'kappa {kappa:.3f} ({kappa:.3f}) \\n'.format(\n t, eps, batch_time=batch_time,\n loss=losses, errors=errors, robust_errors = robust_errors, l1_loss = l1_losses,\n regular_ce_loss = regular_ce_losses, robust_ce_loss = robust_ce_losses, \n unstable = unstable_neurons, dead = dead_neurons, alive = alive_neurons,\n tight = relu_activities, bias = bound_bias, diff = bound_diff,\n model_range = model_range, \n kappa = kappa, beta=beta))\n for i, l in enumerate(model if isinstance(model, BoundSequential) else model.module):\n if isinstance(l, BoundLinear) or isinstance(l, BoundConv2d):\n norm = l.weight.data.detach().view(l.weight.size(0), -1).abs().sum(1).max().cpu()\n logger.log('layer {} norm {}'.format(i, norm))\n if method == \"natural\":\n return errors.avg, errors.avg\n else:\n return robust_errors.avg, errors.avg\n\ndef main(args):\n config = load_config(args)\n global_train_config = config[\"training_params\"]\n models, model_names = config_modelloader(config) \n for model, model_id, model_config in zip(models, model_names, config[\"models\"]):\n # make a copy of global training config, and update per-model config\n train_config = copy.deepcopy(global_train_config)\n if \"training_params\" in model_config:\n train_config = update_dict(train_config, model_config[\"training_params\"])\n model = BoundSequential.convert(model, train_config[\"method_params\"][\"bound_opts\"])\n \n # read training parameters from config file\n epochs = train_config[\"epochs\"]\n lr = train_config[\"lr\"]\n weight_decay = train_config[\"weight_decay\"]\n starting_epsilon = train_config[\"starting_epsilon\"]\n end_epsilon = train_config[\"epsilon\"]\n schedule_length = train_config[\"schedule_length\"]\n schedule_start = train_config[\"schedule_start\"]\n optimizer = train_config[\"optimizer\"]\n method = train_config[\"method\"]\n verbose = train_config[\"verbose\"]\n lr_decay_step = train_config[\"lr_decay_step\"]\n lr_decay_milestones = train_config[\"lr_decay_milestones\"]\n lr_decay_factor = train_config[\"lr_decay_factor\"]\n multi_gpu = train_config[\"multi_gpu\"]\n # parameters specific to a training method\n method_param = train_config[\"method_params\"]\n norm = float(train_config[\"norm\"])\n train_data, test_data = config_dataloader(config, **train_config[\"loader_params\"])\n if isinstance(train_data, Dataset):\n num_samples = len(train_data)\n else:\n num_samples = len(train_data.dataset)\n distribution = torch.Tensor([1.0 / num_samples] * num_samples)\n train_data = WeightedDataLoader(train_data, distribution)\n\n if optimizer == \"adam\":\n opt = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n elif optimizer == \"sgd\":\n opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True, weight_decay=weight_decay)\n else:\n raise ValueError(\"Unknown optimizer\")\n \n batch_multiplier = train_config[\"method_params\"].get(\"batch_multiplier\", 1)\n batch_size = train_data.batch_size * batch_multiplier \n num_steps_per_epoch = int(np.ceil(1.0 * len(train_data.dataset) / batch_size))\n epsilon_scheduler = EpsilonScheduler(train_config.get(\"schedule_type\", \"linear\"), schedule_start * num_steps_per_epoch, ((schedule_start + schedule_length) - 1) * num_steps_per_epoch, starting_epsilon, end_epsilon, num_steps_per_epoch)\n max_eps = end_epsilon\n \n if lr_decay_step:\n # Use StepLR. Decay by lr_decay_factor every lr_decay_step.\n lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=lr_decay_step, gamma=lr_decay_factor)\n lr_decay_milestones = None\n elif lr_decay_milestones:\n # Decay learning rate by lr_decay_factor at a few milestones.\n lr_scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=lr_decay_milestones, gamma=lr_decay_factor)\n else:\n raise ValueError(\"one of lr_decay_step and lr_decay_milestones must be not empty.\")\n model_name = get_path(config, model_id, \"model\", load = False)\n best_model_name = get_path(config, model_id, \"best_model\", load = False) \n model_log = get_path(config, model_id, \"train_log\")\n logger = Logger(open(model_log, \"w\"))\n logger.log(model_name)\n logger.log(\"Command line:\", \" \".join(sys.argv[:]))\n logger.log(\"training configurations:\", train_config)\n logger.log(\"Model structure:\")\n logger.log(str(model))\n logger.log(\"data std:\", train_data.std)\n best_err = np.inf\n recorded_clean_err = np.inf\n timer = 0.0\n \n if multi_gpu:\n logger.log(\"\\nUsing multiple GPUs for computing CROWN-IBP bounds\\n\")\n model = BoundDataParallel(model) \n model = model.cuda()\n \n for t in range(epochs):\n epoch_start_eps = epsilon_scheduler.get_eps(t, 0)\n epoch_end_eps = epsilon_scheduler.get_eps(t+1, 0)\n logger.log(\"Epoch {}, learning rate {}, epsilon {:.6g} - {:.6g}\".format(t, lr_scheduler.get_lr(), epoch_start_eps, epoch_end_eps))\n # with torch.autograd.detect_anomaly():\n start_time = time.time() \n Train(model, t, train_data, epsilon_scheduler, max_eps, norm, logger, verbose, True, opt, method, **method_param)\n if lr_decay_step:\n # Use stepLR. Note that we manually set up epoch number here, so the +1 offset.\n lr_scheduler.step(epoch=max(t - (schedule_start + schedule_length - 1) + 1, 0))\n elif lr_decay_milestones:\n # Use MultiStepLR with milestones.\n lr_scheduler.step()\n epoch_time = time.time() - start_time\n timer += epoch_time\n logger.log('Epoch time: {:.4f}, Total time: {:.4f}'.format(epoch_time, timer))\n logger.log(\"Evaluating...\")\n with torch.no_grad():\n # evaluate\n err, clean_err = Train(model, t, test_data, EpsilonScheduler(\"linear\", 0, 0, epoch_end_eps, epoch_end_eps, 1), max_eps, norm, logger, verbose, False, None, method, **method_param)\n\n logger.log('saving to', model_name)\n torch.save({\n 'state_dict' : model.module.state_dict() if multi_gpu else model.state_dict(), \n 'epoch' : t,\n }, model_name)\n\n # save the best model after we reached the schedule\n if t >= (schedule_start + schedule_length):\n if err <= best_err:\n best_err = err\n recorded_clean_err = clean_err\n logger.log('Saving best model {} with error {}'.format(best_model_name, best_err))\n torch.save({\n 'state_dict' : model.module.state_dict() if multi_gpu else model.state_dict(), \n 'robust_err' : err,\n 'clean_err' : clean_err,\n 'epoch' : t,\n }, best_model_name)\n\n logger.log('Total Time: {:.4f}'.format(timer))\n logger.log('Model {} best err {}, clean err {}'.format(model_id, best_err, recorded_clean_err))\n\n\nif __name__ == \"__main__\":\n set_seed(7)\n args = argparser()\n main(args)\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.abs", "torch.LongTensor", "torch.max", "torch.Tensor", "torch.reshape", "torch.min", "torch.eye", "torch.argmax", "numpy.finfo", "torch.tensor", "torch.no_grad", "numpy.mean", "torch.arange", "numpy.zeros", "torch.optim.lr_scheduler.StepLR" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
akbapu14/GOSeq2Seq
[ "779b7b105db544cdc1278fac4187e00521bf0fd8" ]
[ "stackedSeq2Seq.py" ]
[ "import tensorflow as tf\nfrom seq2seq.models import bridges\nfrom seq2seq.encoders import rnn_encoder\nfrom seq2seq.decoders import attention_decoder\nfrom seq2seq.decoders import basic_decoder\nfrom seq2seq.decoders import attention\nfrom seq2seq.training import utils as training_utils\nfrom seq2seq.contrib.seq2seq.decoder import _transpose_batch_time\nfrom seq2seq.contrib.seq2seq import helper as tf_decode_helper\nfrom tensorflow.contrib.rnn import LSTMStateTuple\nfrom seq2seq import losses as seq2seq_losses\nimport collections\nfrom seq2seq.models.model_base import ModelBase, _flatten_dict\nimport numpy as np\nimport time\nimport pickle\n\n\n\n\n#Parameters\ninput_vocab_size = 96100 + 5\noutput_vocab_size = 96582 + 3\ninput_embedding_size = 500\noutput_embedding_size = 500\nnumberArticles = 31\n\noptimizer_params = {\n \"optimizer.name\": \"Adam\",\n \"optimizer.learning_rate\": 1e-4,\n \"optimizer.params\": {}, # Arbitrary parameters for the optimizer\n \"optimizer.lr_decay_type\": \"\",\n \"optimizer.lr_decay_steps\": 100,\n \"optimizer.lr_decay_rate\": 0.99,\n \"optimizer.lr_start_decay_at\": 0,\n \"optimizer.lr_stop_decay_at\": tf.int32.max,\n \"optimizer.lr_min_learning_rate\": 1e-12,\n \"optimizer.lr_staircase\": False,\n \"optimizer.clip_gradients\": 5.0,\n \"optimizer.sync_replicas\": 0,\n \"optimizer.sync_replicas_to_aggregate\": 0,\n}\n\n#Akilesh's version of tackling articles with multiple sentences by breaking up and summing attention/outputs\n\n#Inputs\npmode = tf.contrib.learn.ModeKeys.TRAIN\nencoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')\nencoder_inputs_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_inputs_length')\ndecoder_targets_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='decoder_lengths')\ndecoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')\narticleIndicators= tf.placeholder(shape=(None,), dtype=tf.int32, name='l')\n# numValues = tf.placeholder(tf.float32)\n#Embeddings\ninput_embeddings = tf.Variable(tf.random_uniform([input_vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)\noutput_embeddings = tf.Variable(tf.random_uniform([output_vocab_size, output_embedding_size], -1.0, 1.0), dtype=tf.float32)\n\n#Network\nencoder_inputs_embedded = tf.nn.embedding_lookup(input_embeddings, encoder_inputs, name=\"input_embedding_vector\")\ndecoder_targets_embedded = tf.nn.embedding_lookup(output_embeddings, decoder_targets, name=\"decoder_embedding_vector\")\n\nencoder = rnn_encoder.BidirectionalRNNEncoder(params={}, mode=pmode)\n\neout = encoder.encode(encoder_inputs_embedded, encoder_inputs_length)\n\n#eout.attention_values = (4,5,256)\n#eout.attention_values_length = [5,5,5,5]\n#eout.outputs = (4,5,256)\n#eout.final_state -> encoder_final_state -> LSTMStateTuple[0] = (4,128)\ndef load_obj(name):\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)\ndef sumUp(someTensor):\n #Tensors where first dimension is what you need to concatenate over\n # weightedTensor = tf.Print(weightedTensor, tf.divide(1, numValues))\n partitioned = tf.dynamic_partition(data=someTensor, partitions=articleIndicators, num_partitions=numberArticles, name=\"Partition_Data\")\n finalList = [tf.reduce_mean(tensor, axis=0) for tensor in partitioned]\n return tf.stack(finalList)\ndef compute_loss(decoder_output, labels, labelLengths):\n \"\"\"Computes the loss for this model.\n\n Returns a tuple `(losses, loss)`, where `losses` are the per-batch\n losses and loss is a single scalar tensor to minimize.\n \"\"\"\n #pylint: disable=R0201\n # Calculate loss per example-timestep of shape [B, T]\n losses = seq2seq_losses.cross_entropy_sequence_loss(\n logits=decoder_output.logits[:, :, :],\n targets=tf.transpose(labels[:, 1:], [1, 0]),\n sequence_length=labelLengths - 1)\n\n # Calculate the average log perplexity\n loss = tf.reduce_sum(losses) / tf.to_float(\n tf.reduce_sum(labelLengths - 1))\n\n return losses, loss\ndef _create_optimizer():\n \"\"\"Creates the optimizer\"\"\"\n name = optimizer_params[\"optimizer.name\"]\n optimizer = tf.contrib.layers.OPTIMIZER_CLS_NAMES[name](\n learning_rate=optimizer_params[\"optimizer.learning_rate\"],\n **optimizer_params[\"optimizer.params\"])\n return optimizer\ndef _clip_gradients(grads_and_vars):\n \"\"\"Clips gradients by global norm.\"\"\"\n gradients, variables = zip(*grads_and_vars)\n clipped_gradients, _ = tf.clip_by_global_norm(\n gradients, optimizer_params[\"optimizer.clip_gradients\"])\n return list(zip(clipped_gradients, variables))\ndef _build_train_op(loss):\n \"\"\"Creates the training operation\"\"\"\n learning_rate_decay_fn = training_utils.create_learning_rate_decay_fn(\n decay_type=optimizer_params[\"optimizer.lr_decay_type\"] or None,\n decay_steps=optimizer_params[\"optimizer.lr_decay_steps\"],\n decay_rate=optimizer_params[\"optimizer.lr_decay_rate\"],\n start_decay_at=optimizer_params[\"optimizer.lr_start_decay_at\"],\n stop_decay_at=optimizer_params[\"optimizer.lr_stop_decay_at\"],\n min_learning_rate=optimizer_params[\"optimizer.lr_min_learning_rate\"],\n staircase=optimizer_params[\"optimizer.lr_staircase\"])\n\n optimizer = _create_optimizer()\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=tf.contrib.framework.get_global_step(),\n learning_rate=optimizer_params[\"optimizer.learning_rate\"],\n learning_rate_decay_fn=learning_rate_decay_fn,\n clip_gradients=_clip_gradients,\n optimizer=optimizer,\n summaries=[\"learning_rate\", \"loss\", \"gradients\", \"gradient_norm\"])\n\n return train_op\n\ndef predict(decoder_output):\n predictions = {}\n # Decoders returns output in time-major form [T, B, ...]\n # Here we transpose everything back to batch-major for the user\n output_dict = collections.OrderedDict(\n zip(decoder_output._fields, decoder_output))\n decoder_output_flat = _flatten_dict(output_dict)\n decoder_output_flat = {\n k: _transpose_batch_time(v)\n for k, v in decoder_output_flat.items()\n }\n predictions.update(decoder_output_flat)\n return predictions\n\ndef hbatch(inputs, max_sequence_length=None):\n \"\"\"\n Args:\n inputs:\n list of sentences (integer lists)\n max_sequence_length:\n integer specifying how large should `max_time` dimension be.\n If None, maximum sequence length would be used\n\n Outputs:\n inputs_time_major:\n input sentences transformed into time-major matrix\n (shape [max_time, batch_size]) padded with 0s\n sequence_lengths:\n batch-sized list of integers specifying amount of active\n time steps in each input sequence\n \"\"\"\n\n sequence_lengths = [len(seq) for seq in inputs]\n batch_size = len(inputs)\n\n if max_sequence_length is None:\n max_sequence_length = max(sequence_lengths)\n\n inputs_batch_major = np.zeros(shape=[batch_size, max_sequence_length], dtype=np.int32) # == PAD\n\n for i, seq in enumerate(inputs):\n for j, element in enumerate(seq):\n inputs_batch_major[i, j] = element\n\n # [batch_size, max_time] -> [max_time, batch_size]\n inputs_time_major = inputs_batch_major.swapaxes(0, 1)\n\n return inputs_time_major, sequence_lengths\n\nsummedAttention = sumUp(eout.attention_values)\nsummedLengths = eout.attention_values_length[:1]\nsummedOutputs = sumUp(eout.outputs)\n\ndecoder = attention_decoder.AttentionDecoder(params={}, mode=pmode,\nvocab_size=output_vocab_size,\nattention_values=summedAttention,\nattention_values_length=summedLengths,\nattention_keys=summedOutputs,\nattention_fn=attention.AttentionLayerBahdanau(params={}, mode=pmode))\n\nbatch_size = 2\ntarget_start_id = 1\n# helper_infer = tf_decode_helper.GreedyEmbeddingHelper(\n# embedding=output_embeddings,\n# start_tokens=tf.fill([batch_size], target_start_id),\n# end_token=5)\nhelper_train = tf_decode_helper.TrainingHelper(\n inputs=decoder_targets_embedded[:, :-1],\n sequence_length=decoder_targets_length - 1)\ndstate = eout.final_state\n\nsummed_encoder_final_state_c = tf.add(tf.multiply(sumUp(dstate[0].c), .5), tf.multiply(sumUp(dstate[1].c), .5))\nsummed_encoder_final_state_h = tf.add(tf.multiply(sumUp(dstate[0].h), .5), tf.multiply(sumUp(dstate[1].h), .5))\nsummed_encoder_final_state = LSTMStateTuple(\n c=summed_encoder_final_state_c,\n h=summed_encoder_final_state_h\n)\n\ndecoder_output, _, = decoder(summed_encoder_final_state, helper_train)\n#\n\npredictions = predict(decoder_output)['predicted_ids']\nlosses, loss = compute_loss(decoder_output=decoder_output, labels=decoder_targets, labelLengths=decoder_targets_length)\ntrain_op = _build_train_op(loss)\n\n\nsess = tf.Session()\ninit_op = tf.global_variables_initializer()\ninit_l = tf.local_variables_initializer()\nsaver = tf.train.Saver()\nsess.run(init_op)\nsess.run(init_l)\n\ntestArray = [[1,2,3,4,5], [6,7,8,9,10], [1,2,3,6,5], [1,2,3,4,5], [1,2,3,4,5], [6,7,8,9,10], [1,2,3,6,5], [1,2,3,4,5]]\nvalArray = [[6,5,4,3,2] * 20,[7,5,4,34,2] * 20, [7,5,4,34,2] * 20, [7,5,4,34,2] * 20,[7,5,4,34,2] * 20]\ntest1Array = [[1,2,3,4,5], [6,7,8,9,10], [1,2,3,6,5], [1,2,3,4,5], [1,2,3,4,5], [6,7,8,9,10], [1,2,3,6,5], [1,2,3,4,5], [1,2,3,4,5]]\nendArray = [[6,5,4,3,2] * 20,[7,5,4,34,2] * 20, [7,5,4,34,2] * 20, [7,5,4,34,2] * 20,[7,5,4,34,2] * 20]\nstacked_articles_train = load_obj(\"stacked_articles_train\")\nstacked_annotations_train = load_obj(\"stacked_annotations_train\")\ndef getNext():\n list_of_random_indices = random.sample(list(range(len(stacked_articles_train))), numberArticles)\n inputs = []\n targets = []\n articleIndicators = []\n for i,index in enumerate(list_of_random_indices):\n for l in stacked_articles_train[index]:\n inputs.append(l)\n articleIndicators.append(i)\n targets.append(stacked_annotations_train[index])\n features, features_lengths = hbatch(inputs)\n labels, labels_lengths = hbatch(targets)\n # features = [[1,2,3,4,5], [6,7,8,9,10], [1,2,3,6,5], [1,2,3,4,5], [1,2,3,4,5], [6,7,8,9,10], [1,2,3,6,5], [1,2,3,4,5]]\n # labels = [[6,5,4,3,2] * 20,[7,5,4,34,2] * 20, [7,5,4,34,2] * 20, [7,5,4,34,2] * 20,[7,5,4,34,2] * 20]\n # features_lengths = [5] * 8\n # labels_lengths = [100] * 5\n # articleIndicators = [0,0,1,1,2,3,4,4]\n\n return [features, labels, features_lengths, labels_lengths, articleIndicators]\n#Training Cycle\n\nfor i in range(10000):\n start = time.time()\n # saver.restore(sess, \"model.ckpt\")\n # print(\"Model restored.\")\n inputs = getNext()\n while len(inputs[4] < 700):\n inputs = getNext()\n print(sess.run([train_op], {encoder_inputs: inputs[0], decoder_targets: inputs[1], encoder_inputs_length: inputs[2], decoder_targets_length: inputs[3], articleIndicators: inputs[4]}))\n # print(sess.run([train_op], {encoder_inputs: test1Array, encoder_inputs_length: [5] * 9, lengthOfArticles: [0,0,1,1,2,3,4,4,3], decoder_targets: endArray, decoder_targets_length: [100] * 5}))\n\n print(\"This batch of 10 took: \" + str(time.time() - start) + \" seconds\")\n # save_path = saver.save(sess, \"model.ckpt\")\n # print(\"Model saved in file: %s\" % save_path)\n# d = sess.run([train_op], {encoder_inputs: testArray, encoder_inputs_length: [5] * 8, lengthOfArticles: [0,0,1,1,2,3,4,4], decoder_targets: valArray, decoder_targets_length: [100] * 5})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# bridge = bridges.InitialStateBridge(encoder_outputs=eout,\n# decoder_state_size=128,\n# params={},\n# mode=tf.contrib.learn.ModeKeys.TRAIN)\n# decoder_initial_state = bridge()\n\n# dout = decoder.decode(eout.outputs, encoder_inputs_embedded, decoder_targets)\n# encoder_logits = tf.contrib.layers.linear(outputs, output_embedding_size)\n# decoder_logits = tf.contrib.layers.linear(encoder_logits, output_vocab_size)\n# prediction = tf.argmax(decoder_logits, 2)\n\n\n#Training\n# stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n# labels=tf.one_hot(decoder_targets, depth=output_vocab_size, dtype=tf.float32),\n# logits=decoder_logits,\n# )\n# loss = tf.reduce_mean(stepwise_cross_entropy)\n# train_op = tf.train.AdamOptimizer().minimize(loss)\n\n# decoder_targets_predicted = tf.nn.embedding_lookup(output_embeddings, decoder_targets)\n\n# for i in range(1000):\n# print(sess.run([train_op, loss, prediction], {encoder_inputs: testArray, encoder_inputs_length: [5,5], decoder_targets: endArray}))\n# print(sess.run(prediction, {encoder_inputs: valArray, encoder_inputs_length: [5,5], decoder_targets: endArray}))\n# test = tf.get_default_graph().get_tensor_by_name(\"first_inputs\")\n# with tf.device(\"/gpu:0\"):\n# encoder2 = rnn_encoder.UnidirectionalRNNEncoder(params={}, mode=tf.contrib.learn.ModeKeys.TRAIN)\n# eout2 = encoder2.encode(encoder_inputs_embedded, encoder_inputs_length)\n#\n#\n# with tf.device(\"/gpu:0\"):\n# encoder3 = rnn_encoder.StackBidirectionalRNNEncoder(params={}, mode=tf.contrib.learn.ModeKeys.TRAIN)\n# eout3 = encoder3.encode(encoder_inputs_embedded, encoder_inputs_length)\n" ]
[ [ "tensorflow.local_variables_initializer", "tensorflow.transpose", "tensorflow.dynamic_partition", "tensorflow.reduce_mean", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.contrib.framework.get_global_step", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.clip_by_global_norm", "tensorflow.contrib.rnn.LSTMStateTuple", "tensorflow.Session", "tensorflow.train.Saver", "tensorflow.random_uniform", "tensorflow.nn.embedding_lookup", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
hyeon-jo/C3D-DeepFakeDetection-pytorch
[ "3ce1b22f76ba5f0871fbd4e8745d4915bae76d3b" ]
[ "models/c3d.py" ]
[ "\"\"\"C3D\"\"\"\r\nimport math\r\nfrom collections import OrderedDict\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn.modules.utils import _triple\r\n\r\n\r\nclass Flatten(nn.Module):\r\n def forward(self, x):\r\n return x.view(x.size(0), 1)\r\n\r\n\r\nclass AttentionModule(nn.Module):\r\n def __init__(self):\r\n super(AttentionModule, self).__init__()\r\n self.temporal_size = 4\r\n self.height = 8\r\n self.width = 8\r\n reduction_ratio = 16\r\n\r\n self.spatial_attention = nn.Sequential(\r\n nn.Conv3d(512, 1, kernel_size=(1, self.height, self.width), bias=True),\r\n nn.BatchNorm3d(1),\r\n nn.Sigmoid()\r\n )\r\n self.temporal_attention = nn.Sequential(\r\n nn.Conv3d(512, 1, kernel_size=(self.temporal_size, 1, 1), bias=True),\r\n nn.BatchNorm3d(1),\r\n nn.Sigmoid()\r\n )\r\n self.avg_pool = nn.AdaptiveAvgPool3d(512)\r\n self.max_pool = nn.AdaptiveMaxPool3d(512)\r\n # self.channel_attention = nn.Sequential(\r\n # Flatten(),\r\n # nn.Linear(512, 512 // reduction_ratio),\r\n # nn.ReLU(),\r\n # nn.Linear(512 // reduction_ratio, 512)\r\n # )\r\n self.sigmoid = nn.Sigmoid()\r\n self.relu = nn.ReLU()\r\n\r\n def forward(self, x):\r\n spatial_x = self.spatial_attention(x)\r\n temporal_x = self.sigmoid(self.temporal_attention(x))\r\n # channel_x = self.channel_attention(self.avg_pool(x)) + self.channel_attention(self.max_pool(x))\r\n # channel_x = self.sigmoid(channel_x).unsqueeze(2).unsqueeze(3).unsqueeze(4).expand_as(x)\r\n\r\n # attn_x = x * channel_x\r\n attn_x = x * temporal_x\r\n attn_x = attn_x * spatial_x\r\n\r\n return attn_x\r\n\r\n\r\n\r\nclass C3D(nn.Module):\r\n \"\"\"C3D with BN and pool5 to be AdaptiveAvgPool3d(1).\"\"\"\r\n def __init__(self, with_classifier=False, return_conv=False, num_classes=101):\r\n super(C3D, self).__init__()\r\n self.with_classifier = with_classifier\r\n self.num_classes = num_classes\r\n self.return_conv = return_conv\r\n\r\n self.conv1 = nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))\r\n self.bn1 = nn.BatchNorm3d(64)\r\n self.relu1 = nn.ReLU()\r\n self.pool1 = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))\r\n\r\n self.conv2 = nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))\r\n self.bn2 = nn.BatchNorm3d(128)\r\n self.relu2 = nn.ReLU()\r\n self.pool2 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))\r\n\r\n self.conv3a = nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))\r\n self.bn3a = nn.BatchNorm3d(256)\r\n self.relu3a = nn.ReLU()\r\n self.conv3b = nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))\r\n self.bn3b = nn.BatchNorm3d(256)\r\n self.relu3b = nn.ReLU()\r\n self.pool3 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))\r\n\r\n self.conv4a = nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))\r\n self.bn4a = nn.BatchNorm3d(512)\r\n self.relu4a = nn.ReLU()\r\n self.conv4b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))\r\n self.bn4b = nn.BatchNorm3d(512)\r\n self.relu4b = nn.ReLU()\r\n self.pool4 = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))\r\n\r\n self.conv5a = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))\r\n self.bn5a = nn.BatchNorm3d(512)\r\n self.relu5a = nn.ReLU()\r\n self.conv5b = nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))\r\n self.bn5b = nn.BatchNorm3d(512)\r\n self.relu5b = nn.ReLU()\r\n\r\n self.attention = AttentionModule()\r\n\r\n if self.return_conv:\r\n self.feature_pool = nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)) # 9216\r\n # self.feature_pool = nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)) 4182\r\n\r\n self.pool5 = nn.AdaptiveAvgPool3d(1)\r\n\r\n if self.with_classifier:\r\n self.linear = nn.Linear(512, self.num_classes)\r\n\r\n def forward(self, x):\r\n x = self.conv1(x) \r\n x = self.bn1(x)\r\n x = self.relu1(x)\r\n x = self.pool1(x)\r\n\r\n x = self.conv2(x)\r\n x = self.bn2(x)\r\n x = self.relu2(x)\r\n x = self.pool2(x)\r\n\r\n x = self.conv3a(x)\r\n x = self.bn3a(x)\r\n x = self.relu3a(x)\r\n x = self.conv3b(x)\r\n x = self.bn3b(x)\r\n x = self.relu3b(x)\r\n x = self.pool3(x)\r\n\r\n x = self.conv4a(x)\r\n x = self.bn4a(x)\r\n x = self.relu4a(x)\r\n x = self.conv4b(x)\r\n x = self.bn4b(x)\r\n x = self.relu4b(x)\r\n x = self.pool4(x)\r\n\r\n x = self.conv5a(x)\r\n x = self.bn5a(x)\r\n x = self.relu5a(x)\r\n x = self.conv5b(x)\r\n x = self.bn5b(x)\r\n x = self.relu5b(x)\r\n\r\n if self.return_conv:\r\n x = self.feature_pool(x)\r\n # print(x.shape)\r\n return x.view(x.shape[0], -1)\r\n\r\n x = self.attention(x)\r\n\r\n x = self.pool5(x)\r\n x = x.view(-1, 512)\r\n\r\n if self.with_classifier:\r\n x = self.linear(x)\r\n if self.num_classes == 1:\r\n x = nn.Sigmoid()(x)\r\n x = torch.squeeze(x)\r\n \r\n return x\r\n\r\n\r\nif __name__ == '__main__':\r\n c3d = C3D()" ]
[ [ "torch.nn.AdaptiveMaxPool3d", "torch.nn.Sigmoid", "torch.nn.AdaptiveAvgPool3d", "torch.nn.MaxPool3d", "torch.nn.Conv3d", "torch.nn.Linear", "torch.nn.ReLU", "torch.nn.BatchNorm3d", "torch.squeeze" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
RaghuSpaceRajan/ConfigSpace
[ "0e0cd1dbafc8485c3d3e44add342f0c9a223ed9f" ]
[ "test/read_and_write/test_irace_writer.py" ]
[ "# Copyright (c) 2014-2017, ConfigSpace developers\n# Matthias Feurer\n# Katharina Eggensperger\n# Mohsin Ali\n# and others (see commit history).\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the <organization> nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport unittest\nimport shutil\nimport tempfile\nimport os\n\nfrom ConfigSpace.configuration_space import ConfigurationSpace\nimport ConfigSpace.read_and_write.irace as irace\n\nfrom ConfigSpace.hyperparameters import CategoricalHyperparameter, \\\n UniformIntegerHyperparameter, UniformFloatHyperparameter, \\\n OrdinalHyperparameter\nfrom ConfigSpace.conditions import EqualsCondition, InCondition, \\\n AndConjunction, OrConjunction\nfrom ConfigSpace.forbidden import ForbiddenInClause, ForbiddenEqualsClause, \\\n ForbiddenAndConjunction\n\n\n# Copyright (c) 2014-2016, ConfigSpace developers\n# Matthias Feurer\n# Katharina Eggensperger\n# and others (see commit history).\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the <organization> nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nint_a = UniformIntegerHyperparameter(\"int_a\", -1, 6)\n\n\nclass TestIraceWriter(unittest.TestCase):\n '''\n Test IRACE writer\n '''\n def setUp(self):\n self.test_dir = tempfile.mkdtemp()\n self.cwd = os.getcwd()\n os.chdir(self.test_dir)\n\n def tearDown(self):\n os.chdir(self.cwd)\n shutil.rmtree(self.test_dir)\n\n def test_write_illegal_argument(self):\n sp = {\"a\": int_a}\n self.assertRaisesRegex(\n TypeError, r\"irace.write expects an \"\n r\"instance of \"\n r\"<class \"\n r\"'ConfigSpace.configuration_\"\n r\"space.ConfigurationSpace'>, you provided \"\n r\"'<(type|class) 'dict'>'\", irace.write, sp)\n\n def test_write_int(self):\n expected = \"int_a '--int_a ' i (-1, 6)\\n\"\n cs = ConfigurationSpace()\n cs.add_hyperparameter(int_a)\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_float(self):\n expected = \"float_a '--float_a ' r (16.000000, 1024.000000)\\n\"\n cs = ConfigurationSpace()\n cs.add_hyperparameter(UniformFloatHyperparameter(\"float_a\", 16, 1024))\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_categorical(self):\n expected = \"cat_a '--cat_a ' c {a,b,c}\\n\"\n cs = ConfigurationSpace()\n cs.add_hyperparameter(\n CategoricalHyperparameter(\"cat_a\", [\"a\", \"b\", \"c\"]))\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_ordinal(self):\n expected = \"ord_a '--ord_a ' o {a,b,3}\\n\"\n cs = ConfigurationSpace()\n cs.add_hyperparameter(OrdinalHyperparameter(\"ord_a\", [\"a\", \"b\", 3]))\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_equals_condition_categorical(self):\n expected = \"ls '--ls ' c {sa,ca,ny}\\ntemp '--temp ' r (0.500000, 1.000000)| ls==sa\\n\"\n\n temp = UniformFloatHyperparameter(\"temp\", 0.5, 1)\n ls = CategoricalHyperparameter(\"ls\", [\"sa\", \"ca\", \"ny\"], \"sa\")\n\n cs = ConfigurationSpace()\n cs.add_hyperparameter(temp)\n cs.add_hyperparameter(ls)\n c1 = EqualsCondition(temp, ls, 'sa')\n cs.add_condition(c1)\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_equals_condition_numerical(self):\n expected = \"temp '--temp ' i (1, 2)\\nls '--ls ' c {sa,ca,ny}| temp==2\\n\"\n\n temp = UniformIntegerHyperparameter(\"temp\", 1, 2)\n ls = CategoricalHyperparameter(\"ls\", [\"sa\", \"ca\", \"ny\"], \"sa\")\n\n cs = ConfigurationSpace()\n cs.add_hyperparameter(temp)\n cs.add_hyperparameter(ls)\n c1 = EqualsCondition(ls, temp, 2)\n cs.add_condition(c1)\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_in_condition(self):\n expected = (\n \"ls '--ls ' c {sa,ca,ny}\\ntemp '--temp ' r (0.500000, 1.000000)| ls %in% c(sa,ca)\\n\"\n )\n\n temp = UniformFloatHyperparameter(\"temp\", 0.5, 1)\n ls = CategoricalHyperparameter(\"ls\", [\"sa\", \"ca\", \"ny\"], \"sa\")\n\n cs = ConfigurationSpace()\n cs.add_hyperparameter(temp)\n cs.add_hyperparameter(ls)\n c1 = InCondition(temp, ls, ['sa', 'ca'])\n cs.add_condition(c1)\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_AndConjunction_condition(self):\n expected = (\n \"lp '--lp ' c {mi,bo}\\nls '--ls ' c {sa,ca,ny}\\ntemp '--temp ' \"\n \"r (0.500000, 1.000000)| ls %in% c(sa,ca) && lp %in% c(bo)\\n\"\n )\n\n temp = UniformFloatHyperparameter(\"temp\", 0.5, 1)\n ls = CategoricalHyperparameter(\"ls\", [\"sa\", \"ca\", \"ny\"], \"sa\")\n lp = CategoricalHyperparameter(\"lp\", [\"mi\", \"bo\"], \"bo\")\n\n cs = ConfigurationSpace()\n cs.add_hyperparameter(temp)\n cs.add_hyperparameter(lp)\n cs.add_hyperparameter(ls)\n\n c1 = InCondition(temp, ls, ['sa', 'ca'])\n c2 = InCondition(temp, lp, ['bo'])\n c3 = AndConjunction(c1, c2)\n cs.add_condition(c3)\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_OrConjunction_condition(self):\n import numpy as np\n expected = (\n \"lp '--lp ' c {mi,bo}\\ntemp '--temp ' r (2.000000, 5.000000)\\nls \"\n \"'--ls ' c {sa,ca,ny}| temp==3.0 || lp %in% c(bo)\\n\")\n\n temp = UniformFloatHyperparameter(\n \"temp\", np.exp(2), np.exp(5), log=True)\n ls = CategoricalHyperparameter(\"ls\", [\"sa\", \"ca\", \"ny\"], \"sa\")\n lp = CategoricalHyperparameter(\"lp\", [\"mi\", \"bo\"], \"bo\")\n\n cs = ConfigurationSpace()\n cs.add_hyperparameter(temp)\n cs.add_hyperparameter(lp)\n cs.add_hyperparameter(ls)\n\n c1 = EqualsCondition(ls, temp, np.exp(3))\n c2 = InCondition(ls, lp, ['bo'])\n c3 = OrConjunction(c1, c2)\n cs.add_condition(c3)\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_forbidden(self):\n cs = ConfigurationSpace()\n\n hp1 = CategoricalHyperparameter(\"parent\", [0, 1])\n hp2 = UniformIntegerHyperparameter(\"child\", 0, 2)\n hp3 = UniformIntegerHyperparameter(\"child2\", 0, 2)\n hp4 = UniformIntegerHyperparameter(\"child3\", 0, 2)\n hp5 = CategoricalHyperparameter(\"child4\", [4, 5, 6, 7])\n\n cs.add_hyperparameters([hp1, hp2, hp3, hp4, hp5])\n\n forb2 = ForbiddenEqualsClause(hp1, 1)\n forb3 = ForbiddenInClause(hp2, range(2, 3))\n forb4 = ForbiddenInClause(hp3, range(2, 3))\n forb5 = ForbiddenInClause(hp4, range(2, 3))\n forb6 = ForbiddenInClause(hp5, [6, 7])\n\n and1 = ForbiddenAndConjunction(forb2, forb3)\n and2 = ForbiddenAndConjunction(forb2, forb4)\n and3 = ForbiddenAndConjunction(forb2, forb5)\n\n cs.add_forbidden_clauses(\n [forb2, forb3, forb4, forb5, forb6, and1, and2, and3])\n\n irace.write(cs) # generates file called forbidden.txt\n\n def test_write_log_int(self):\n expected = \"int_log '--int_log ' i (2, 4)\\n\"\n int_log = UniformIntegerHyperparameter(\"int_log\", 10, 100, log=True)\n cs = ConfigurationSpace()\n cs.add_hyperparameter(int_log)\n value = irace.write(cs)\n self.assertEqual(expected, value)\n\n def test_write_log_float(self):\n import numpy as np\n expected = \"float_log '--float_log ' r (2.000000, 5.000000)\\n\"\n float_log = UniformFloatHyperparameter(\n \"float_log\", np.exp(2), np.exp(5), log=True)\n cs = ConfigurationSpace()\n cs.add_hyperparameter(float_log)\n value = irace.write(cs)\n self.assertEqual(expected, value)\n" ]
[ [ "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zcemycl/autonomous_mobile_robot
[ "302b3336005acd81202ebbbb0c52a4b2692fa9c7" ]
[ "robot_control/src/path_planner.py" ]
[ "#!/usr/bin/python\n\nimport numpy as np\nimport yaml\nimport math\nfrom operator import itemgetter\nimport heapq\nimport pprint\n\n\n\n\ndef dijkstras(occupancy_map, x_spacing, y_spacing, start, goal):\n \"\"\"\n Implements Dijkstra's shortest path algorithm\n Input:\n occupancy_map - an N by M numpy array of boolean values (represented\n as integers 0 and 1) that represents the locations of the obstacles\n in the world\n x_spacing - parameter representing spacing between adjacent columns\n y_spacing - parameter representing spacing between adjacent rows\n start - a 3 by 1 numpy array of (x,y,theta) for the starting position \n goal - a 3 by 1 numpy array of (x,y,theta) for the finishing position \n Output: \n path: list of the indices of the nodes on the shortest path found\n starting with \"start\" and ending with \"end\" (each node is in\n metric coordinates)\n \"\"\"\n # We will use this delta function to search surrounding nodes.\n delta = [[-1, 0], # go up\n [0, -1], # go left\n [1, 0], # go down\n [0, 1]] # go right\n\n # Each node on the map \"costs\" 1 step to reach.\n cost = 1\n # Convert numpy array of map to list of map, makes it easier to search.\n occ_map = occupancy_map.tolist()\n\n # Converge start and goal positions to map indices.\n x = int(math.ceil((start.item(0) / x_spacing) - 0.5)) # startingx\n y = int(math.ceil((start.item(1) / y_spacing) - 0.5)) # startingy\n goalX = int(math.ceil((goal.item(0) / x_spacing) - 0.5))\n goalY = int(math.ceil((goal.item(1) / y_spacing) - 0.5))\n # print \"Start Pose: \", x, y\n # print \"Goal Pose: \", goalX, goalY\n\n # Make a map to keep track of all the nodes and their cost distance values.\n possible_nodes = [[0 for row in range(len(occ_map[0]))] for col in range(len(occ_map[1]))]\n row = y\n col = x\n\n possible_nodes[row][col] = 1 #This means the starting node has been searched.\n # print \"Possible Nodes: \"\n # pprint.pprint(possible_nodes)\n\n # The g_value will count the number of steps each node is from the start.\n # Since we are at the start node, the total cost is 0.\n g_value = 0\n frontier_nodes = [(g_value, col, row)] # dist, x, y\n searched_nodes = []\n parent_node = {} # Dictionary that Maps {child node : parent node}\n loopcount = 0\n\n while len(frontier_nodes) != 0:\n # print \"\\n>>>>>>>>>>>>LOOP COUNT: \", loopcount, \"\\n\"\n frontier_nodes.sort(reverse=True) #sort from shortest distance to farthest\n current_node = frontier_nodes.pop()\n # print \"current_node: \", current_node\n heapq.heappush(searched_nodes, current_node)\n # print \"frontier nodes: \", searched_nodes\n if current_node[1] == goalX and current_node[2] == goalY:\n # print \"Goal found!\"\n # print \"NEAREST NODE: \", current_node\n # print \"searched_nodes: \\n\", searched_nodes\n # print \"\\n\"\n # print sorted(searched_nodes, key = itemgetter(0))\n break\n g_value, col, row = current_node\n # print \"current g, col, row:\", g_value, col, row\n\n # Check surrounding neighbors.\n for i in delta:\n possible_expansion_x = col + i[0]\n possible_expansion_y = row + i[1]\n valid_expansion = 0 <= possible_expansion_x < len(occupancy_map[0]) and 0 <= possible_expansion_y < len(occ_map)\n # print \"Current expansion Node: \", possible_expansion_x, possible_expansion_y\n\n if valid_expansion:\n try:\n unsearched_node = possible_nodes[possible_expansion_x][possible_expansion_y] == 0\n open_node = occ_map[possible_expansion_x][possible_expansion_y] == 0\n except:\n unsearched_node = False\n if unsearched_node and open_node:\n possible_nodes[possible_expansion_x][possible_expansion_y] = 1\n possible_node = (g_value + cost, possible_expansion_x, possible_expansion_y)\n frontier_nodes.append(possible_node)\n # print \"frontier_nodes:\", frontier_nodes\n # This now builds parent/child relationship\n parent_node[possible_node] = current_node\n # print \"Parent Node: \\n\", parent_node\n # print \"While Possible Nodes: \"\n # pprint.pprint(possible_nodes)\n loopcount = loopcount+1\n\n # print \"Generating path...\"\n\n route = []\n child_node = current_node\n while parent_node.has_key(child_node):\n route.append(parent_node[child_node])\n child_node = parent_node[child_node]\n route.sort()\n\n # Convert route back to metric units:\n\n path = []\n position = [start.item(0), start.item(1)] #starting point passed in by function\n path.append(position) #add it to the list for the path\n\n for i in range(0, len(route)):\n position = [(route[i][1]+0.5)*x_spacing, (route[i][2]+0.5)*y_spacing ]\n path.append(position)\n\n # Add the goal state:\n\n position = [goal.item(0), goal.item(1)]\n path.append(position)\n\n # print \"Pathh: \"\n # pprint.pprint(path)\n path = np.array(path)\n return path\n\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
soujyo/punctuation-restoration
[ "cfbc3f76cba3c7f4a1597166deba267c713424a5" ]
[ "src/focal_loss.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass FocalLoss(nn.Module):\n def __init__(self, weight=None, gamma=2, reduction='mean'):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.reduction = reduction\n self.weight = weight\n\n def forward(self, input, target):\n ce_loss = F.cross_entropy(input, target, reduction=self.reduction, weight=self.weight)\n pt = torch.exp(-ce_loss)\n focal_loss = ((1 - pt) ** self.gamma * ce_loss).mean()\n return focal_loss" ]
[ [ "torch.exp", "torch.nn.functional.cross_entropy" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
GeorgRamer/spectroscopy_data
[ "f8eab01bcfa5c1ec8b47eec91954775dfe81d274" ]
[ "spectroscopy_data/utils.py" ]
[ "import numpy as np\n\n\ndef gaussian_band(wn, A, s, m):\n return A/s*np.sqrt(2*np.pi)*np.exp(-(wn-m)**2/2/s**2)\n \ndef lorentzian_band(wn, A, w, m):\n return A /(1 + (wn - m)**2/w**2)/(w*np.pi)\n\n\ndef band(wn, band_params):\n if band_params[0] == \"gauss\":\n return gaussian_band(wn, *band_params[1:])\n elif band_params[0] == \"lorentz\":\n return lorentzian_band(wn, *band_params[1:])\n else:\n raise ArgumentError('Unknown band {}'.format(band_params[0]))\n\ndef spectrum(wn, band_params, noise_level=0):\n spec = np.zeros_like(wn)\n for band_param in band_params:\n spec = spec + band(wn, band_param)\n if noise_level > 0:\n spec = spec + noise_level * np.random.randn(*spec.shape)\n return spec\n" ]
[ [ "numpy.random.randn", "numpy.exp", "numpy.zeros_like", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ulises1229/Python-Raytracer
[ "ad89b9dabda1c3eeb68af2d3578c3f38dee9f5b9" ]
[ "sightpy/utils/colour_functions.py" ]
[ "import numpy as np\r\n\r\ndef sRGB_linear_to_sRGB(rgb_linear):\r\n\r\n '''sRGB standard for gamma inverse correction.'''\r\n rgb = np.where( rgb_linear <= 0.00304, 12.92 * rgb_linear, 1.055 * np.power(rgb_linear, 1.0/2.4) - 0.055)\r\n \r\n # clip intensity if needed (rgb values > 1.0) by scaling\r\n rgb_max = np.amax(rgb, axis=0) + 0.00001 # avoid division by zero\r\n intensity_cutoff = 1.0\r\n rgb = np.where(rgb_max > intensity_cutoff, rgb * intensity_cutoff / (rgb_max), rgb)\r\n \r\n return rgb\r\n\r\n\r\ndef sRGB_to_sRGB_linear(rgb):\r\n\r\n '''sRGB standard for gamma inverse correction.''' \r\n rgb_linear = np.where( rgb <= 0.03928, rgb / 12.92, np.power((rgb + 0.055) / 1.055, 2.4))\r\n\r\n return rgb_linear\r\n\r\n" ]
[ [ "numpy.amax", "numpy.where", "numpy.power" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
isaaccorley/torchrs
[ "bb27a29e344741f8dda504ac296af6d03a20e7df" ]
[ "torchrs/datasets/tiselac.py" ]
[ "import os\nfrom typing import Tuple\n\nimport torch\nimport numpy as np\nfrom einops import rearrange\n\nfrom torchrs.transforms import Compose, ToTensor\n\n\nclass Tiselac(torch.utils.data.Dataset):\n \"\"\" TiSeLac dataset from the Time Series Land Cover Classification Challenge (2017)\n https://sites.google.com/site/dinoienco/tiselac-time-series-land-cover-classification-challenge\n\n 'A MSTC Land Cover classification problem for data taken from the Reunion island.\n A case is a pixel. Measurements are taken over 23 time points (days), with\n 10 dimensions: 7 surface reflectances (Ultra Blue, Blue, Green, Red, NIR, SWIR1 and SWIR2)\n plus 3 indices (NDVI, NDWI and BI). Class values relate to one of 9 land cover types class values.'\n \"\"\"\n classes = [\n \"Urban Areas\",\n \"Other built-up surfaces\",\n \"Forests\",\n \"Sparse Vegetation\",\n \"Rocks and bare soil\",\n \"Grassland\",\n \"Sugarcane crops\",\n \"Other crops\",\n \"Water\"\n ]\n splits = [\"train\", \"test\"]\n\n def __init__(\n self,\n root: str = \".data/tiselac\",\n split: str = \"train\",\n transform: Compose = Compose([ToTensor(permute_dims=False)])\n ):\n assert split in self.splits\n self.root = root\n self.transform = transform\n self.series, self.labels = self.load_file(root, split)\n\n @staticmethod\n def load_file(path: str, split: str) -> Tuple[np.ndarray, np.ndarray]:\n x = np.loadtxt(os.path.join(path, f\"{split}.txt\"), dtype=np.int16, delimiter=\",\")\n y = np.loadtxt(os.path.join(path, f\"{split}_labels.txt\"), dtype=np.uint8)\n x = rearrange(x, \"n (t c) -> n t c\", c=10)\n return x, y\n\n def __len__(self) -> int:\n return len(self.series)\n\n def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:\n x, y = self.series[idx], self.labels[idx] - 1\n x, y = self.transform(x).squeeze(dim=0), torch.tensor(y).to(torch.long)\n return x, y\n" ]
[ [ "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
leondz/danlp
[ "6860d217effa30c8c1b7bdb0c53f72aaef301056" ]
[ "danlp/models/xlmr_models.py" ]
[ "from danlp.download import DEFAULT_CACHE_DIR, download_model, \\\n _unzip_process_func\n\nfrom allennlp.models.archival import load_archive\nfrom allennlp.common.util import import_module_and_submodules\nfrom allennlp.common.util import prepare_environment\nimport torch\nimport os, warnings\n\nimport_module_and_submodules(\"danlp.models.allennlp_models\")\nfrom danlp.models.allennlp_models.coref.predictors.coref import CorefPredictor\n\nfrom typing import List\n\nclass XLMRCoref():\n \"\"\"\n XLM-Roberta Coreference Resolution Model.\n\n For predicting which expressions (word or group of words) \n refer to the same entity in a document. \n\n :param str cache_dir: the directory for storing cached models\n :param bool verbose: `True` to increase verbosity\n \"\"\"\n def __init__(self, cache_dir=DEFAULT_CACHE_DIR, verbose=False):\n\n # download the model or load the model path\n model_path = download_model('xlmr.coref', cache_dir,\n process_func=_unzip_process_func,\n verbose=verbose)\n \n archive = load_archive(model_path)\n self.config = archive.config\n prepare_environment(self.config)\n self.model = archive.model\n self.dataset_reader = archive.validation_dataset_reader\n self.predictor = CorefPredictor(model=self.model, dataset_reader=self.dataset_reader)\n \n def predict(self, document: List[List[str]]):\n \"\"\"\n Predict coreferences in a document\n\n :param List[List[str]] document: segmented and tokenized text\n :return: a dictionary\n :rtype: Dict\n \"\"\"\n\n preds = self.predictor.predict_tokenized(document)\n\n return preds\n\n def predict_clusters(self, document: List[List[str]]):\n \"\"\"\n Predict clusters of entities in the document. \n Each predicted cluster contains a list of references.\n A reference is a tuple (ref text, start id, end id).\n The ids refer to the token ids in the entire document. \n \n :param List[List[str]] document: segmented and tokenized text\n :return: a list of clusters\n :rtype: List[List[Tuple]]\n \"\"\"\n \n preds = self.predict(document)\n tokens = [t for d in document for t in d]\n clusters = []\n\n for idx in preds['clusters']:\n cluster = []\n for ref_idx in idx:\n start_id = ref_idx[0]\n end_id = ref_idx[1]+1\n ref = tokens[start_id:end_id]\n cluster.append((ref, start_id, end_id))\n clusters.append(cluster)\n\n return clusters\n\n\n\nclass XlmrNed():\n \"\"\"\n XLM-Roberta for Named Entity Disambiguation.\n\n For predicting whether or not a specific entity (QID) is mentioned in a sentence.\n\n :param str cache_dir: the directory for storing cached models\n :param bool verbose: `True` to increase verbosity\n \"\"\"\n\n def __init__(self, cache_dir=DEFAULT_CACHE_DIR, verbose=False):\n from transformers import XLMRobertaTokenizer, XLMRobertaForSequenceClassification\n #download the model or load the model path\n model_path = download_model('xlmr.ned', cache_dir,\n process_func=_unzip_process_func,\n verbose=verbose)\n self.classes = ['0', '1']\n\n self.tokenizer = XLMRobertaTokenizer.from_pretrained(model_path)\n self.model = XLMRobertaForSequenceClassification.from_pretrained(model_path, num_labels=len(self.classes))\n\n self.max_length = self.model.roberta.embeddings.position_embeddings.num_embeddings - 2\n\n def _classes(self):\n return self.classes\n \n def _get_pred(self, sentence, kg_context):\n input1 = self.tokenizer.encode_plus(sentence, kg_context, add_special_tokens=True, return_tensors='pt',\n max_length=self.max_length, truncation='only_second', return_overflowing_tokens=True)\n if 'overflowing_tokens' in input1 and input1['overflowing_tokens'].shape[1]>0:\n warnings.warn('Maximum length for sequence exceeded, truncation may result in unexpected results. Consider running the model on a shorter sequence than {} tokens'.format(self.max_length))\n pred = self.model(input1['input_ids'])[0]\n\n return pred\n \n def predict(self, sentence: str, kg_context: str):\n \"\"\"\n Predict whether a QID is mentioned in a sentence or not.\n\n :param str sentence: raw text\n :param str kg_context: raw text\n :return: \n :rtype: str\n \"\"\"\n\n pred = self._get_pred(sentence, kg_context)\n pred = pred.argmax().item()\n predclass = self.classes[pred]\n \n return predclass\n \n def predict_proba(self, sentence: str, kg_context: str):\n proba=[]\n \n pred=self._get_pred(sentence, kg_context)\n proba.append(torch.nn.functional.softmax(pred[0], dim=0).detach().numpy())\n\n return proba\n \n\n\ndef load_xlmr_coref_model(cache_dir=DEFAULT_CACHE_DIR, verbose=False):\n \"\"\"\n Loads an XLM-R coreference model.\n\n :param str cache_dir: the directory for storing cached models\n :param bool verbose: `True` to increase verbosity\n :return: an XLM-R coreference model\n \"\"\"\n return XLMRCoref(cache_dir, verbose)\n \n\ndef load_xlmr_ned_model(cache_dir=DEFAULT_CACHE_DIR, verbose=False):\n \"\"\"\n Loads an XLM-R model for named entity disambiguation.\n\n :param str cache_dir: the directory for storing cached models\n :param bool verbose: `True` to increase verbosity\n :return: an XLM-R NED model\n \"\"\"\n return XlmrNed(cache_dir, verbose)\n\n" ]
[ [ "torch.nn.functional.softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
isi-vista/rtg
[ "149415f424f2a6585cbe0d97f0007b8b0b53d164" ]
[ "rtg/exp.py" ]
[ "import copy\nimport os\nimport random\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom functools import partial\nfrom typing import Optional, Dict, List, Tuple, Union, Any\nimport time\nfrom collections import Counter\n\nimport numpy as np\nimport torch\nimport hashlib\nimport portalocker\nimport tqdm\n\nimport rtg\nfrom rtg import log, yaml, device\nfrom rtg.data.dataset import (TSVData, BatchIterable, LoopingIterable, SqliteFile, GenerativeBatchIterable)\nfrom rtg.data.codec import Field, SPField, NLField, PretrainMatchField\nfrom rtg.utils import IO, line_count\nfrom rtg.registry import CRITERION, OPTIMIZER, SCHEDULE, MODEL\nfrom rtg.schema import config_checks\nfrom rtg.distrib import dtorch\n\n\nseeded = False\n\n\ndef load_conf(inp: Union[str, Path]):\n with IO.reader(inp) as fh:\n return yaml.load(fh)\n\n\nclass BaseExperiment:\n\n def __init__(self, work_dir: Union[str, Path], read_only=False,\n config: Union[str, Path, Optional[Dict[str, Any]]] = None):\n if type(work_dir) is str:\n work_dir = Path(work_dir)\n\n log.info(f\"Initializing an experiment. Directory = {work_dir}\")\n self.read_only = read_only\n self.work_dir = work_dir\n self.log_dir = work_dir / 'logs'\n self.log_file = self.log_dir / 'rtg.log'\n self.data_dir = work_dir / 'data'\n self.model_dir = work_dir / 'models'\n self._config_file = work_dir / 'conf.yml'\n if isinstance(config, str) or isinstance(config, Path):\n config = load_conf(config)\n self.config = config if config else load_conf(self._config_file)\n config_checks(self.config)\n self.codec_name = self.config.get('prep', {}).get('codec_lib', 'sentpiece') # with default\n codec_libs = {'sentpiece': SPField,\n 'nlcodec': NLField,\n 'pretrainmatch': PretrainMatchField}\n self.codec_supports_multiproc = self.codec_name in {'nlcodec'}\n assert self.codec_name in codec_libs, f'{self.codec_name} is not in {codec_libs.keys()}'\n log.info(f\"codec lib = {self.codec_name}\")\n self.Field = codec_libs[self.codec_name]\n\n self._shared_field_file = self.data_dir / f'{self.codec_name}.shared.model'\n self._prepared_flag = self.work_dir / '_PREPARED'\n self._trained_flag = self.work_dir / '_TRAINED'\n\n self.train_file = self.data_dir / 'train.tsv.gz'\n self.train_db = self.data_dir / 'train.db'\n self.train_db_tmp = self.data_dir / 'train.db.tmp'\n self.finetune_file = self.data_dir / 'finetune.db'\n self.valid_file = self.data_dir / 'valid.tsv.gz'\n self.combo_file = self.data_dir / 'combo.tsv.gz'\n # a set of samples to watch the progress qualitatively\n self.samples_file = self.data_dir / 'samples.tsv.gz'\n\n if not read_only:\n for _dir in [self.model_dir, self.data_dir, self.log_dir]:\n if not _dir.exists():\n _dir.mkdir(parents=True)\n\n assert self.config, 'Looks like the config is emtpy or invalid'\n self.maybe_seed()\n\n self.shared_field = self.Field(str(self._shared_field_file)) \\\n if self._shared_field_file.exists() else None\n\n self.last_state_file = self.model_dir / 'last_state.pt'\n self.parent_model_state = self.data_dir / 'parent_model_state.pt'\n\n @property\n def problem_type(self):\n raise NotImplementedError\n\n def maybe_seed(self):\n global seeded\n if not seeded and 'seed' in self.config:\n seed = self.config['seed']\n log.info(f\"Manual seeding the RNG with {seed}\")\n random.seed(seed)\n torch.manual_seed(seed)\n np.random.seed(seed)\n if torch.cuda.is_available():\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n seeded = True\n else:\n log.info(\"No manual seed! Letting the RNGs do their stuff\")\n\n def store_config(self):\n yaml.dump(self.config, stream=self._config_file)\n\n @property\n def model_type(self) -> Optional[str]:\n return self.config.get('model_type')\n\n @model_type.setter\n def model_type(self, mod_type: str):\n self.config['model_type'] = mod_type\n\n def has_prepared(self):\n return self._prepared_flag.exists()\n\n def has_trained(self):\n return self._trained_flag.exists()\n\n def store_model(self, optimizer_step: int, model, train_score: float, val_score: float, keep: int,\n prefix='model', keeper_sort='step'):\n \"\"\"\n saves model to a given path\n :param optimizer_step: optimizer step of the model\n :param model: model object itself\n :param train_score: score of model on training split\n :param val_score: score of model on validation split\n :param keep: number of good models to keep, bad models will be deleted\n :param prefix: prefix to store model. default is \"model\"\n :param keeper_sort: criteria for choosing the old or bad models for deletion.\n Choices: {'total_score', 'step'}\n :return:\n \"\"\"\n # TODO: improve this by skipping the model save if the model is not good enough to be saved\n if self.read_only:\n log.warning(\"Ignoring the store request; experiment is readonly\")\n return\n name = f'{prefix}_{optimizer_step:03d}_{train_score:.6f}_{val_score:.6f}.pkl'\n path = self.model_dir / name\n log.info(f\"Saving optimizer step {optimizer_step} to {path}\")\n torch.save(model, str(path))\n\n del_models = []\n if keeper_sort == 'total_score':\n del_models = self.list_models(sort='total_score', desc=False)[keep:]\n elif keeper_sort == 'step':\n del_models = self.list_models(sort='step', desc=True)[keep:]\n else:\n Exception(f'Sort criteria{keeper_sort} not understood')\n for d_model in del_models:\n log.info(f\"Deleting model {d_model} . Keep={keep}, sort={keeper_sort}\")\n os.remove(str(d_model))\n\n with IO.writer(os.path.join(self.model_dir, 'scores.tsv'), append=True) as f:\n cols = [str(optimizer_step), datetime.now().isoformat(), name, f'{train_score:g}',\n f'{val_score:g}']\n f.write('\\t'.join(cols) + '\\n')\n\n if self.last_state_file.exists():\n self.last_state_file.unlink()\n self.last_state_file.symlink_to(name) # in the same dir\n\n @staticmethod\n def _path_to_validn_score(path):\n parts = str(path.name).replace('.pkl', '').split('_')\n valid_score = float(parts[-1])\n return valid_score\n\n @staticmethod\n def _path_to_total_score(path):\n parts = str(path.name).replace('.pkl', '').split('_')\n tot_score = float(parts[-2]) + float(parts[-1])\n return tot_score\n\n @staticmethod\n def _path_to_step_no(path):\n parts = str(path.name).replace('.pkl', '').split('_')\n step_no = int(parts[-3])\n return step_no\n\n def list_models(self, sort: str = 'step', desc: bool = True) -> List[Path]:\n \"\"\"\n Lists models in descending order of modification time\n :param sort: how to sort models ?\n - valid_score: sort based on score on validation set\n - total_score: sort based on validation_score + training_score\n - mtime: sort by modification time\n - step (default): sort by step number\n :param desc: True to sort in reverse (default); False to sort in ascending\n :return: list of model paths\n \"\"\"\n paths = list(self.model_dir.glob('model_*.pkl'))\n if not paths:\n paths = list(self.model_dir.glob('embeddings_*.gz'))\n sorters = {\n 'valid_score': self._path_to_validn_score,\n 'total_score': self._path_to_total_score,\n 'mtime': lambda p: p.stat().st_mtime,\n 'step': self._path_to_step_no\n }\n if sort not in sorters:\n raise Exception(f'Sort {sort} not supported. valid options: {sorters.keys()}')\n return sorted(paths, key=sorters[sort], reverse=desc)\n\n def _get_first_model(self, sort: str, desc: bool) -> Tuple[Optional[Path], int]:\n \"\"\"\n Gets the first model that matches the given sort criteria\n :param sort: sort mechanism\n :param desc: True for descending, False for ascending\n :return: Tuple[Optional[Path], step_num:int]\n \"\"\"\n models = self.list_models(sort=sort, desc=desc)\n if models:\n name = models[0].name.replace('.pkl', '').replace('.txt.gz', '')\n step, train_score, valid_score = name.split('_')[-3:]\n return models[0], int(step)\n else:\n return None, 0\n\n def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n \"\"\"Gets best Known model (best on lowest scores on training and validation sets)\n \"\"\"\n return self._get_first_model(sort='total_score', desc=False)\n\n def get_last_saved_model(self) -> Tuple[Optional[Path], int]:\n return self._get_first_model(sort='step', desc=True)\n\n @property\n def model_args(self) -> Optional[Dict]:\n \"\"\"\n Gets args from file\n :return: args if exists or None otherwise\n \"\"\"\n return self.config.get('model_args')\n\n @model_args.setter\n def model_args(self, model_args):\n \"\"\"\n set model args\n \"\"\"\n self.config['model_args'] = model_args\n\n @property\n def shared_vocab(self) -> Field:\n return self.shared_field\n\n @staticmethod\n def get_first_found_file(paths: List[Path]):\n \"\"\"returns the first file that is not None, and actually exists on disc;\n If no file is valid, it returns None\"\"\"\n for p in paths:\n if p and p.exists():\n return p\n return None\n\n def pre_process(self, args=None, force=False):\n if 'parent' in self.config and not self.parent_model_state.exists():\n self.inherit_parent()\n if self.has_prepared() and not force:\n log.warning(\"Already prepared\")\n return\n args = args if args else self.config['prep']\n\n if 'same_data' in args:\n data = Path(args['same_data']) / 'data'\n assert data.exists()\n log.info(f\"Reusing prepared data dir from {data}\")\n if self.data_dir.exists():\n if self.data_dir.is_symlink():\n self.data_dir.unlink()\n else:\n self.data_dir.rename('data.bak')\n self.data_dir.symlink_to(data.resolve(), target_is_directory=True)\n self.reload()\n self._prepared_flag.touch()\n\n def inherit_parent(self):\n raise NotImplemented()\n\n def train(self, args=None):\n raise NotImplementedError()\n\n def reload(self):\n exp = type(self)(self.work_dir, read_only=self.read_only)\n self.__dict__ = exp.__dict__\n\n @classmethod\n def _checkpt_to_model_state(cls, checkpt_path: Union[str, Path]):\n state = torch.load(checkpt_path, map_location=device)\n if 'model_state' in state:\n state = state['model_state']\n return state\n\n @classmethod\n def average_states(cls, model_paths: List[Path]):\n assert model_paths, 'at least one model checkpoint should be given. Check your directory'\n for i, mp in enumerate(model_paths):\n next_state = cls._checkpt_to_model_state(mp)\n if i < 1:\n state_dict = next_state\n key_set = set(state_dict.keys())\n else:\n # noinspection PyUnboundLocalVariable\n assert key_set == set(next_state.keys())\n for key in key_set: # Running average\n state_dict[key] = (i*state_dict[key] + next_state[key]) / (i + 1)\n return state_dict\n\n def maybe_ensemble_state(self, model_paths: Optional[List[str]], ensemble: int = 1):\n if model_paths and len(model_paths) == 1:\n log.info(f\" Restoring state from requested model {model_paths[0]}\")\n return self._checkpt_to_model_state(model_paths[0])\n elif not model_paths and ensemble <= 1:\n model_path, _ = self.get_best_known_model()\n log.info(f\" Restoring state from best known model: {model_path}\")\n return self._checkpt_to_model_state(model_path)\n else:\n if not model_paths:\n # Average last n models\n model_paths = self.list_models(sort='step', desc=True)[:ensemble]\n digest = hashlib.md5(\";\".join(str(p) for p in model_paths).encode('utf-8')).hexdigest()\n cache_file = self.model_dir / f'avg_state{len(model_paths)}_{digest}.pkl'\n lock_file = cache_file.with_suffix('.lock')\n MAX_TIMEOUT = 1 * 60 * 60 # 1 hour\n with portalocker.Lock(lock_file, 'w', timeout=MAX_TIMEOUT) as fh:\n # check if downloaded by other parallel process\n if lock_file.exists() and cache_file.exists():\n log.info(f\"Cache exists: reading from {cache_file}\")\n state = self._checkpt_to_model_state(cache_file)\n else:\n log.info(f\"Averaging {len(model_paths)} model states :: {model_paths}\")\n state = self.average_states(model_paths)\n if len(model_paths) > 1:\n log.info(f\"Caching the averaged state at {cache_file}\")\n torch.save(state, str(cache_file))\n return state\n \n def load_model(self, model_paths=None, ensemble=1):\n from .registry import MODELS\n model = MODELS[self.model_type].Model(exp=self, **self.model_args)[0]\n state = self.maybe_ensemble_state(model_paths=model_paths, ensemble=ensemble)\n errors = model.load_state_dict(state)\n log.info(f\"{errors}\")\n return model\n\n def load_model_with_state(self, checkpt_state):\n from .registry import MODELS\n chkpt = checkpt_state\n state = chkpt['model_state']\n model_type = chkpt['model_type']\n model_args = chkpt['model_args']\n model = MODELS[model_type].Model(exp=self, **model_args)[0]\n errors = model.load_state_dict(state)\n log.info(f\"{errors}\")\n log.info(f\"Successfully restored the model state of : {model_type}\")\n return model\n\n def get_conf_component(self, kind, extra_args=None):\n \"\"\" Creates a component such as schedule, criterion, optimizer based on config\"\"\"\n from rtg.registry import registry\n assert kind in registry, f'component {kind} is unknown; valid: {registry.keys()}'\n if not kind in self.config:\n log.warning(f\"{kind} not found in config; skipping\")\n return None\n name, args = self.config[kind]['name'], self.config[kind].get('args') or {}\n assert name in registry[kind], f'{kind}={name} is invalid; valid: {registry[kind].keys()}'\n factory = registry[kind][name]\n extra_args = extra_args or {}\n log.info(f\"creating {kind} {name} with args {args}\")\n return factory(**extra_args, **args)\n\n def get_criterion(self, extra_args=None):\n return self.get_conf_component(CRITERION, extra_args=extra_args)\n\n def get_schedule(self):\n return self.get_conf_component(SCHEDULE)\n\n def get_optimizer(self, params):\n return self.get_conf_component(OPTIMIZER, extra_args=dict(params=params))\n\n\nclass TranslationExperiment(BaseExperiment):\n\n def __init__(self, work_dir: Union[str, Path], read_only=False,\n config: Union[str, Path, Optional[Dict[str, Any]]] = None):\n super().__init__(work_dir, read_only=read_only, config=config)\n self._src_field_file = self.data_dir / f'{self.codec_name}.src.model'\n self._tgt_field_file = self.data_dir / f'{self.codec_name}.tgt.model'\n\n self.emb_src_file = self.data_dir / 'emb_src.pt'\n self.emb_tgt_file = self.data_dir / 'emb_tgt.pt'\n self.ext_emb_src_file = self.data_dir / 'ext_emb_src.pt' # external Embeddings\n self.ext_emb_tgt_file = self.data_dir / 'ext_emb_tgt.pt' # external Embeddings\n self.src_field = None\n self.tgt_field = None\n self.reload_vocabs()\n\n # Either shared field OR individual src and tgt fields\n assert not (self.shared_field and self.src_field)\n assert not (self.shared_field and self.tgt_field)\n # both are set or both are unset\n #assert (self.src_field is None) == (self.tgt_field is None)\n\n self._unsupervised = self.model_type in {'binmt', 'rnnlm', 'tfmlm'}\n if self._unsupervised:\n self.mono_train_src = self.data_dir / 'mono.train.src.gz'\n self.mono_train_tgt = self.data_dir / 'mono.train.tgt.gz'\n self.mono_valid_src = self.data_dir / 'mono.valid.src.gz'\n self.mono_valid_tgt = self.data_dir / 'mono.valid.tgt.gz'\n\n\n\n @property\n def problem_type(self):\n from rtg.registry import ProblemType\n return ProblemType.TRANSLATION\n\n def reload_vocabs(self):\n self.src_field, self.tgt_field, self.shared_field = [\n self.Field(str(f)) if f.exists() else None for f in (\n self._src_field_file, self._tgt_field_file, self._shared_field_file)]\n\n def check_line_count(self, name, file1, file2):\n count1 = line_count(file1)\n count2 = line_count(file2)\n if count1 == count2:\n log.info(f\"Found {count1:,} parallel lines for {name}\")\n else:\n log.error(f\"Found line mismatch in {name} \")\n raise Exception(f'{file1} has {count1:,} lines but {file2} has {count2:,} lines')\n\n def pre_process_parallel(self, args: Dict[str, Any]):\n # check if files are parallel\n self.check_line_count('validation', args['valid_src'], args['valid_tgt'])\n if 'spark' in self.config:\n log.warning(f\"Spark backend detected: line count on training data is skipped\")\n else:\n log.warning(f\"Going to count lines. If this is a big dataset, it will take long time\")\n self.check_line_count('training', args['train_src'], args['train_tgt'])\n\n xt_args = dict(no_split_toks=args.get('no_split_toks'),\n char_coverage=args.get('char_coverage', 0))\n min_co_ev = args.get('min_co_ev', None)\n pieces = args['pieces']\n if args.get('shared_vocab'): # shared vocab\n assert 'max_types' in args, f'prep.max_types is required when prep.shared_vocab=true'\n max_types = args['max_types']\n corpus = [args[key] for key in ['train_src', 'train_tgt', 'mono_src', 'mono_tgt']\n if args.get(key)]\n assert isinstance(pieces, str), f'shared vocab cant support different pieces for src, tgt;' \\\n f' given pieces={pieces}. Either set shared_vocab=false or pieces=<a string>'\n self.shared_field = self._make_vocab(\"shared\", self._shared_field_file, pieces, max_types,\n corpus=corpus, min_co_ev=min_co_ev, **xt_args)\n else: # separate vocabularies\n src_corpus = [args[key] for key in ['train_src', 'mono_src'] if args.get(key)]\n src_min_co_ev = args.get('src_min_co_ev', min_co_ev)\n tgt_min_co_ev = args.get('tgt_min_co_ev', min_co_ev)\n src_pieces = tgt_pieces = pieces\n if not isinstance(pieces, str):\n assert len(pieces) == 2\n src_pieces, tgt_pieces = pieces\n log.info(f\"Vocab types: src: {src_pieces} and tgt: {tgt_pieces}\")\n\n max_src_types = args.get('max_src_types', args.get('max_types'))\n max_tgt_types = args.get('max_tgt_types', args.get('max_types'))\n assert max_src_types and max_tgt_types, 'prep.{max_src_types,max_tgt_types} are required' \\\n ' when prep.shared_vocab=false'\n self.src_field = self._make_vocab(\"src\", self._src_field_file, src_pieces, max_src_types, corpus=src_corpus,\n min_co_ev=src_min_co_ev, **xt_args)\n # target vocabulary\n tgt_corpus = [args[key] for key in ['train_tgt', 'mono_tgt'] if args.get(key)]\n self.tgt_field = self._make_vocab(\"src\", self._tgt_field_file, tgt_pieces, max_tgt_types, corpus=tgt_corpus,\n min_co_ev=tgt_min_co_ev, **xt_args)\n train_file = self.train_db\n self._pre_process_parallel('train_src', 'train_tgt', out_file=train_file, args=args,\n line_check=False)\n self._pre_process_parallel('valid_src', 'valid_tgt', out_file=self.valid_file, args=args,\n line_check=False)\n\n if args.get(\"finetune_src\") or args.get(\"finetune_tgt\"):\n self._pre_process_parallel('finetune_src', 'finetune_tgt', self.finetune_file)\n\n # get samples from validation set\n n_samples = args.get('num_samples', 5)\n space_tokr = lambda line: line.strip().split()\n val_raw_recs = TSVData.read_raw_parallel_recs(\n args['valid_src'], args['valid_tgt'], args['truncate'], args['src_len'],\n args['tgt_len'], src_tokenizer=space_tokr, tgt_tokenizer=space_tokr)\n val_raw_recs = list(val_raw_recs)\n random.shuffle(val_raw_recs)\n samples = val_raw_recs[:n_samples]\n TSVData.write_parallel_recs(samples, self.samples_file)\n\n def _make_vocab(self, name: str, vocab_file: Path, model_type: str, vocab_size: int,\n corpus: List, no_split_toks: List[str] = None, char_coverage=0,\n min_co_ev=None) -> Field:\n \"\"\"\n Construct vocabulary file\n :param name: name : src, tgt or shared -- for the sake of logging\n :param vocab_file: where to save the vocab file\n :param model_type: sentence piece model type\n :param vocab_size: max types in vocab\n :param corpus: as the name says, list of files from which the vocab should be learned\n :param no_split_toks: tokens that needs to be preserved from splitting, or added\n :return:\n \"\"\"\n if vocab_file.exists():\n log.info(f\"{vocab_file} exists. Skipping the {name} vocab creation\")\n return self.Field(str(vocab_file))\n flat_uniq_corpus = set() # remove dupes, flat the nested list or sets\n for i in corpus:\n if isinstance(i, set) or isinstance(i, list):\n flat_uniq_corpus.update(i)\n else:\n flat_uniq_corpus.add(i)\n\n flat_uniq_corpus = list(flat_uniq_corpus)\n log.info(f\"Going to build {name} vocab from files\")\n xt_args = {}\n if min_co_ev:\n xt_args[\"min_co_ev\"] = min_co_ev\n return self.Field.train(model_type, vocab_size, str(vocab_file), flat_uniq_corpus,\n no_split_toks=no_split_toks, char_coverage=char_coverage, **xt_args)\n\n def pre_process_mono(self, args):\n xt_args = dict(no_split_toks=args.get('no_split_toks'),\n char_coverage=args.get('char_coverage', 0))\n\n mono_files = [args[key] for key in ['mono_train_src', 'mono_train_tgt'] if key in args]\n assert mono_files, \"At least one of 'mono_train_src', 'mono_train_tgt' should be set\"\n log.info(f\"Found mono files: {mono_files}\")\n if args.get('shared_vocab'):\n self.shared_field = self._make_vocab(\"shared\", self._shared_field_file, args['pieces'],\n args['max_types'], corpus=mono_files, **xt_args)\n else: # separate vocabularies\n if 'mono_train_src' in args:\n self.src_field = self._make_vocab(\"src\", self._src_field_file,\n args['pieces'], args['max_src_types'],\n corpus=[args['mono_train_src']], **xt_args)\n else:\n log.warning(\"Skipping source vocab creation since mono_train_src is not given\")\n\n # target vocabulary\n if 'mono_train_tgt' in args:\n self.tgt_field = self._make_vocab(\"src\", self._tgt_field_file,\n args['pieces'], args['max_tgt_types'],\n corpus=[args['mono_train_tgt']], **xt_args)\n else:\n log.warning(\"Skipping target vocab creation since mono_train_tgt is not given\")\n\n def _prep_file(file_key, out_file, do_truncate, max_len, field: Field):\n if file_key not in args:\n log.warning(f'Skipped: {file_key} because it is not found in config')\n return\n\n raw_file = args[file_key]\n\n recs = TSVData.read_raw_mono_recs(raw_file, do_truncate, max_len, field.encode_as_ids)\n # TODO: use SQLite storage\n TSVData.write_mono_recs(recs, out_file)\n if args.get('text_files'):\n recs = TSVData.read_raw_mono_recs(raw_file, do_truncate, max_len, field.tokenize)\n TSVData.write_mono_recs(recs, str(out_file).replace('.tsv', '.pieces.tsv'))\n\n _prep_file('mono_train_src', self.mono_train_src, args['truncate'], args['src_len'],\n self.src_vocab)\n _prep_file('mono_train_tgt', self.mono_train_tgt, args['truncate'], args['tgt_len'],\n self.tgt_vocab)\n\n _prep_file('mono_valid_src', self.mono_valid_src, args['truncate'], args['src_len'],\n self.src_vocab)\n _prep_file('mono_valid_tgt', self.mono_valid_tgt, args['truncate'], args['tgt_len'],\n self.tgt_vocab)\n\n def _pre_process_parallel(self, src_key: str, tgt_key: str, out_file: Path,\n args: Optional[Dict[str, Any]] = None, line_check=True,\n split_ratio: float = 0.):\n \"\"\"\n Pre process records of a parallel corpus\n :param args: all arguments for 'prep' task\n :param src_key: key that contains source sequences\n :param tgt_key: key that contains target sequences\n :param out_file: path to store processed TSV data (compresses if name ends with .gz)\n :return:\n \"\"\"\n args = args if args else self.config['prep']\n log.info(f\"Going to prep files {src_key} and {tgt_key}\")\n assert src_key in args, f'{src_key} not found in experiment config or args'\n assert tgt_key in args, f'{tgt_key} not found in experiment config or args'\n if line_check:\n assert line_count(args[src_key]) == line_count(args[tgt_key]), \\\n f'{args[src_key]} and {args[tgt_key]} must have same number of lines'\n # create Piece IDs\n s_time = time.time()\n reader_func = TSVData.read_raw_parallel_recs\n parallel_recs = reader_func(\n args[src_key], args[tgt_key], args['truncate'], args['src_len'], args['tgt_len'],\n src_tokenizer=partial(self.src_vocab.encode_as_ids, split_ratio=split_ratio),\n tgt_tokenizer=partial(self.tgt_vocab.encode_as_ids, split_ratio=split_ratio))\n if any([out_file.name.endswith(suf) for suf in ('.nldb', '.nldb.tmp')]):\n from nlcodec.db import MultipartDb\n MultipartDb.create(path=out_file, recs=parallel_recs, field_names=('x', 'y'))\n elif any([out_file.name.endswith(suf) for suf in ('.db', '.db.tmp')]):\n SqliteFile.write(out_file, records=parallel_recs)\n else:\n TSVData.write_parallel_recs(parallel_recs, out_file)\n e_time = time.time()\n log.info(f\"Time taken to process: {timedelta(seconds=(e_time - s_time))}\")\n if args.get('text_files'):\n # Redo again as plain text files\n parallel_recs = reader_func(\n args[src_key], args[tgt_key], args['truncate'], args['src_len'], args['tgt_len'],\n src_tokenizer=self.src_vocab.tokenize, tgt_tokenizer=self.tgt_vocab.tokenize)\n\n text_file_name = str(out_file).replace('.db', '.tsv.gz').replace('.tsv', '.pieces.tsv')\n TSVData.write_parallel_recs(parallel_recs, text_file_name)\n\n def maybe_pre_process_embeds(self, do_clean=False):\n\n def _read_vocab(path: Path) -> List[str]:\n with IO.reader(path) as rdr:\n vocab = [line.strip().split()[0] for line in rdr]\n if do_clean:\n # sentence piece starts with '▁' character\n vocab = [word[1:] if word[0] == '▁' else word for word in vocab]\n return vocab\n\n def _map_and_store(inp: Path, vocab_file: Path):\n id_to_str = _read_vocab(vocab_file)\n str_to_id = {tok: idx for idx, tok in enumerate(id_to_str)}\n assert len(id_to_str) == len(id_to_str)\n vocab_size = len(id_to_str)\n\n matched_set, ignored_set, duplicate_set = set(), set(), set()\n\n with inp.open(encoding='utf-8') as in_fh:\n header = in_fh.readline()\n parts = header.strip().split()\n if len(parts) == 2:\n tot, dim = int(parts[0]), int(parts[1])\n matrix = torch.zeros(vocab_size, dim)\n else:\n assert len(parts) > 2\n word, vec = parts[0], [float(x) for x in parts[1:]]\n dim = len(vec)\n matrix = torch.zeros(vocab_size, dim)\n if word in str_to_id:\n matrix[str_to_id[word]] = torch.tensor(vec, dtype=torch.float)\n matched_set.add(word)\n else:\n ignored_set.add(word)\n\n for line in in_fh:\n parts = line.strip().split()\n word = parts[0]\n if word in str_to_id:\n if word in matched_set:\n duplicate_set.add(word)\n # Note: this overwrites duplicate words\n vec = [float(x) for x in parts[1:]]\n matrix[str_to_id[word]] = torch.tensor(vec, dtype=torch.float)\n matched_set.add(word)\n else:\n ignored_set.add(word)\n pre_trained = matched_set | ignored_set\n vocab_set = set(id_to_str)\n oovs = vocab_set - matched_set\n stats = {\n 'pre_trained': len(pre_trained),\n 'vocab': len(vocab_set),\n 'matched': len(matched_set),\n 'ignored': len(ignored_set),\n 'oov': len(oovs)\n }\n stats.update({\n 'oov_rate': stats['oov'] / stats['vocab'],\n 'match_rate': stats['matched'] / stats['vocab'],\n 'useless_rate': stats['ignored'] / stats['pre_trained'],\n 'useful_rate': stats['matched'] / stats['pre_trained']\n })\n return matrix, stats\n\n def _write_emb_matrix(matrix, path: str):\n torch.save(matrix, path)\n\n def _write_dict(dict, path: Path):\n with IO.writer(path) as out:\n for key, val in dict.items():\n out.write(f\"{key}\\t{val}\\n\")\n\n args = self.config['prep']\n mapping = {\n 'pre_emb_src': self.emb_src_file,\n 'pre_emb_tgt': self.emb_tgt_file,\n 'ext_emb_src': self.ext_emb_src_file,\n 'ext_emb_tgt': self.ext_emb_tgt_file,\n }\n if not any(x in args for x in mapping):\n log.info(\"No pre trained embeddings are found in config; skipping it\")\n return\n\n for key, outp in mapping.items():\n if key in args:\n inp = Path(args[key])\n assert inp.exists()\n voc_file = self.data_dir / f'sentpiece.shared.vocab'\n if not voc_file.exists():\n field_name = key.split('_')[-1] # emb_src --> src ; emb_tgt --> tgt\n voc_file = self.data_dir / f'sentpiece.{field_name}.vocab'\n assert voc_file.exists()\n\n log.info(f\"Processing {key}: {inp}\")\n emb_matrix, report = _map_and_store(inp, voc_file)\n _write_dict(report, Path(str(outp) + '.report.txt'))\n _write_emb_matrix(emb_matrix, str(outp))\n\n def shrink_vocabs(self):\n assert self.codec_name == 'nlcodec', 'Only nlcodec supports shrinking of vocabs'\n args = self.config['prep']\n\n if self.shared_vocab:\n corpus = [args[key] for key in ['train_src', 'train_tgt', 'mono_src', 'mono_tgt']\n if args.get(key)]\n remap_src = self.shared_vocab.shrink_vocab(files=corpus, min_freq=1,\n save_at=self._shared_field_file)\n remap_tgt = remap_src\n else:\n corpus_src = [args[key] for key in ['train_src', 'mono_src'] if args.get(key)]\n remap_src = self.src_vocab.shrink_vocab(files=corpus_src, min_freq=1,\n save_at=self._src_field_file)\n corpus_tgt = [args[key] for key in ['train_tgt', 'mono_tgt'] if args.get(key)]\n remap_tgt = self.tgt_vocab.shrink_vocab(files=corpus_tgt, min_freq=1,\n save_at=self._tgt_field_file)\n self.reload_vocabs()\n self.model_args['src_vocab'] = len(self.src_vocab)\n self.model_args['tgt_vocab'] = len(self.tgt_vocab)\n return remap_src, remap_tgt\n\n def inherit_parent(self):\n parent = self.config['parent']\n parent_exp = type(self)(parent['experiment'], read_only=True)\n log.info(f\"Parent experiment: {parent_exp.work_dir}\")\n parent_exp.has_prepared()\n vocab_sepc = parent.get('vocab')\n if vocab_sepc:\n log.info(f\"Parent vocabs inheritance spec: {vocab_sepc}\")\n codec_lib = parent_exp.config['prep'].get('codec_lib')\n if codec_lib:\n self.config['prep']['codec_lib'] = codec_lib\n\n def _locate_field_file(exp: TranslationExperiment, name, check_exists=False) -> Path:\n switch = {'src': exp._src_field_file,\n 'tgt': exp._tgt_field_file,\n 'shared': exp._shared_field_file}\n assert name in switch, f'{name} not allowed; valid options= {switch.keys()}'\n file = switch[name]\n if check_exists:\n assert file.exists(), f'{file} doesnot exist; for {name} of {exp.work_dir}'\n return file\n\n for to_field, from_field in vocab_sepc.items():\n from_field_file = _locate_field_file(parent_exp, from_field, check_exists=True)\n to_field_file = _locate_field_file(self, to_field, check_exists=False)\n IO.copy_file(from_field_file, to_field_file)\n self.reload_vocabs()\n else:\n log.info(\"No vocabularies are inherited from parent\")\n model_sepc = parent.get('model')\n if model_sepc:\n log.info(\"Parent model inheritance spec\")\n if model_sepc.get('args'):\n self.model_args = parent_exp.model_args\n ensemble = model_sepc.get('ensemble', 1)\n model_paths = parent_exp.list_models(sort='step', desc=True)[:ensemble]\n log.info(f\"Averaging {len(model_paths)} checkpoints of parent model: \\n{model_paths}\")\n avg_state = self.average_states(model_paths=model_paths)\n log.info(f\"Saving parent model's state to {self.parent_model_state}\")\n torch.save(avg_state, self.parent_model_state)\n\n shrink_spec = parent.get('shrink')\n if shrink_spec:\n remap_src, remap_tgt = self.shrink_vocabs()\n def map_rows(mapping: List[int], source: torch.Tensor, name=''):\n assert max(mapping) < len(source)\n target = torch.zeros((len(mapping), *source.shape[1:]),\n dtype=source.dtype, device=source.device)\n for new_idx, old_idx in enumerate(mapping):\n target[new_idx] = source[old_idx]\n log.info(f\"Mapped {name} {source.shape} --> {target.shape} \")\n return target\n\n \"\"\" src_embed.0.lut.weight [N x d]\n tgt_embed.0.lut.weight [N x d]\n generator.proj.weight [N x d]\n generator.proj.bias [N] \"\"\"\n if remap_src:\n key = 'src_embed.0.lut.weight'\n avg_state[key] = map_rows(remap_src, avg_state[key], name=key)\n if remap_tgt:\n map_keys = ['tgt_embed.0.lut.weight', 'generator.proj.weight', 'generator.proj.bias']\n for key in map_keys:\n if key not in avg_state:\n log.warning(f'{key} not found in avg_state of parent model. Mapping skipped')\n continue\n avg_state[key] = map_rows(remap_tgt, avg_state[key], name=key)\n if self.parent_model_state.exists():\n self.parent_model_state.rename(self.parent_model_state.with_suffix('.orig'))\n torch.save(avg_state, self.parent_model_state)\n self.persist_state() # this will fix src_vocab and tgt_vocab of model_args conf\n\n def pre_process(self, args=None, force=False):\n args = args or self.config['prep']\n super(TranslationExperiment, self).pre_process(args)\n\n if self.has_prepared() and not force:\n log.warning(\"Already prepared\")\n return\n\n if self._unsupervised:\n self.pre_process_mono(args)\n else:\n self.pre_process_parallel(args)\n\n self.maybe_pre_process_embeds()\n # update state on disk\n self.persist_state()\n self._prepared_flag.touch()\n\n def persist_state(self):\n \"\"\"Writes state of current experiment to the disk\"\"\"\n assert not self.read_only\n if 'model_args' not in self.config:\n self.config['model_args'] = {}\n args = self.config['model_args']\n if self.model_type in {'rnnlm', 'tfmlm', 'wv_cbow'}:\n # Language models\n # TODO: improve the design of this thing\n args['vocab_size'] = max(len(self.src_vocab) if self.src_vocab else 0,\n len(self.tgt_vocab) if self.tgt_vocab else 0)\n else:\n # Translation models\n args['src_vocab'] = len(self.src_vocab) if self.src_vocab else 0\n args['tgt_vocab'] = len(self.tgt_vocab) if self.tgt_vocab else 0\n\n self.config['updated_at'] = datetime.now().isoformat()\n if 'rtg_version' not in self.config:\n self.config['rtg_version'] = {}\n version = self.config['rtg_version']\n if version.get('last_worked', None) != rtg.__version__:\n version['previous'] = version.get('last_worked')\n version['last_worked'] = rtg.__version__\n self.store_config()\n\n @classmethod\n def maybe_adjust_batch_size(cls, batch_size):\n orig = batch_size\n scaler = dtorch.batch_size_scaler\n if scaler > 1:\n if isinstance(batch_size, int):\n batch_size = batch_size // scaler\n else:\n batch_size = [x // scaler for x in batch_size]\n log.info(f\"batch_size:: given={orig}; adjusted to {dtorch.world_size}workers\"\n f\" x {dtorch.grad_accum}accumulations =>{batch_size}\")\n return batch_size\n\n def train(self, args=None):\n run_args = copy.deepcopy(self.config.get('trainer', {}))\n if args:\n run_args.update(args)\n if 'init_args' in run_args:\n del run_args['init_args']\n train_steps = run_args['steps']\n finetune_steps = run_args.pop('finetune_steps', None)\n if finetune_steps:\n assert isinstance(finetune_steps, int)\n assert finetune_steps > train_steps, f'finetune_steps={finetune_steps} should be' \\\n f' greater than steps={train_steps}'\n\n _, last_step = self.get_last_saved_model()\n if self._trained_flag.exists():\n # noinspection PyBroadException\n try:\n last_step = max(last_step, yaml.load(self._trained_flag.read_text())['steps'])\n except Exception as _:\n pass\n\n if last_step >= train_steps and (finetune_steps is None or last_step >= finetune_steps):\n log.warning(\n f\"Already trained upto {last_step}; Requested: train={train_steps}, finetune={finetune_steps} Skipped\")\n return\n from .registry import MODELS\n trainer = MODELS[self.model_type].Trainer(self)\n run_args['batch_size'] = self.maybe_adjust_batch_size(run_args['batch_size'])\n if last_step < train_steps: # regular training\n stopped = trainer.train(fine_tune=False, **run_args)\n if not self.read_only:\n status = dict(steps=train_steps, early_stopped=stopped, finetune=False)\n try:\n status['earlier'] = yaml.load(self._trained_flag.read_text())\n except Exception as _:\n pass\n yaml.dump(status, stream=self._trained_flag)\n if finetune_steps: # Fine tuning\n finetune_batch_size = run_args['batch_size']\n if 'finetune_batch_size' in run_args:\n finetune_batch_size = self.maybe_adjust_batch_size(run_args.pop('finetune_batch_size'))\n log.info(f\"Fine tuning upto {finetune_steps}, batch_size={finetune_batch_size}\")\n assert finetune_batch_size\n run_args['steps'] = finetune_steps\n run_args['batch_size'] = finetune_batch_size\n\n stopped = trainer.train(fine_tune=True, **run_args)\n status = dict(steps=finetune_steps, early_stopped=stopped, finetune=True)\n try:\n status['earlier'] = yaml.load(self._trained_flag.read_text())\n except Exception as _:\n pass\n yaml.dump(status, stream=self._trained_flag)\n\n @property\n def src_vocab(self) -> Field:\n return self.shared_field if self.shared_field is not None else self.src_field\n\n @property\n def tgt_vocab(self) -> Field:\n return self.shared_field if self.shared_field is not None else self.tgt_field\n\n def _get_batch_args(self):\n prep_args = self.config.get('prep', {})\n return {ok: prep_args[ik] for ik, ok in\n [('src_len', 'max_src_len'), ('tgt_len', 'max_tgt_len'), ('truncate', 'truncate')]\n if ik in prep_args}\n\n def get_train_data(self, batch_size: Union[int, Tuple[int, int]], steps: int = 0, sort_by='eq_len_rand_batch',\n batch_first=True, shuffle=False, fine_tune=False, keep_in_mem=False,\n split_ratio: float = 0., dynamic_epoch=False, y_is_cls=False):\n\n data_path = self.train_db if self.train_db.exists() else self.train_file\n if fine_tune:\n if not self.finetune_file.exists():\n # user may have added fine tune file later\n self._pre_process_parallel('finetune_src', 'finetune_tgt', self.finetune_file)\n log.info(\"Using Fine tuning corpus instead of training corpus\")\n data_path = self.finetune_file\n\n if split_ratio > 0:\n data_path = IO.maybe_tmpfs(data_path)\n train_file = data_path.with_suffix('.db.tmp')\n assert not y_is_cls, 'Not supported feature'\n file_creator = partial(self.file_creator, train_file=train_file, split_ratio=split_ratio)\n train_data = GenerativeBatchIterable(\n file_creator=file_creator, batches=steps, batch_size=batch_size, field=self.tgt_vocab,\n dynamic_epoch=dynamic_epoch, batch_first=batch_first, shuffle=shuffle, sort_by=sort_by,\n **self._get_batch_args())\n else:\n train_data = BatchIterable(\n data_path=data_path, batch_size=batch_size, field=self.tgt_vocab, sort_by=sort_by,\n batch_first=batch_first, shuffle=shuffle, y_is_cls=y_is_cls, **self._get_batch_args())\n # default, read data once completely, if steps > 0, truncate or loop depending on steps and data size\n if steps > 0:\n train_data = LoopingIterable(train_data, steps)\n return train_data\n\n def file_creator(self, train_file, split_ratio, *args, **kwargs):\n self._pre_process_parallel(*args, src_key='train_src', tgt_key='train_tgt',\n out_file=train_file, split_ratio=split_ratio, **kwargs)\n return train_file\n\n def get_val_data(self, batch_size: Union[int, Tuple[int, int]], sort_desc=False, batch_first=True,\n shuffle=False, y_is_cls=False):\n prep = self.config.get('prep', {})\n raw_tgt = prep.get('valid_tgt_raw', None)\n if not raw_tgt:\n raise Exception('Config value prep.valid_tgt_raw is required. It should have path to a file'\n ' having raw (unmodified) target file, to be used for computing BLEU.')\n raw_src = prep.get('valid_src_raw', prep.get('valid_src'))\n for path in (raw_src, raw_tgt):\n assert Path(path).exists(), f'File at {path} does not exist; it is required'\n raw_path = Path(raw_src), Path(raw_tgt)\n return BatchIterable(self.valid_file, batch_size=batch_size, sort_desc=sort_desc,\n batch_first=batch_first, shuffle=shuffle, field=self.tgt_vocab,\n keep_in_mem=True, raw_path=raw_path, y_is_cls=y_is_cls,\n **self._get_batch_args())\n\n def get_combo_data(self, batch_size: int, steps: int = 0, sort_desc=False, batch_first=True,\n shuffle=False):\n if not self.combo_file.exists():\n # user may have added fine tune file later\n self._pre_process_parallel('combo_src', 'combo_tgt', self.combo_file)\n combo_file = IO.maybe_tmpfs(self.combo_file)\n data = BatchIterable(\n combo_file, batch_size=batch_size, sort_desc=sort_desc, field=self.tgt_vocab,\n batch_first=batch_first, shuffle=shuffle, **self._get_batch_args())\n if steps > 0:\n data = LoopingIterable(data, steps)\n return data\n\n def copy_vocabs(self, other):\n \"\"\"\n Copies vocabulary files from self to other\n :param other: other experiment\n :return:\n \"\"\"\n other: TranslationExperiment = other\n if not other.data_dir.exists():\n other.data_dir.mkdir(parents=True)\n for source, destination in [(self._src_field_file, other._src_field_file),\n (self._tgt_field_file, other._tgt_field_file),\n (self._shared_field_file, other._shared_field_file)]:\n if source.exists():\n IO.copy_file(source.resolve(), destination.resolve())\n src_txt_file = source.with_name(source.name.replace('.model', '.vocab'))\n if src_txt_file.exists():\n dst_txt_file = destination.with_name(\n destination.name.replace('.model', '.vocab'))\n IO.copy_file(src_txt_file, dst_txt_file)\n\n def get_mono_data(self, split: str, side: str, batch_size: int, sort_desc: bool = False,\n batch_first: bool = False, shuffle: bool = False, num_batches: int = 0):\n \"\"\"\n reads monolingual data\n :param split: name of the split. choices = {train, valid}\n :param side: which side ? choices = {src, tgt}\n :param batch_size: what should be batch size. example =64\n :param sort_desc: should the seqs in batch be sorted descending order of length ?\n :param batch_first: should the first dimension be batch instead of time step ?\n :param shuffle: should the seqs be shuffled before reading (and for each re-reading\n if num_batches is too large)\n :param num_batches: how many batches to read?\n :return: iterator of batches\n \"\"\"\n assert side in ('src', 'tgt')\n assert split in ('train', 'valid')\n inp_file = {\n ('train', 'src'): self.mono_train_src,\n ('train', 'tgt'): self.mono_train_tgt,\n ('valid', 'src'): self.mono_valid_src,\n ('valid', 'tgt'): self.mono_valid_tgt,\n }[(split, side)]\n assert inp_file.exists()\n # read this file\n field = self.tgt_vocab if side == 'tgt' else self.src_field\n data = BatchIterable(inp_file, batch_size=batch_size, sort_desc=sort_desc,\n batch_first=batch_first, shuffle=shuffle, field=field,\n **self._get_batch_args())\n\n if num_batches > 0:\n data = LoopingIterable(data, num_batches)\n return data\n\n def get_class_freqs(self):\n batch_size = self.config.get('trainer', {}).get('batch_size', 2000)\n train_data = self.get_train_data(batch_size=batch_size, steps=-1, shuffle=False)\n freq_file = self.data_dir / 'class.freqs.tsv'\n vocab = self.tgt_vocab\n\n if not freq_file.exists():\n stats = Counter()\n for batch in tqdm.tqdm(train_data):\n for seq in batch.y_seqs.tolist():\n stats.update(seq)\n if vocab.pad_idx >= 0:\n stats[vocab.pad_idx] = 0\n\n with freq_file.open('w', encoding='utf-8') as out:\n for i in range(len(vocab)):\n name = vocab.class_names[i]\n freq = stats.get(i, 0)\n out.write(f'{i}\\t{name}\\t{freq}\\n')\n\n with freq_file.open('r') as lines:\n recs = (line.split('\\t') for line in lines)\n recs = [(int(r[0]), r[1], int(r[2])) for r in recs]\n return recs\n\n def get_pre_transform(self, side: str):\n assert side in ('src', 'tgt')\n from rtg.transform import TextTransform\n conf_chain = self.config.get('prep', {}).get(f'{side}_pre_proc', None)\n if conf_chain:\n transform = TextTransform.make(names=conf_chain)\n else:\n transform = TextTransform.recommended_pre()\n return transform\n\n def get_post_transform(self, side: str):\n assert side in ('src', 'tgt')\n from rtg.transform import TextTransform\n conf_chain = self.config.get('prep', {}).get(f'{side}_post_proc', None)\n if conf_chain:\n transform = TextTransform.make(names=conf_chain)\n else:\n transform = TextTransform.recommended_post()\n return transform\n" ]
[ [ "numpy.random.seed", "torch.load", "torch.zeros", "torch.manual_seed", "torch.tensor", "torch.cuda.is_available", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
joesdesk/ssnmf
[ "7ae321a2a19cde174679ba5acde2f151cf0c99e8" ]
[ "nonnegfac/nnls.py" ]
[ "import numpy as np\nimport scipy.optimize as opt\nimport scipy.sparse as sps\nimport numpy.linalg as nla\nimport scipy.linalg as sla\nimport time\n\n\ndef nnlsm_blockpivot(A, B, is_input_prod=False, init=None):\n \"\"\" Nonnegativity-constrained least squares with block principal pivoting method and column grouping\n\n Solves min ||AX-B||_2^2 s.t. X >= 0 element-wise.\n\n J. Kim and H. Park, Fast nonnegative matrix factorization: An active-set-like method and comparisons,\n SIAM Journal on Scientific Computing, \n vol. 33, no. 6, pp. 3261-3281, 2011.\n\n Parameters\n ----------\n A : numpy.array, shape (m,n)\n B : numpy.array or scipy.sparse matrix, shape (m,k)\n\n Optional Parameters\n -------------------\n is_input_prod : True/False. - If True, the A and B arguments are interpreted as\n AtA and AtB, respectively. Default is False.\n init: numpy.array, shape (n,k). - If provided, init is used as an initial value for the algorithm.\n Default is None.\n\n Returns\n -------\n X, (success, Y, num_cholesky, num_eq, num_backup)\n X : numpy.array, shape (n,k) - solution\n success : True/False - True if the solution is found. False if the algorithm did not terminate\n due to numerical errors.\n Y : numpy.array, shape (n,k) - Y = A.T * A * X - A.T * B\n num_cholesky : int - the number of Cholesky factorizations needed\n num_eq : int - the number of linear systems of equations needed to be solved\n num_backup: int - the number of appearances of the back-up rule. See SISC paper for details.\n \"\"\"\n if is_input_prod:\n AtA = A\n AtB = B\n else:\n AtA = A.T.dot(A)\n if sps.issparse(B):\n AtB = B.T.dot(A)\n AtB = AtB.T\n else:\n AtB = A.T.dot(B)\n\n (n, k) = AtB.shape\n MAX_ITER = n * 5\n\n if init is not None:\n PassSet = init > 0\n X, num_cholesky, num_eq = normal_eq_comb(AtA, AtB, PassSet)\n Y = AtA.dot(X) - AtB\n else:\n X = np.zeros([n, k])\n Y = -AtB\n PassSet = np.zeros([n, k], dtype=bool)\n num_cholesky = 0\n num_eq = 0\n\n p_bar = 3\n p_vec = np.zeros([k])\n p_vec[:] = p_bar\n ninf_vec = np.zeros([k])\n ninf_vec[:] = n + 1\n not_opt_set = np.logical_and(Y < 0, ~PassSet)\n infea_set = np.logical_and(X < 0, PassSet)\n\n not_good = np.sum(not_opt_set, axis=0) + np.sum(infea_set, axis=0)\n not_opt_colset = not_good > 0\n not_opt_cols = not_opt_colset.nonzero()[0]\n\n big_iter = 0\n num_backup = 0\n success = True\n while not_opt_cols.size > 0:\n big_iter += 1\n if MAX_ITER > 0 and big_iter > MAX_ITER:\n success = False\n break\n\n cols_set1 = np.logical_and(not_opt_colset, not_good < ninf_vec)\n temp1 = np.logical_and(not_opt_colset, not_good >= ninf_vec)\n temp2 = p_vec >= 1\n cols_set2 = np.logical_and(temp1, temp2)\n cols_set3 = np.logical_and(temp1, ~temp2)\n\n cols1 = cols_set1.nonzero()[0]\n cols2 = cols_set2.nonzero()[0]\n cols3 = cols_set3.nonzero()[0]\n\n if cols1.size > 0:\n p_vec[cols1] = p_bar\n ninf_vec[cols1] = not_good[cols1]\n true_set = np.logical_and(not_opt_set, np.tile(cols_set1, (n, 1)))\n false_set = np.logical_and(infea_set, np.tile(cols_set1, (n, 1)))\n PassSet[true_set] = True\n PassSet[false_set] = False\n if cols2.size > 0:\n p_vec[cols2] = p_vec[cols2] - 1\n temp_tile = np.tile(cols_set2, (n, 1))\n true_set = np.logical_and(not_opt_set, temp_tile)\n false_set = np.logical_and(infea_set, temp_tile)\n PassSet[true_set] = True\n PassSet[false_set] = False\n if cols3.size > 0:\n for col in cols3:\n candi_set = np.logical_or(\n not_opt_set[:, col], infea_set[:, col])\n to_change = np.max(candi_set.nonzero()[0])\n PassSet[to_change, col] = ~PassSet[to_change, col]\n num_backup += 1\n\n (X[:, not_opt_cols], temp_cholesky, temp_eq) = normal_eq_comb(\n AtA, AtB[:, not_opt_cols], PassSet[:, not_opt_cols])\n num_cholesky += temp_cholesky\n num_eq += temp_eq\n X[abs(X) < 1e-12] = 0\n Y[:, not_opt_cols] = AtA.dot(X[:, not_opt_cols]) - AtB[:, not_opt_cols]\n Y[abs(Y) < 1e-12] = 0\n\n not_opt_mask = np.tile(not_opt_colset, (n, 1))\n not_opt_set = np.logical_and(\n np.logical_and(not_opt_mask, Y < 0), ~PassSet)\n infea_set = np.logical_and(\n np.logical_and(not_opt_mask, X < 0), PassSet)\n not_good = np.sum(not_opt_set, axis=0) + np.sum(infea_set, axis=0)\n not_opt_colset = not_good > 0\n not_opt_cols = not_opt_colset.nonzero()[0]\n\n return X, (success, Y, num_cholesky, num_eq, num_backup)\n\n\ndef nnlsm_activeset(A, B, overwrite=False, is_input_prod=False, init=None):\n \"\"\" Nonnegativity-constrained least squares with active-set method and column grouping\n\n Solves min ||AX-B||_2^2 s.t. X >= 0 element-wise.\n\n Algorithm of this routine is close to the one presented in the following paper but\n is different in organising inner- and outer-loops:\n M. H. Van Benthem and M. R. Keenan, J. Chemometrics 2004; 18: 441-450\n\n Parameters\n ----------\n A : numpy.array, shape (m,n)\n B : numpy.array or scipy.sparse matrix, shape (m,k)\n\n Optional Parameters\n -------------------\n is_input_prod : True/False. - If True, the A and B arguments are interpreted as\n AtA and AtB, respectively. Default is False.\n init: numpy.array, shape (n,k). - If provided, init is used as an initial value for the algorithm.\n Default is None.\n\n Returns\n -------\n X, (success, Y, num_cholesky, num_eq, num_backup)\n X : numpy.array, shape (n,k) - solution\n success : True/False - True if the solution is found. False if the algorithm did not terminate\n due to numerical errors.\n Y : numpy.array, shape (n,k) - Y = A.T * A * X - A.T * B\n num_cholesky : int - the number of Cholesky factorizations needed\n num_eq : int - the number of linear systems of equations needed to be solved\n \"\"\"\n if is_input_prod:\n AtA = A\n AtB = B\n else:\n AtA = A.T.dot(A)\n if sps.issparse(B):\n AtB = B.T.dot(A)\n AtB = AtB.T\n else:\n AtB = A.T.dot(B)\n\n (n, k) = AtB.shape\n MAX_ITER = n * 5\n num_cholesky = 0\n num_eq = 0\n not_opt_set = np.ones([k], dtype=bool)\n\n if overwrite:\n X, num_cholesky, num_eq = normal_eq_comb(AtA, AtB)\n PassSet = X > 0\n not_opt_set = np.any(X < 0, axis=0)\n elif init is not None:\n X = init\n X[X < 0] = 0\n PassSet = X > 0\n else:\n X = np.zeros([n, k])\n PassSet = np.zeros([n, k], dtype=bool)\n\n Y = np.zeros([n, k])\n opt_cols = (~not_opt_set).nonzero()[0]\n not_opt_cols = not_opt_set.nonzero()[0]\n\n Y[:, opt_cols] = AtA.dot(X[:, opt_cols]) - AtB[:, opt_cols]\n\n big_iter = 0\n success = True\n while not_opt_cols.size > 0:\n big_iter += 1\n if MAX_ITER > 0 and big_iter > MAX_ITER:\n success = False\n break\n\n (Z, temp_cholesky, temp_eq) = normal_eq_comb(\n AtA, AtB[:, not_opt_cols], PassSet[:, not_opt_cols])\n num_cholesky += temp_cholesky\n num_eq += temp_eq\n\n Z[abs(Z) < 1e-12] = 0\n\n infea_subset = Z < 0\n temp = np.any(infea_subset, axis=0)\n infea_subcols = temp.nonzero()[0]\n fea_subcols = (~temp).nonzero()[0]\n\n if infea_subcols.size > 0:\n infea_cols = not_opt_cols[infea_subcols]\n\n (ix0, ix1_subsub) = infea_subset[:, infea_subcols].nonzero()\n ix1_sub = infea_subcols[ix1_subsub]\n ix1 = not_opt_cols[ix1_sub]\n\n X_infea = X[(ix0, ix1)]\n\n alpha = np.zeros([n, len(infea_subcols)])\n alpha[:] = np.inf\n alpha[(ix0, ix1_subsub)] = X_infea / (X_infea - Z[(ix0, ix1_sub)])\n min_ix = np.argmin(alpha, axis=0)\n min_vals = alpha[(min_ix, range(0, alpha.shape[1]))]\n\n X[:, infea_cols] = X[:, infea_cols] + \\\n (Z[:, infea_subcols] - X[:, infea_cols]) * min_vals\n X[(min_ix, infea_cols)] = 0\n PassSet[(min_ix, infea_cols)] = False\n\n elif fea_subcols.size > 0:\n fea_cols = not_opt_cols[fea_subcols]\n\n X[:, fea_cols] = Z[:, fea_subcols]\n Y[:, fea_cols] = AtA.dot(X[:, fea_cols]) - AtB[:, fea_cols]\n\n Y[abs(Y) < 1e-12] = 0\n\n not_opt_subset = np.logical_and(\n Y[:, fea_cols] < 0, ~PassSet[:, fea_cols])\n new_opt_cols = fea_cols[np.all(~not_opt_subset, axis=0)]\n update_cols = fea_cols[np.any(not_opt_subset, axis=0)]\n\n if update_cols.size > 0:\n val = Y[:, update_cols] * ~PassSet[:, update_cols]\n min_ix = np.argmin(val, axis=0)\n PassSet[(min_ix, update_cols)] = True\n\n not_opt_set[new_opt_cols] = False\n not_opt_cols = not_opt_set.nonzero()[0]\n\n return X, (success, Y, num_cholesky, num_eq)\n\n\ndef normal_eq_comb(AtA, AtB, PassSet=None):\n \"\"\" Solve many systems of linear equations using combinatorial grouping.\n\n M. H. Van Benthem and M. R. Keenan, J. Chemometrics 2004; 18: 441-450\n\n Parameters\n ----------\n AtA : numpy.array, shape (n,n)\n AtB : numpy.array, shape (n,k)\n\n Returns\n -------\n (Z,num_cholesky,num_eq)\n Z : numpy.array, shape (n,k) - solution\n num_cholesky : int - the number of unique cholesky decompositions done\n num_eq: int - the number of systems of linear equations solved\n \"\"\"\n num_cholesky = 0\n num_eq = 0\n if AtB.size == 0:\n Z = np.zeros([])\n elif (PassSet is None) or np.all(PassSet):\n Z = nla.solve(AtA, AtB)\n num_cholesky = 1\n num_eq = AtB.shape[1]\n else:\n Z = np.zeros(AtB.shape)\n if PassSet.shape[1] == 1:\n if np.any(PassSet):\n cols = PassSet.nonzero()[0]\n Z[cols] = nla.solve(AtA[np.ix_(cols, cols)], AtB[cols])\n num_cholesky = 1\n num_eq = 1\n else:\n #\n # Both _column_group_loop() and _column_group_recursive() work well.\n # Based on preliminary testing,\n # _column_group_loop() is slightly faster for tiny k(<10), but\n # _column_group_recursive() is faster for large k's.\n #\n grps = _column_group_recursive(PassSet)\n for gr in grps:\n cols = PassSet[:, gr[0]].nonzero()[0]\n if cols.size > 0:\n ix1 = np.ix_(cols, gr)\n ix2 = np.ix_(cols, cols)\n #\n # scipy.linalg.cho_solve can be used instead of numpy.linalg.solve.\n # For small n(<200), numpy.linalg.solve appears faster, whereas\n # for large n(>500), scipy.linalg.cho_solve appears faster.\n # Usage example of scipy.linalg.cho_solve:\n # Z[ix1] = sla.cho_solve(sla.cho_factor(AtA[ix2]),AtB[ix1])\n #\n Z[ix1] = nla.solve(AtA[ix2], AtB[ix1])\n num_cholesky += 1\n num_eq += len(gr)\n num_eq += len(gr)\n return Z, num_cholesky, num_eq\n\n\ndef _column_group_loop(B):\n \"\"\" Given a binary matrix, find groups of the same columns\n with a looping strategy\n\n Parameters\n ----------\n B : numpy.array, True/False in each element\n\n Returns\n -------\n A list of arrays - each array contain indices of columns that are the same.\n \"\"\"\n initial = [np.arange(0, B.shape[1])]\n before = initial\n after = []\n for i in range(0, B.shape[0]):\n all_ones = True\n vec = B[i]\n for cols in before:\n if len(cols) == 1:\n after.append(cols)\n else:\n all_ones = False\n subvec = vec[cols]\n trues = subvec.nonzero()[0]\n falses = (~subvec).nonzero()[0]\n if trues.size > 0:\n after.append(cols[trues])\n if falses.size > 0:\n after.append(cols[falses])\n before = after\n after = []\n if all_ones:\n break\n return before\n\n\ndef _column_group_recursive(B):\n \"\"\" Given a binary matrix, find groups of the same columns\n with a recursive strategy\n\n Parameters\n ----------\n B : numpy.array, True/False in each element\n\n Returns\n -------\n A list of arrays - each array contain indices of columns that are the same.\n \"\"\"\n initial = np.arange(0, B.shape[1])\n return [a for a in column_group_sub(B, 0, initial) if len(a) > 0]\n\n\ndef column_group_sub(B, i, cols):\n vec = B[i][cols]\n if len(cols) <= 1:\n return [cols]\n if i == (B.shape[0] - 1):\n col_trues = cols[vec.nonzero()[0]]\n col_falses = cols[(~vec).nonzero()[0]]\n return [col_trues, col_falses]\n else:\n col_trues = cols[vec.nonzero()[0]]\n col_falses = cols[(~vec).nonzero()[0]]\n after = column_group_sub(B, i + 1, col_trues)\n after.extend(column_group_sub(B, i + 1, col_falses))\n return after\n\n\ndef _test_column_grouping(m=10, n=5000, num_repeat=5, verbose=False):\n print ('\\nTesting column_grouping ...\\n')\n A = np.array([[True, False, False, False, False],\n [True, True, False, True, True]])\n grps1 = _column_group_loop(A)\n grps2 = _column_group_recursive(A)\n grps3 = [np.array([0]),\n np.array([1, 3, 4]),\n np.array([2])]\n print ('OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps2)]) else 'Fail')\n print ('OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps3)]) else 'Fail')\n\n for i in iter(range(0, num_repeat)):\n A = np.random.rand(m, n)\n B = A > 0.5\n start = time.time()\n grps1 = _column_group_loop(B)\n elapsed_loop = time.time() - start\n start = time.time()\n grps2 = _column_group_recursive(B)\n elapsed_recursive = time.time() - start\n if verbose:\n print ('Loop :', elapsed_loop)\n print ('Recursive:', elapsed_recursive)\n print ('OK' if all([np.array_equal(a, b) for (a, b) in zip(grps1, grps2)]) else 'Fail')\n # sorted_idx = np.concatenate(grps)\n # print B\n # print sorted_idx\n # print B[:,sorted_idx]\n return\n\n\ndef _test_normal_eq_comb(m=10, k=3, num_repeat=5):\n print ('\\nTesting normal_eq_comb() ...\\n')\n for i in iter(range(0, num_repeat)):\n A = np.random.rand(2 * m, m)\n X = np.random.rand(m, k)\n C = (np.random.rand(m, k) > 0.5)\n X[~C] = 0\n B = A.dot(X)\n B = A.T.dot(B)\n A = A.T.dot(A)\n Sol, a, b = normal_eq_comb(A, B, C)\n print ('OK' if np.allclose(X, Sol) else 'Fail')\n return\n\n\ndef _test_nnlsm():\n print ('\\nTesting nnls routines ...\\n')\n m = 100\n n = 10\n k = 200\n rep = 5\n\n for r in iter(range(0, rep)):\n A = np.random.rand(m, n)\n X_org = np.random.rand(n, k)\n X_org[np.random.rand(n, k) < 0.5] = 0\n B = A.dot(X_org)\n # B = np.random.rand(m,k)\n # A = np.random.rand(m,n/2)\n # A = np.concatenate((A,A),axis=1)\n # A = A + np.random.rand(m,n)*0.01\n # B = np.random.rand(m,k)\n\n import time\n start = time.time()\n C1, info = nnlsm_blockpivot(A, B)\n elapsed2 = time.time() - start\n rel_norm2 = nla.norm(C1 - X_org) / nla.norm(X_org)\n print ('nnlsm_blockpivot: ', 'OK ' if info[0] else 'Fail',\\\n 'elapsed:{0:.4f} error:{1:.4e}'.format(elapsed2, rel_norm2))\n\n start = time.time()\n C2, info = nnlsm_activeset(A, B)\n num_backup = 0\n elapsed1 = time.time() - start\n rel_norm1 = nla.norm(C2 - X_org) / nla.norm(X_org)\n print ('nnlsm_activeset: ', 'OK ' if info[0] else 'Fail',\\\n 'elapsed:{0:.4f} error:{1:.4e}'.format(elapsed1, rel_norm1))\n\n import scipy.optimize as opt\n start = time.time()\n C3 = np.zeros([n, k])\n for i in iter(range(0, k)):\n res = opt.nnls(A, B[:, i])\n C3[:, i] = res[0]\n elapsed3 = time.time() - start\n rel_norm3 = nla.norm(C3 - X_org) / nla.norm(X_org)\n print ('scipy.optimize.nnls: ', 'OK ',\\\n 'elapsed:{0:.4f} error:{1:.4e}'.format(elapsed3, rel_norm3))\n\n if num_backup > 0:\n break\n if rel_norm1 > 10e-5 or rel_norm2 > 10e-5 or rel_norm3 > 10e-5:\n break\n print ('')\n\nif __name__ == '__main__':\n _test_column_grouping()\n _test_normal_eq_comb()\n _test_nnlsm()\n" ]
[ [ "numpy.all", "numpy.argmin", "numpy.any", "numpy.ix_", "scipy.sparse.issparse", "numpy.allclose", "numpy.arange", "numpy.zeros", "numpy.logical_or", "numpy.random.rand", "numpy.logical_and", "numpy.array", "numpy.sum", "numpy.linalg.solve", "numpy.array_equal", "scipy.optimize.nnls", "numpy.tile", "numpy.linalg.norm", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
davidGCR/VioDenseDuplication
[ "6bfe92a26603eba87373ae66cfcf71ef82b8c35d" ]
[ "src/TubeletGeneration/visual_utils.py" ]
[ "import os\nimport numpy as np\nimport cv2\n\nfrom PIL import Image\n\ncolor = {'green':(0,255,0),\n 'blue':(255,165,0),\n 'dark red':(0,0,139),\n 'red':(0, 0, 255),\n 'dark slate blue':(139,61,72),\n 'aqua':(255,255,0),\n 'brown':(42,42,165),\n 'deep pink':(147,20,255),\n 'fuchisia':(255,0,255),\n 'yellow':(0,238,238),\n 'orange':(0,165,255),\n 'saddle brown':(19,69,139),\n 'black':(0,0,0),\n 'white':(255,255,255)}\n\ndef draw_boxes(img, boxes, scores=None, tags=None, ids=None, line_thick=1, line_color='white'):\n width = img.shape[1]\n height = img.shape[0]\n for i in range(len(boxes)):\n one_box = boxes[i]\n one_box = np.array([max(one_box[0], 0), max(one_box[1], 0),\n min(one_box[2], width - 1), min(one_box[3], height - 1)])\n x1,y1,x2,y2 = np.array(one_box[:4]).astype(int)\n\n # if tags[i]==\"merged\":\n # line_color = 'deep pink'\n # line_thick = 2\n if isinstance(line_color, list):\n cv2.rectangle(img, (x1,y1), (x2,y2), line_color[i], line_thick)\n if scores is not None:\n text = \"{:.3f}\".format(scores[i])\n cv2.putText(img, text, (x1, y1 - 7), cv2.FONT_ITALIC, 0.5, line_color[i], line_thick)\n if ids is not None:\n text = \"{}\".format(int(ids[i]))\n cv2.putText(img, text, (x1, y1 + 15), cv2.FONT_ITALIC, 0.5, line_color[i], line_thick)\n else:\n cv2.rectangle(img, (x1,y1), (x2,y2), color[line_color], line_thick)\n if scores is not None:\n text = \"{:.3f}\".format(scores[i])\n \n cv2.putText(img, text, (x1, y1 - 7), cv2.FONT_ITALIC, 0.5, color[line_color], line_thick)\n if ids is not None:\n text = \"{}\".format(int(ids[i]))\n cv2.putText(img, text, (x1, y1 + 15), cv2.FONT_ITALIC, 0.5, color[line_color], line_thick)\n return img\n\ndef imread(path):\n with Image.open(path) as img:\n return img.convert('RGB')\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BioPyTeam/biopy
[ "5c1444280d0a5098b61a99d96dc2825259c7ced5" ]
[ "src/biopy/metrics/reconstruction_error.py" ]
[ "import torch\nimport torch.nn as nn\nfrom torch.utils import data\nfrom collections import defaultdict\nfrom torch.utils.data import DataLoader\nfrom torch.nn import MSELoss\nfrom ..models import DomainTranslator\n\n\nclass LatentSpace:\n\n def __init__(self, dim, model, dataset, omics=['miRNA', 'mRNA', 'meth27-450-preprocessed']):\n\n self.dim = dim\n self.omics = omics\n self.num_omics = 0\n self.latent_mapping = {}\n self.pointsByOmicsID = {}\n\n dataset.standardize()\n _, dtest = dataset.train_val_test_split()\n\n for omic in omics:\n self.__gen_latent_space(model, dtest, omic)\n\n self.__genPointsByOmicsID()\n\n def __gen_latent_space(self, model, dataset, omic):\n '''creates dict latent_mapping with associations\n key:encoded_point -> val:original_data_point\n NOTE: does not work with images'''\n dataset = dataset.set_omic(omic)\n self.num_omics += len(dataset)\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n batches = 32\n loader = data.DataLoader(dataset, batch_size=batches, shuffle=True,\n num_workers=4, pin_memory=False, drop_last=False)\n model.eval()\n model.to(device)\n for batch_features, _ in loader:\n batch_cuda = batch_features.to(device)\n\n z = model.encode_and_sample(batch_cuda)\n z = z.detach().cpu()\n\n for i, data_point in enumerate(z):\n self.latent_mapping[data_point] = (batch_features[i], omic)\n\n def __euclidean_distance(self, input, target):\n return torch.cdist(input.unsqueeze(0), target.unsqueeze(0))[0]\n\n def __genPointsByOmicsID(self):\n for omic in self.omics:\n self.pointsByOmicsID[omic] = [k for k, v in self.latent_mapping.items() if v[1] == omic]\n\n def __findNearestNeighbor(self, sample, target_samples):\n\n min_dist = np.inf\n ts_min_dist = None\n\n for ts in target_samples:\n dist = self.__euclidean_distance(sample, ts)\n if dist < min_dist:\n min_dist = dist\n ts_min_dist = ts\n\n return min_dist, ts_min_dist\n\n def nearestNeighbor(self, sample, target_omics_id):\n target_samples = self.pointsByOmicsID[omic_id] # filter by omics id\n distance, ts_min_dist = self.__findNearestNeighbor(sample, target_samples)\n return distance, ts_min_dist\n\n def check_content(self):\n for k, v in self.latent_mapping:\n print(f\"({k}, {v})\")\n\n def __len__(self):\n return len(self.latent_mapping)\n\n\nclass ReconstructionError:\n \"\"\"\n decoders: ordered list of decoders (by omics id)\n latent_space: LatentSpace object\n \"\"\"\n\n def __init__(self, decoders, latent_space):\n self.decoders = decoders\n self.latent_space = latent_space\n\n def pointwise_error(self, **kwargs):\n\n # input: exit of a decoder. target: sample to compare with\n if 'input' in kwargs.keys() and 'target' in kwargs.keys():\n\n input, target = kwargs['input'], kwargs['target']\n\n return torch.cdist(input.unsqueeze(0), target.unsqueeze(0))[0]\n\n # input_latent: point in the latent space (same object)\n # input_omics_id: omics_id of input_latent\n # target_omics_id: omics_id of the samples to evaluate the pointwise error with\n elif 'input_latent' in kwargs.keys() and \\\n 'input_omics_id' in kwargs.keys() and \\\n 'target_omics_id' in kwargs.keys():\n\n input_latent = kwargs['input_latent']\n io_id = kwargs['input_omics_id']\n to_id = kwargs['target_omics_id']\n\n input = self.decoders[io_id](input_latent) # get the exit of the proper decoder\n\n # get the nearest target sample of the correct target omics id\n _, ts_min_dist = self.latent_space.nearestNeighbor(input_latent, to_id)\n\n target = self.latent_space.latent_mapping[ts_min_dist][0] # find the final target\n\n return torch.cdist(input.unsqueeze(0), target.unsqueeze(0))[0] / (distance + 1)\n\n else:\n raise Exception(\"Not handled\")\n return None\n\n def class_error(self, io_id, to_id):\n\n io_samples = self.latent_space.filterByOmicsID(io_id)\n\n cum_error = 0\n for input_latent in io_sample:\n cum_error += self.pointwise_error(\n input_latent=input_latent,\n input_omics_id=io_id,\n target_omics_id=to_id\n )\n\n return cum_error / len(io_samples)\n\n def cumulative_error(self):\n\n cum_error = 0\n\n for io_id in range(self.latent_space.num_omics):\n for to_id in range(self.latent_space.num_omics):\n cum_error += self.class_error(io_id, to_id)\n\n return cum_error / (self.latent_space.num_omics ** 2)\n\n\n\"\"\"\nHOW TO USE:\nore = OmicsReconstructionError(dataset_test_omics, aaes)\nore('mRNA', 'miRNA')\n\"\"\"\n\n\nclass OmicsReconstructionError:\n \"\"\"\n dataset: a dictionary of the form: \n {\n 'mRNA': Dataset,\n 'miRNA': Dataset,\n 'meth27-450-preprocessed': Dataset\n }\n aaes: a dictionary of the form:\n {\n 'mRNA': AAE,\n 'miRNA': AAE,\n 'meth27-450-preprocessed': AAE\n }\n \"\"\"\n supports_train = False\n separate_omics = False\n\n def __init__(self, dataset, aaes, device, **kwargs):\n self.dataset = dataset\n self.device = device\n self.translators = defaultdict(lambda: {})\n for oid1 in self.dataset.keys():\n for oid2 in self.dataset.keys():\n self.translators[oid1][oid2] = DomainTranslator(aaes[oid1].encoder, aaes[oid2].decoder).to(\n self.device).eval()\n\n \"\"\"\n omics_in: id omics that is autoencoded\n omics_out: id omics to calculate the error with\n return: MSE between the two datasets of the corresponding omics\n \"\"\"\n\n def __call__(self, omics_in=None, omics_out=None, mean_strategy='only_translations', **kwargs):\n if omics_in is not None:\n assert omics_out is not None, \"Must set both omics_in and omics_out if omics_in is set\"\n return self._get_error(omics_in, omics_out)\n else:\n if mean_strategy == 'only_translations':\n assert len(list(self.dataset.keys())) > 1, \"'only_translations' strategy can be used if dataset contains more than 1 omic\"\n omics_pairs = [(omic_src, omic_dst) for omic_src in self.dataset.keys() for omic_dst in self.dataset.keys() \\\n if omic_src != omic_dst]\n else:\n omics_pairs = [(omic_src, omic_dst) for omic_src in self.dataset.keys() for omic_dst in self.dataset.keys()]\n\n return sum([self._get_error(*omics_pair) for omics_pair in omics_pairs]) / len(omics_pairs)\n\n def _get_error(self, omics_in, omics_out):\n cum_err = 0\n for index in range(len(self.dataset[omics_in])):\n cum_err += self._error_sample(omics_in, omics_out, index)\n return (cum_err / len(self.dataset[omics_in])).item()\n\n def _error_sample(self, omics_in, omics_out, index):\n return MSELoss(reduction='mean')(\n self.translators[omics_in][omics_out](self.dataset[omics_in][index][0].unsqueeze(0).to(self.device)),\n self.dataset[omics_out][index][0].unsqueeze(0).to(self.device))\n" ]
[ [ "torch.utils.data.DataLoader", "torch.nn.MSELoss", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
evhub/roe
[ "7d5c90e73a97cad66d6efddc87d6d7b8a22d8aed" ]
[ "bbopt/backends/hyperopt.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# __coconut_hash__ = 0xfdeb9be1\n\n# Compiled with Coconut version 2.0.0-a_dev53 [How Not to Be Seen]\n\n\"\"\"\nThe hyperopt backend. Does black box optimization using hyperopt.\n\"\"\"\n\n# Coconut Header: -------------------------------------------------------------\n\nfrom __future__ import print_function, absolute_import, unicode_literals, division\nimport sys as _coconut_sys, os as _coconut_os\n_coconut_file_dir = _coconut_os.path.dirname(_coconut_os.path.dirname(_coconut_os.path.abspath(__file__)))\n_coconut_cached_module = _coconut_sys.modules.get(str(\"__coconut__\"))\nif _coconut_cached_module is not None and _coconut_os.path.dirname(_coconut_cached_module.__file__) != _coconut_file_dir: # type: ignore\n del _coconut_sys.modules[str(\"__coconut__\")]\n_coconut_sys.path.insert(0, _coconut_file_dir)\n_coconut_module_name = _coconut_os.path.splitext(_coconut_os.path.basename(_coconut_file_dir))[0]\nif _coconut_module_name and _coconut_module_name[0].isalpha() and all(c.isalpha() or c.isdigit() for c in _coconut_module_name) and \"__init__.py\" in _coconut_os.listdir(_coconut_file_dir):\n _coconut_full_module_name = str(_coconut_module_name + \".__coconut__\")\n import __coconut__ as _coconut__coconut__\n _coconut__coconut__.__name__ = _coconut_full_module_name\n for _coconut_v in vars(_coconut__coconut__).values():\n if getattr(_coconut_v, \"__module__\", None) == str(\"__coconut__\"):\n try:\n _coconut_v.__module__ = _coconut_full_module_name\n except AttributeError:\n _coconut_v_type = type(_coconut_v)\n if getattr(_coconut_v_type, \"__module__\", None) == str(\"__coconut__\"):\n _coconut_v_type.__module__ = _coconut_full_module_name\n _coconut_sys.modules[_coconut_full_module_name] = _coconut__coconut__\nfrom __coconut__ import *\nfrom __coconut__ import _coconut_call_set_names, _coconut_handle_cls_kwargs, _coconut_handle_cls_stargs, _namedtuple_of, _coconut, _coconut_super, _coconut_MatchError, _coconut_iter_getitem, _coconut_base_compose, _coconut_forward_compose, _coconut_back_compose, _coconut_forward_star_compose, _coconut_back_star_compose, _coconut_forward_dubstar_compose, _coconut_back_dubstar_compose, _coconut_pipe, _coconut_star_pipe, _coconut_dubstar_pipe, _coconut_back_pipe, _coconut_back_star_pipe, _coconut_back_dubstar_pipe, _coconut_none_pipe, _coconut_none_star_pipe, _coconut_none_dubstar_pipe, _coconut_bool_and, _coconut_bool_or, _coconut_none_coalesce, _coconut_minus, _coconut_map, _coconut_partial, _coconut_get_function_match_error, _coconut_base_pattern_func, _coconut_addpattern, _coconut_sentinel, _coconut_assert, _coconut_raise, _coconut_mark_as_match, _coconut_reiterable, _coconut_self_match_types, _coconut_dict_merge, _coconut_exec, _coconut_comma_op, _coconut_multi_dim_arr, _coconut_mk_anon_namedtuple\n_coconut_sys.path.pop(0)\n\n# Compiled Coconut: -----------------------------------------------------------\n\n\n\nsys = _coconut_sys #5 (line num in coconut source)\n\nimport numpy as np #7 (line num in coconut source)\n\nfrom hyperopt import hp #9 (line num in coconut source)\nfrom hyperopt import FMinIter #9 (line num in coconut source)\nfrom hyperopt import tpe #9 (line num in coconut source)\nfrom hyperopt import anneal #9 (line num in coconut source)\nfrom hyperopt.pyll import as_apply #15 (line num in coconut source)\nfrom hyperopt.base import Domain #16 (line num in coconut source)\nfrom hyperopt.base import Trials #16 (line num in coconut source)\nfrom hyperopt.base import STATUS_OK #16 (line num in coconut source)\nfrom hyperopt.base import STATUS_RUNNING #16 (line num in coconut source)\nfrom hyperopt.base import JOB_STATE_DONE #16 (line num in coconut source)\nfrom hyperopt.base import spec_from_misc #16 (line num in coconut source)\n\nfrom bbopt.util import sorted_items #25 (line num in coconut source)\nfrom bbopt.backends.util import StandardBackend #26 (line num in coconut source)\nfrom bbopt.backends.util import negate_objective #26 (line num in coconut source)\nfrom bbopt.backends.util import get_names_and_features #26 (line num in coconut source)\n\n\n# Utilities:\n\ndef create_space(name, func, *args): #35 (line num in coconut source)\n \"\"\"Create a hyperopt space for the given parameter.\"\"\" #36 (line num in coconut source)\n _coconut_case_match_to_0 = func, args #37 (line num in coconut source)\n _coconut_case_match_check_0 = False #37 (line num in coconut source)\n _coconut_match_set_name_choices = _coconut_sentinel #37 (line num in coconut source)\n if _coconut.isinstance(_coconut_case_match_to_0, _coconut.abc.Iterable): #37 (line num in coconut source)\n _coconut_match_temp_0 = _coconut.tuple(_coconut_case_match_to_0) #37 (line num in coconut source)\n if (_coconut.len(_coconut_match_temp_0) == 2) and (_coconut_match_temp_0[0] == \"choice\") and (_coconut.isinstance(_coconut_match_temp_0[1], _coconut.abc.Sequence)) and (_coconut.len(_coconut_match_temp_0[1]) == 1): #37 (line num in coconut source)\n _coconut_match_set_name_choices = _coconut_match_temp_0[1][0] #37 (line num in coconut source)\n _coconut_case_match_check_0 = True #37 (line num in coconut source)\n if _coconut_case_match_check_0: #37 (line num in coconut source)\n if _coconut_match_set_name_choices is not _coconut_sentinel: #37 (line num in coconut source)\n choices = _coconut_match_set_name_choices #37 (line num in coconut source)\n if _coconut_case_match_check_0: #37 (line num in coconut source)\n return hp.choice(name, choices) #39 (line num in coconut source)\n if not _coconut_case_match_check_0: #40 (line num in coconut source)\n _coconut_match_set_name_start = _coconut_sentinel #40 (line num in coconut source)\n _coconut_match_set_name_stop = _coconut_sentinel #40 (line num in coconut source)\n _coconut_match_set_name_step = _coconut_sentinel #40 (line num in coconut source)\n if _coconut.isinstance(_coconut_case_match_to_0, _coconut.abc.Iterable): #40 (line num in coconut source)\n _coconut_match_temp_1 = _coconut.tuple(_coconut_case_match_to_0) #40 (line num in coconut source)\n if (_coconut.len(_coconut_match_temp_1) == 2) and (_coconut_match_temp_1[0] == \"randrange\") and (_coconut.isinstance(_coconut_match_temp_1[1], _coconut.abc.Sequence)) and (_coconut.len(_coconut_match_temp_1[1]) == 3): #40 (line num in coconut source)\n _coconut_match_set_name_start = _coconut_match_temp_1[1][0] #40 (line num in coconut source)\n _coconut_match_set_name_stop = _coconut_match_temp_1[1][1] #40 (line num in coconut source)\n _coconut_match_set_name_step = _coconut_match_temp_1[1][2] #40 (line num in coconut source)\n _coconut_case_match_check_0 = True #40 (line num in coconut source)\n if _coconut_case_match_check_0: #40 (line num in coconut source)\n if _coconut_match_set_name_start is not _coconut_sentinel: #40 (line num in coconut source)\n start = _coconut_match_set_name_start #40 (line num in coconut source)\n if _coconut_match_set_name_stop is not _coconut_sentinel: #40 (line num in coconut source)\n stop = _coconut_match_set_name_stop #40 (line num in coconut source)\n if _coconut_match_set_name_step is not _coconut_sentinel: #40 (line num in coconut source)\n step = _coconut_match_set_name_step #40 (line num in coconut source)\n if _coconut_case_match_check_0: #40 (line num in coconut source)\n if step != 1: #41 (line num in coconut source)\n raise ValueError(\"the hyperopt backend only supports a randrange step size of 1 (use bb.choice(name, range(start, stop, step)) instead)\") #42 (line num in coconut source)\n# despite being called randint, hp.randint is exclusive\n return start + hp.randint(name, stop - start) #44 (line num in coconut source)\n if not _coconut_case_match_check_0: #45 (line num in coconut source)\n _coconut_match_set_name_a = _coconut_sentinel #45 (line num in coconut source)\n _coconut_match_set_name_b = _coconut_sentinel #45 (line num in coconut source)\n if _coconut.isinstance(_coconut_case_match_to_0, _coconut.abc.Iterable): #45 (line num in coconut source)\n _coconut_match_temp_2 = _coconut.tuple(_coconut_case_match_to_0) #45 (line num in coconut source)\n if (_coconut.len(_coconut_match_temp_2) == 2) and (_coconut_match_temp_2[0] == \"uniform\") and (_coconut.isinstance(_coconut_match_temp_2[1], _coconut.abc.Sequence)) and (_coconut.len(_coconut_match_temp_2[1]) == 2): #45 (line num in coconut source)\n _coconut_match_set_name_a = _coconut_match_temp_2[1][0] #45 (line num in coconut source)\n _coconut_match_set_name_b = _coconut_match_temp_2[1][1] #45 (line num in coconut source)\n _coconut_case_match_check_0 = True #45 (line num in coconut source)\n if _coconut_case_match_check_0: #45 (line num in coconut source)\n if _coconut_match_set_name_a is not _coconut_sentinel: #45 (line num in coconut source)\n a = _coconut_match_set_name_a #45 (line num in coconut source)\n if _coconut_match_set_name_b is not _coconut_sentinel: #45 (line num in coconut source)\n b = _coconut_match_set_name_b #45 (line num in coconut source)\n if _coconut_case_match_check_0: #45 (line num in coconut source)\n return hp.uniform(name, a, b) #46 (line num in coconut source)\n if not _coconut_case_match_check_0: #47 (line num in coconut source)\n _coconut_match_set_name_mu = _coconut_sentinel #47 (line num in coconut source)\n _coconut_match_set_name_sigma = _coconut_sentinel #47 (line num in coconut source)\n if _coconut.isinstance(_coconut_case_match_to_0, _coconut.abc.Iterable): #47 (line num in coconut source)\n _coconut_match_temp_3 = _coconut.tuple(_coconut_case_match_to_0) #47 (line num in coconut source)\n if (_coconut.len(_coconut_match_temp_3) == 2) and (_coconut_match_temp_3[0] == \"normalvariate\") and (_coconut.isinstance(_coconut_match_temp_3[1], _coconut.abc.Sequence)) and (_coconut.len(_coconut_match_temp_3[1]) == 2): #47 (line num in coconut source)\n _coconut_match_set_name_mu = _coconut_match_temp_3[1][0] #47 (line num in coconut source)\n _coconut_match_set_name_sigma = _coconut_match_temp_3[1][1] #47 (line num in coconut source)\n _coconut_case_match_check_0 = True #47 (line num in coconut source)\n if _coconut_case_match_check_0: #47 (line num in coconut source)\n if _coconut_match_set_name_mu is not _coconut_sentinel: #47 (line num in coconut source)\n mu = _coconut_match_set_name_mu #47 (line num in coconut source)\n if _coconut_match_set_name_sigma is not _coconut_sentinel: #47 (line num in coconut source)\n sigma = _coconut_match_set_name_sigma #47 (line num in coconut source)\n if _coconut_case_match_check_0: #47 (line num in coconut source)\n return hp.normal(name, mu, sigma) #48 (line num in coconut source)\n raise TypeError(\"invalid parameter {_coconut_format_0}\".format(_coconut_format_0=(name))) #49 (line num in coconut source)\n\n\n\ndef examples_to_trials(examples, params): #52 (line num in coconut source)\n \"\"\"Create hyperopt trials from the given examples.\"\"\" #53 (line num in coconut source)\n trials = [] #54 (line num in coconut source)\n NA = object() # used to mark missing values #55 (line num in coconut source)\n\n for tid, ex in enumerate(examples): #57 (line num in coconut source)\n\n _coconut_match_to_0 = ex #59 (line num in coconut source)\n _coconut_match_check_0 = False #59 (line num in coconut source)\n _coconut_match_set_name_gain = _coconut_sentinel #59 (line num in coconut source)\n if _coconut.isinstance(_coconut_match_to_0, _coconut.abc.Mapping): #59 (line num in coconut source)\n _coconut_match_temp_4 = _coconut_match_to_0.get(\"gain\", _coconut_sentinel) #59 (line num in coconut source)\n if _coconut_match_temp_4 is not _coconut_sentinel: #59 (line num in coconut source)\n _coconut_match_set_name_gain = _coconut_match_temp_4 #59 (line num in coconut source)\n _coconut_match_check_0 = True #59 (line num in coconut source)\n if _coconut_match_check_0: #59 (line num in coconut source)\n if _coconut_match_set_name_gain is not _coconut_sentinel: #59 (line num in coconut source)\n gain = _coconut_match_set_name_gain #59 (line num in coconut source)\n if _coconut_match_check_0: #59 (line num in coconut source)\n loss = negate_objective(gain) #60 (line num in coconut source)\n else: #61 (line num in coconut source)\n loss = ex[\"loss\"] #62 (line num in coconut source)\n result = {\"status\": STATUS_OK, \"loss\": loss} #63 (line num in coconut source)\n\n vals = {} #68 (line num in coconut source)\n idxs = {} #69 (line num in coconut source)\n for k, v in get_names_and_features(ex[\"values\"], params, fallback_func=lambda name, func, *args, **kwargs: NA, converters={\"choice\": lambda val, choices: choices.index(val), \"randrange\": lambda val, start, stop, step: val - start}, convert_fallback=False): #70 (line num in coconut source)\n vals[k] = [v,] if v is not NA else [] #80 (line num in coconut source)\n idxs[k] = [tid,] if v is not NA else [] #81 (line num in coconut source)\n\n misc = {\"tid\": tid, \"idxs\": idxs, \"vals\": vals, \"cmd\": None} #83 (line num in coconut source)\n\n trials.append({\"tid\": tid, \"result\": result, \"misc\": misc, \"spec\": spec_from_misc(misc), \"state\": JOB_STATE_DONE, \"owner\": None, \"book_time\": None, \"refresh_time\": None, \"exp_key\": None}) #90 (line num in coconut source)\n\n return trials #102 (line num in coconut source)\n\n\n# Backend:\n\n\nclass HyperoptBackend(StandardBackend): #107 (line num in coconut source)\n \"\"\"The hyperopt backend uses hyperopt for black box optimization.\"\"\" #108 (line num in coconut source)\n backend_name = \"hyperopt\" #109 (line num in coconut source)\n implemented_funcs = (\"choice\", \"randrange\", \"uniform\", \"normalvariate\") #110 (line num in coconut source)\n\n @override #118 (line num in coconut source)\n def setup_backend(self, params, algo=tpe.suggest, rstate=None, show_progressbar=False, **options): #119 (line num in coconut source)\n \"\"\"Special method to initialize the backend from params.\"\"\" #120 (line num in coconut source)\n if rstate is None: #121 (line num in coconut source)\n try: #122 (line num in coconut source)\n rstate = np.random.default_rng() #123 (line num in coconut source)\n except AttributeError: #124 (line num in coconut source)\n rstate = np.random.RandomState() #125 (line num in coconut source)\n self.params = params #126 (line num in coconut source)\n\n space = (as_apply)(dict(((name), (create_space(name, func, *args))) for name, (func, args, kwargs) in sorted_items(params))) #128 (line num in coconut source)\n\n domain = Domain(self.set_current_values, space) #133 (line num in coconut source)\n\n self.trials = Trials() #135 (line num in coconut source)\n\n self.fmin_iter = FMinIter(algo, domain, self.trials, rstate, show_progressbar=show_progressbar, **options) #137 (line num in coconut source)\n\n\n @override #146 (line num in coconut source)\n def tell_examples(self, new_examples): #147 (line num in coconut source)\n \"\"\"Special method that allows fast updating of the backend with new examples.\"\"\" #148 (line num in coconut source)\n trial_list = examples_to_trials(new_examples, self.params) #149 (line num in coconut source)\n self.trials.insert_trial_docs(trial_list) #150 (line num in coconut source)\n self.trials.refresh() #151 (line num in coconut source)\n\n# run one iteration of hyperparameter optimization, with values saved\n# to the self.set_current_values callback passed to Domain\n next(self.fmin_iter) #155 (line num in coconut source)\n\n assert self.current_values is not None, self.current_values #157 (line num in coconut source)\n assert set(self.current_values.keys()) == set(self.params), self.current_values #158 (line num in coconut source)\n\n\n def set_current_values(self, values): #160 (line num in coconut source)\n \"\"\"Callback to set the values for this run.\"\"\" #161 (line num in coconut source)\n assert isinstance(values, dict), values #162 (line num in coconut source)\n self.current_values = values #163 (line num in coconut source)\n return {\"status\": STATUS_RUNNING} #164 (line num in coconut source)\n\n\n# Registered names:\n\n\n_coconut_call_set_names(HyperoptBackend) #171 (line num in coconut source)\nHyperoptBackend.register() #171 (line num in coconut source)\n\nHyperoptBackend.register_alg(\"tree_structured_parzen_estimator\", algo=tpe.suggest) #173 (line num in coconut source)\nHyperoptBackend.register_alg(\"annealing\", algo=anneal.suggest) #174 (line num in coconut source)\nif sys.version_info >= (3,): #175 (line num in coconut source)\n from hyperopt import atpe #176 (line num in coconut source)\n HyperoptBackend.register_alg(\"adaptive_tpe\", algo=atpe.suggest) #177 (line num in coconut source)\n\nHyperoptBackend.register_meta_for_all_algs(\"any_hyperopt\") #179 (line num in coconut source)\n" ]
[ [ "numpy.random.RandomState", "numpy.random.default_rng" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
EPRI-SQA/Melody
[ "b9e576676fecefc7a3da1611e1c068f3cc728a89" ]
[ "srcs/utils/pcap_visualize.py" ]
[ "import dpkt\nfrom argparse import ArgumentParser\nfrom srcs.proto import css_pb2\nimport pandas as pd\nimport plotly.offline as py\nimport plotly.graph_objs as go\n\n\ndef process_pcap(pcap_file,keys_to_plot):\n\t\n\tpcap = dpkt.pcap.Reader(open(pcap_file))\n\tpkt_parsed = css_pb2.CyberMessage()\n\n\tunique_app_id_pairs = []\n\n\tdata_fields = ['timestamp','src_dst'] + keys_to_plot\n\n\tpcap_dataframe = pd.DataFrame(columns=data_fields)\n\n\tfor timestamp, buf in pcap:\n\n\t\t#unpack the ethernet frame\n\t\teth = dpkt.ethernet.Ethernet(buf)\n\t\tip = eth.data\n\t\t\n\t\t#we are only intersted in the UDP packets that follow our custom protocol\n\t\tif ip.p == dpkt.ip.IP_PROTO_UDP:\n\t\t\n\t\t\tudp_payload = ip.data.data\n\t\t\tpkt_parsed.ParseFromString(str(udp_payload))\n\t\t\t#extract the source-dst application ID pair\n\t\t\tsrc_application_id = pkt_parsed.src_application_id\n\t\t\tdst_application_id = pkt_parsed.dst_application_id\n\n\t\t\tpcap_dataframe_entry = [timestamp, (src_application_id,dst_application_id)]\n\n\t\t\tif (src_application_id,dst_application_id) not in unique_app_id_pairs:\n\t\t\t\tunique_app_id_pairs.append((src_application_id,dst_application_id))\n\n\t\t\t#Finish this function for when the key requested is not in the packet\n\t\t\tfor key in keys_to_plot:\n\t\t\t\tfor content in pkt_parsed.content:\n\t\t\t\t\tif content.key == key:\n\t\t\t\t\t\tpcap_dataframe_entry.append(float(content.value))\n\t\t\n\t\t\tdf_append = pd.DataFrame([pcap_dataframe_entry], columns=data_fields)\n\t\t\tpcap_dataframe = pd.concat([pcap_dataframe, df_append], axis=0)\n\t\n\tstart_time = pcap_dataframe['timestamp'].iloc[0]\n\tpcap_dataframe['rel_time'] = pcap_dataframe['timestamp'] - start_time\n\t#convert unix time to pandas datetime\n\t#pcap_dataframe['time'] = pd.to_datetime(pcap_dataframe['timestamp'],unit='ms',origin='unix')\n\t# Reset Index\n\tpcap_dataframe = pcap_dataframe.reset_index()\n\t# Drop old index column\n\tpcap_dataframe = pcap_dataframe.drop(columns=\"index\")\n\n\treturn pcap_dataframe, unique_app_id_pairs\n\ndef physical_measurement_plotter(dataframe, src_dst_pairs,keys_to_plot):\n\n\t#fix the code to work for multiple keys\n\tfor key in keys_to_plot:\n\t\tdata =[]\n\t\tfor src_dst in src_dst_pairs:\n\t\t\tpcap_dataframe_src_dst = dataframe[dataframe.src_dst == src_dst]\n\n\t\t\tdata.append(go.Scatter(x=pcap_dataframe_src_dst['rel_time'], y=pcap_dataframe_src_dst[key], name=str(src_dst[0] + ',' + src_dst[1])))\n\n\t\tlayout = go.Layout(\n\t\t xaxis=dict(\n\t\t title='Time since start of emulation',\n\t\t showticklabels=True,\t\t \n\t\t ),\n\t\t yaxis=dict(\n\t\t title=key,\n\t\t showticklabels=True,\n\t\t )\n\t\t)\n\t\tfig = go.Figure(data=data, layout =layout)\n\t\n\tpy.plot(fig, filename = 'physical_measurement_plot.html')\n\t\nif __name__ == \"__main__\":\n\n\tparser = ArgumentParser(description='Visualize data from a given pcap file')\n\tparser.add_argument('-f','--filename', required=True, help=\"name of .pcap file that needs analysis\")\n\tparser.add_argument('-k','--keys', action='append', help='keys that you wish to plot the time series for', required=True)\n\targs = parser.parse_args()\n\tpcap_file = args.filename\n\tkeys_to_plot = args.keys\n\tpcap_dataframe, unique_app_id_pairs = process_pcap(pcap_file,keys_to_plot)\n\tphysical_measurement_plotter(pcap_dataframe,unique_app_id_pairs,keys_to_plot)\n\n" ]
[ [ "pandas.concat", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
jhkennedy/asf-tools
[ "435a544bd6f3f4953679e5d891c0e454f7bdd471" ]
[ "asf_tools/tile.py" ]
[ "from typing import Tuple, Union\n\nimport numpy as np\n\n\ndef tile_array(array: Union[np.ndarray, np.ma.MaskedArray], tile_shape: Tuple[int, int] = (200, 200),\n pad_value: float = None) -> Union[np.ndarray, np.ma.MaskedArray]:\n \"\"\"Tile a 2D numpy array\n\n Turn a 2D numpy array like:\n >>> array = [[0, 0, 1, 1],\n ... [0, 0, 1, 1],\n ... [2, 2, 3, 3],\n ... [2, 2, 3, 3]]\n >>> array.shape\n (4, 4)\n\n into a tiled array like:\n >>> tiles = tiled_array(array, 2, 2)\n >>> print(tiles)\n [[[0, 0],\n [0, 0]],\n [[1, 1],\n [1, 1]],\n [[2, 2],\n [2, 2]],\n [[3, 3],\n [3, 3]]]\n >>> tiles.shape\n (4, 2, 2)\n\n Args:\n array: 2D array to tile\n tile_shape: the shape of each tile\n pad_value: right-bottom pad `a` with `pad` as needed so `a` is evenly divisible into tiles\n\n Returns:\n the tiled array\n \"\"\"\n array_rows, array_columns = array.shape\n tile_rows, tile_columns = tile_shape\n\n # CREDIT: https://twitter.com/LizzUltee/status/1379508448262512641\n rpad = -array_rows % tile_rows\n cpad = -array_columns % tile_columns\n\n if (rpad or cpad) and pad_value is None:\n raise ValueError(f'Cannot evenly tile a {array.shape} array into ({tile_rows},{tile_columns}) tiles')\n\n if rpad or cpad:\n padded_array = np.pad(array, ((0, rpad), (0, cpad)), constant_values=pad_value)\n if isinstance(array, np.ma.MaskedArray):\n mask = np.pad(array.mask, ((0, rpad), (0, cpad)), constant_values=True)\n padded_array = np.ma.MaskedArray(padded_array, mask=mask)\n else:\n padded_array = array\n\n tile_list = []\n for rows in np.vsplit(padded_array, range(tile_rows, array_rows, tile_rows)):\n tile_list.extend(np.hsplit(rows, range(tile_columns, array_columns, tile_columns)))\n\n dstack = np.ma.dstack if isinstance(array, np.ma.MaskedArray) else np.dstack\n tiled = np.moveaxis(dstack(tile_list), -1, 0)\n\n return tiled\n\n\ndef untile_array(tiled_array: Union[np.ndarray, np.ma.MaskedArray], array_shape: Tuple[int, int]) \\\n -> Union[np.ndarray, np.ma.MaskedArray]:\n \"\"\"Untile a tiled array into a 2D numpy array\n\n This is the reverse of `tile_array` and will turn a tiled array like:\n >>> tiled_array = [[[0,0],\n ... [0,0]],\n ... [[1,1],\n ... [1,1]],\n ... [[2,2],\n ... [2,2]],\n ... [[3,3],\n ... [3,3]]]\n >>> tiled_array.shape\n (4, 2, 2)\n\n into a 2D array like:\n >>> array = untile_array(tiled_array)\n >>> print(array)\n [[0, 0, 1, 1],\n [0, 0, 1, 1],\n [2, 2, 3, 3],\n [2, 2, 3, 3]]\n >>> array.shape\n (4, 4)\n\n Args:\n tiled_array: a tiled array\n array_shape: shape to untile the array to. If array_shape's size is smaller\n than the tiled array, `untile_array` will subset the tiled array assuming\n bottom right padding was added when tiling.\n\n Returns:\n the untiled array\n \"\"\"\n _, tile_rows, tile_columns = tiled_array.shape\n array_rows, array_columns = array_shape\n\n untiled_rows = int(np.ceil(array_rows / tile_rows))\n untiled_columns = int(np.ceil(array_columns / tile_columns))\n\n untiled = np.zeros((untiled_rows*tile_rows, untiled_columns*tile_columns), dtype=tiled_array.dtype)\n\n if (array_size := array_rows * array_columns) > tiled_array.size:\n raise ValueError(\n f'array_shape {array_shape} will result in an array bigger than the tiled array:'\n f' {array_size} > {tiled_array.size}'\n )\n\n for ii in range(untiled_rows):\n for jj in range(untiled_columns):\n untiled[ii*tile_rows:(ii+1)*tile_rows, jj*tile_columns:(jj+1)*tile_columns] = \\\n tiled_array[ii * untiled_columns + jj, :, :]\n\n if isinstance(tiled_array, np.ma.MaskedArray):\n untiled_mask = untile_array(tiled_array.mask, untiled.shape)\n untiled = np.ma.MaskedArray(untiled, mask=untiled_mask)\n\n return untiled[:array_rows, :array_columns]\n" ]
[ [ "numpy.ceil", "numpy.ma.MaskedArray", "numpy.zeros", "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
klobrien89/differential-privacy
[ "327972c1ae710e8cd0a4754fffdd78c3500272ee" ]
[ "python/dp_accounting/privacy_loss_distribution.py" ]
[ "# Copyright 2020 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implementing Privacy Loss Distribution.\n\nThis file implements the privacy loss distribution (PLD) and its basic\nfunctionalities. The main feature of PLD is that it allows for accurate\ncomputation of privacy parameters under composition. Please refer to the\nsupplementary material below for more details:\n../../common_docs/Privacy_Loss_Distributions.pdf\n\"\"\"\n\nimport collections\nimport math\nimport typing\n\nimport numpy as np\n\nfrom dp_accounting import common\nfrom dp_accounting import privacy_loss_mechanism\n\n\nclass PrivacyLossDistribution(object):\n \"\"\"Class for privacy loss distributions and computation involving them.\n\n The privacy loss distribution (PLD) of two discrete distributions, the upper\n distribution mu_upper and the lower distribution mu_lower, is defined as a\n distribution on real numbers generated by first sampling an outcome o\n according to mu_upper and then outputting the privacy loss\n ln(mu_upper(o) / mu_lower(o)) where mu_lower(o) and mu_upper(o) are the\n probability masses of o in mu_lower and mu_upper respectively. This class\n allows one to create and manipulate privacy loss distributions.\n\n PLD allows one to (approximately) compute the epsilon-hockey stick divergence\n between mu_upper and mu_lower, which is defined as\n sum_{o} [mu_upper(o) - e^{epsilon} * mu_lower(o)]_+. This quantity in turn\n governs the parameter delta of (eps, delta)-differential privacy of the\n corresponding protocol. (See Observation 1 in the supplementary material.)\n\n The above definitions extend to continuous distributions. The PLD of two\n continuous distributions mu_upper and mu_lower is defined as a distribution on\n real numbers generated by first sampling an outcome o according to mu_upper\n and then outputting the privacy loss ln(f_{mu_upper}(o) / f_{mu_lower}(o))\n where f_{mu_lower}(o) and f_{mu_upper}(o) are the probability density\n functions at o in mu_lower and mu_upper respectively. Moreover, for continuous\n distributions the epsilon-hockey stick divergence is defined as\n integral [f_{mu_upper}(o) - e^{epsilon} * f_{mu_lower}(o)]_+ do.\n\n Attributes:\n value_discretization_interval: the interval length for which the values of\n the privacy loss distribution are discretized. In particular, the values\n are always integer multiples of value_discretization_interval.\n rounded_probability_mass_function: the probability mass function for the\n privacy loss distribution where each value is rounded to be an integer\n multiple of value_discretization_interval. To avoid floating point errors\n in the values, the keys here are the integer multipliers. For example,\n suppose that the probability mass function assigns mass of 0.1 to the\n value 2 * value_discretization_interval, then the dictionary will have\n (key: value) pair (2: 0.1).\n infinity_mass: The probability mass of mu_upper over all the outcomes that\n can occur only in mu_upper but not in mu_lower.(These outcomes result in\n privacy loss ln(mu_upper(o) / mu_lower(o)) of infinity.)\n pessimistic_estimate: whether the rounding is done in such a way that the\n resulting epsilon-hockey stick divergence computation gives an upper\n estimate to the real value.\n \"\"\"\n\n def __init__(self,\n rounded_probability_mass_function: typing.Mapping[int, float],\n value_discretization_interval: float,\n infinity_mass: float,\n pessimistic_estimate: bool = True):\n self.rounded_probability_mass_function = rounded_probability_mass_function\n self.value_discretization_interval = value_discretization_interval\n self.infinity_mass = infinity_mass\n self.pessimistic_estimate = pessimistic_estimate\n\n @classmethod\n def identity(\n cls,\n value_discretization_interval: float = 1e-4) -> 'PrivacyLossDistribution':\n \"\"\"Constructs an identity privacy loss distribution.\n\n Args:\n value_discretization_interval: the dicretization interval for the privacy\n loss distribution. The values will be rounded up/down to be integer\n multiples of this number.\n\n Returns:\n The privacy loss distribution corresponding to an algorithm with no\n privacy leak (i.e. output is independent of input).\n \"\"\"\n return cls({0: 1}, value_discretization_interval, 0)\n\n @classmethod\n def from_two_probability_mass_functions(\n cls,\n log_probability_mass_function_lower: typing.Mapping[typing.Any, float],\n log_probability_mass_function_upper: typing.Mapping[typing.Any, float],\n pessimistic_estimate: bool = True,\n value_discretization_interval: float = 1e-4,\n log_mass_truncation_bound: float = -math.inf\n ) -> 'PrivacyLossDistribution':\n \"\"\"Constructs a privacy loss distribution from mu_lower and mu_upper.\n\n Args:\n log_probability_mass_function_lower: the probability mass function of\n mu_lower represented as a dictionary where each key is an outcome o of\n mu_lower and the corresponding value is the natural log of the\n probability mass of mu_lower at o.\n log_probability_mass_function_upper: the probability mass function of\n mu_upper represented as a dictionary where each key is an outcome o of\n mu_upper and the corresponding value is the natural log of the\n probability mass of mu_upper at o.\n pessimistic_estimate: whether the rounding is done in such a way that the\n resulting epsilon-hockey stick divergence computation gives an upper\n estimate to the real value.\n value_discretization_interval: the dicretization interval for the privacy\n loss distribution. The values will be rounded up/down to be integer\n multiples of this number.\n log_mass_truncation_bound: when the log of the probability mass of the\n upper distribution is below this bound, it is either (i) included in\n infinity_mass in the case of pessimistic estimate or (ii) discarded\n completely in the case of optimistic estimate. The larger\n log_mass_truncation_bound is, the more error it may introduce in\n divergence calculations.\n\n Returns:\n The privacy loss distribution constructed as specified.\n \"\"\"\n\n infinity_mass = 0\n for outcome in log_probability_mass_function_upper:\n if (log_probability_mass_function_lower.get(outcome,\n -math.inf) == -math.inf):\n # When an outcome only appears in the upper distribution but not in the\n # lower distribution, then it must be counted in infinity_mass as such\n # an outcome contributes to the hockey stick divergence.\n infinity_mass += math.exp(log_probability_mass_function_upper[outcome])\n\n # Compute the (non-discretized) probability mass function for the privacy\n # loss distribution.\n probability_mass_function = {}\n for outcome in log_probability_mass_function_lower:\n if log_probability_mass_function_lower[outcome] == -math.inf:\n # This outcome never occurs in mu_lower. This case was already included\n # as infinity_mass above.\n continue\n elif (log_probability_mass_function_upper.get(outcome, -math.inf) >\n log_mass_truncation_bound):\n # When the probability mass of mu_upper at the outcome is greater than\n # the threshold, add it to the distribution.\n privacy_loss_value = (\n log_probability_mass_function_upper[outcome] -\n log_probability_mass_function_lower[outcome])\n probability_mass_function[privacy_loss_value] = (\n probability_mass_function.get(privacy_loss_value, 0) +\n math.exp(log_probability_mass_function_upper[outcome]))\n else:\n if pessimistic_estimate:\n # When the probability mass of mu_upper at the outcome is no more than\n # the threshold and we would like to get a pessimistic estimate,\n # account for this in infinity_mass.\n infinity_mass += math.exp(\n log_probability_mass_function_upper.get(outcome, -math.inf))\n\n # Discretize the probability mass so that the values are integer multiples\n # of value_discretization_interval\n rounded_probability_mass_function = collections.defaultdict(lambda: 0)\n round_fn = math.ceil if pessimistic_estimate else math.floor\n for val in probability_mass_function:\n rounded_probability_mass_function[round_fn(\n val /\n value_discretization_interval)] += probability_mass_function[val]\n\n return cls(\n rounded_probability_mass_function,\n value_discretization_interval,\n infinity_mass,\n pessimistic_estimate=pessimistic_estimate)\n\n @classmethod\n def create_from_additive_noise(\n cls,\n additive_noise_privacy_loss:\n 'privacy_loss_mechanism.AdditiveNoisePrivacyLoss',\n pessimistic_estimate: bool = True,\n value_discretization_interval: float = 1e-4) -> 'PrivacyLossDistribution':\n \"\"\"Constructs the privacy loss distribution of an additive noise mechanism.\n\n An additive noise mechanism for computing a scalar-valued function f is a\n mechanism that outputs the sum of the true value of the function and a noise\n drawn from a certain distribution mu. This function calculates the privacy\n loss distribution for such an additive noise mechanism.\n\n Args:\n additive_noise_privacy_loss: the privacy loss representation of the\n mechanism.\n pessimistic_estimate: a value indicating whether the rounding is done in\n such a way that the resulting epsilon-hockey stick divergence\n computation gives an upper estimate to the real value.\n value_discretization_interval: the length of the dicretization interval\n for the privacy loss distribution. The values will be rounded up/down to\n be integer multiples of this number.\n\n Returns:\n The privacy loss distribution constructed as specified.\n \"\"\"\n round_fn = math.ceil if pessimistic_estimate else math.floor\n\n tail_pld = additive_noise_privacy_loss.privacy_loss_tail()\n\n rounded_probability_mass_function = collections.defaultdict(lambda: 0)\n infinity_mass = tail_pld.tail_probability_mass_function.get(math.inf, 0)\n for privacy_loss in tail_pld.tail_probability_mass_function:\n if privacy_loss != math.inf:\n rounded_probability_mass_function[round_fn(\n privacy_loss / value_discretization_interval\n )] += tail_pld.tail_probability_mass_function[privacy_loss]\n\n if additive_noise_privacy_loss.discrete_noise:\n xs = list(\n range(\n math.ceil(tail_pld.lower_x_truncation) - 1,\n math.floor(tail_pld.upper_x_truncation) + 1))\n\n # Compute PMF for the x's. Note that a vectorized call to noise_cdf can be\n # much faster than many scalar calls.\n cdf_values = additive_noise_privacy_loss.noise_cdf(xs)\n probability_mass = cdf_values[1:] - cdf_values[:-1]\n\n for x, prob in zip(xs[1:], probability_mass):\n rounded_probability_mass_function[round_fn(\n additive_noise_privacy_loss.privacy_loss(x) /\n value_discretization_interval)] += prob\n else:\n lower_x = tail_pld.lower_x_truncation\n rounded_down_value = math.floor(\n additive_noise_privacy_loss.privacy_loss(lower_x) /\n value_discretization_interval)\n\n # Compute discretization intervals for PLD approximation.\n xs, rounded_values = [lower_x], []\n x = lower_x\n while x < tail_pld.upper_x_truncation:\n x = min(\n tail_pld.upper_x_truncation,\n additive_noise_privacy_loss.inverse_privacy_loss(\n value_discretization_interval * rounded_down_value))\n\n xs.append(x)\n rounded_values.append(round_fn(rounded_down_value + 0.5))\n rounded_down_value -= 1\n\n # Compute PLD for discretization intervals. Note that a vectorized call to\n # noise_cdf is much faster than many scalar calls.\n cdf_values = additive_noise_privacy_loss.noise_cdf(xs)\n probability_mass = cdf_values[1:] - cdf_values[:-1]\n\n # Each x in [lower_x, upper_x] results in privacy loss that lies in\n # [value_discretization_interval * rounded_down_value,\n # value_discretization_interval * (rounded_down_value + 1)]\n for rounded_value, prob in zip(rounded_values, probability_mass):\n rounded_probability_mass_function[rounded_value] += prob\n\n return cls(\n dict(rounded_probability_mass_function),\n value_discretization_interval,\n infinity_mass,\n pessimistic_estimate=pessimistic_estimate)\n\n @classmethod\n def from_randomized_response(\n cls,\n noise_parameter: float,\n num_buckets: int,\n pessimistic_estimate: bool = True,\n value_discretization_interval: float = 1e-4) -> 'PrivacyLossDistribution':\n \"\"\"Constructs the privacy loss distribution of Randomized Response.\n\n The Randomized Response over k buckets with noise parameter p takes in an\n input which is one of the k buckets. With probability 1 - p, it simply\n outputs the input bucket. Otherwise, with probability p, it outputs a bucket\n drawn uniformly at random from the k buckets.\n\n This function calculates the privacy loss distribution for the\n aforementioned Randomized Response with a given number of buckets, and a\n given noise parameter.\n\n Specifically, suppose that the original input is x and it is changed to x'.\n Recall that the privacy loss distribution of the Randomized Response\n mechanism is generated as follows: first pick o according to R(x), where\n R(x) denote the output distribution of the Randomized Response mechanism\n on input x. Then, the privacy loss is ln(Pr[R(x) = o] / Pr[R(x') = o]).\n There are three cases here:\n - When o = x, ln(Pr[R(x) = o] / Pr[R(x') = o]) =\n ln(Pr[R(x) = x] / Pr[R(x') = x]). Here Pr[R(x) = x] = 1 - p + p / k\n and Pr[R(x') = x] = p / k.\n - When o = x', ln(Pr[R(x) = o] / Pr[R(x') = o]) =\n ln(Pr[R(x') = x'] / Pr[R(x) = x']), which is just the negation of the\n previous privacy loss.\n - When o != x, x', the privacy loss is zero.\n\n Args:\n noise_parameter: the probability that the Randomized Response outputs a\n completely random bucket.\n num_buckets: the total number of possible input values (which is equal to\n the total number of possible output values).\n pessimistic_estimate: a value indicating whether the rounding is done in\n such a way that the resulting epsilon-hockey stick divergence\n computation gives an upper estimate to the real value.\n value_discretization_interval: the length of the dicretization interval\n for the privacy loss distribution. The values will be rounded up/down to\n be integer multiples of this number.\n\n Returns:\n The privacy loss distribution constructed as specified.\n \"\"\"\n\n if noise_parameter <= 0 or noise_parameter >= 1:\n raise ValueError(f'Noise parameter must be strictly between 0 and 1: '\n f'{noise_parameter}')\n\n if num_buckets <= 1:\n raise ValueError(\n f'Number of buckets must be strictly greater than 1: {num_buckets}')\n\n round_fn = math.ceil if pessimistic_estimate else math.floor\n\n rounded_probability_mass_function = collections.defaultdict(lambda: 0)\n\n # Probability that the output is equal to the input, i.e., Pr[R(x) = x]\n probability_output_equal_input = ((1 - noise_parameter) +\n noise_parameter / num_buckets)\n # Probability that the output is equal to a specific bucket that is not the\n # input, i.e., Pr[R(x') = x] for x' != x.\n probability_output_not_input = noise_parameter / num_buckets\n\n # Add privacy loss for the case o = x\n rounded_value = round_fn(\n math.log(probability_output_equal_input / probability_output_not_input)\n / value_discretization_interval)\n rounded_probability_mass_function[\n rounded_value] += probability_output_equal_input\n\n # Add privacy loss for the case o = x'\n rounded_value = round_fn(\n math.log(probability_output_not_input / probability_output_equal_input)\n / value_discretization_interval)\n rounded_probability_mass_function[\n rounded_value] += probability_output_not_input\n\n # Add privacy loss for the case o != x, x'\n rounded_probability_mass_function[0] += (\n probability_output_not_input * (num_buckets - 2))\n\n return cls(\n rounded_probability_mass_function,\n value_discretization_interval,\n 0,\n pessimistic_estimate=pessimistic_estimate)\n\n @classmethod\n def from_laplace_mechanism(\n cls,\n parameter: float,\n sensitivity: float = 1,\n pessimistic_estimate: bool = True,\n value_discretization_interval: float = 1e-4) -> 'PrivacyLossDistribution':\n \"\"\"Computes the privacy loss distribution of the Laplace mechanism.\n\n Args:\n parameter: the parameter of the Laplace distribution.\n sensitivity: the sensitivity of function f. (i.e. the maximum absolute\n change in f when an input to a single user changes.)\n pessimistic_estimate: a value indicating whether the rounding is done in\n such a way that the resulting epsilon-hockey stick divergence\n computation gives an upper estimate to the real value.\n value_discretization_interval: the length of the dicretization interval\n for the privacy loss distribution. The values will be rounded up/down to\n be integer multiples of this number.\n\n Returns:\n The privacy loss distribution corresponding to the Laplace mechanism with\n given parameters.\n \"\"\"\n return PrivacyLossDistribution.create_from_additive_noise(\n privacy_loss_mechanism.LaplacePrivacyLoss(\n parameter, sensitivity=sensitivity),\n pessimistic_estimate=pessimistic_estimate,\n value_discretization_interval=value_discretization_interval)\n\n @classmethod\n def from_gaussian_mechanism(\n cls,\n standard_deviation: float,\n sensitivity: float = 1,\n pessimistic_estimate: bool = True,\n value_discretization_interval: float = 1e-4,\n log_mass_truncation_bound: float = -50) -> 'PrivacyLossDistribution':\n \"\"\"Creates the privacy loss distribution of the Gaussian mechanism.\n\n Args:\n standard_deviation: the standard_deviation of the Gaussian distribution.\n sensitivity: the sensitivity of function f. (i.e. the maximum absolute\n change in f when an input to a single user changes.)\n pessimistic_estimate: a value indicating whether the rounding is done in\n such a way that the resulting epsilon-hockey stick divergence\n computation gives an upper estimate to the real value.\n value_discretization_interval: the length of the dicretization interval\n for the privacy loss distribution. The values will be rounded up/down to\n be integer multiples of this number.\n log_mass_truncation_bound: the ln of the probability mass that might be\n discarded from the noise distribution. The larger this number, the more\n error it may introduce in divergence calculations.\n\n Returns:\n The privacy loss distribution corresponding to the Gaussian mechanism with\n given parameters.\n \"\"\"\n return PrivacyLossDistribution.create_from_additive_noise(\n privacy_loss_mechanism.GaussianPrivacyLoss(\n standard_deviation,\n sensitivity=sensitivity,\n pessimistic_estimate=pessimistic_estimate,\n log_mass_truncation_bound=log_mass_truncation_bound),\n pessimistic_estimate=pessimistic_estimate,\n value_discretization_interval=value_discretization_interval)\n\n @classmethod\n def from_discrete_laplace_mechanism(\n cls,\n parameter: float,\n sensitivity: int = 1,\n pessimistic_estimate: bool = True,\n value_discretization_interval: float = 1e-4) -> 'PrivacyLossDistribution':\n \"\"\"Computes the privacy loss distribution of the Discrete Laplace mechanism.\n\n Args:\n parameter: the parameter of the discrete Laplace distribution.\n sensitivity: the sensitivity of function f. (i.e. the maximum absolute\n change in f when an input to a single user changes.)\n pessimistic_estimate: a value indicating whether the rounding is done in\n such a way that the resulting epsilon-hockey stick divergence\n computation gives an upper estimate to the real value.\n value_discretization_interval: the length of the dicretization interval\n for the privacy loss distribution. The values will be rounded up/down to\n be integer multiples of this number.\n\n Returns:\n The privacy loss distribution corresponding to the Discrete Laplace\n mechanism with given parameters.\n \"\"\"\n return PrivacyLossDistribution.create_from_additive_noise(\n privacy_loss_mechanism.DiscreteLaplacePrivacyLoss(\n parameter, sensitivity=sensitivity),\n pessimistic_estimate=pessimistic_estimate,\n value_discretization_interval=value_discretization_interval)\n\n @classmethod\n def from_discrete_gaussian_mechanism(\n cls,\n sigma: float,\n sensitivity: int = 1,\n truncation_bound: int = None,\n pessimistic_estimate: bool = True,\n value_discretization_interval: float = 1e-4) -> 'PrivacyLossDistribution':\n \"\"\"Creates the privacy loss distribution of the discrete Gaussian mechanism.\n\n Args:\n sigma: the parameter of the discrete Gaussian distribution. Note that\n unlike the (continuous) Gaussian distribution this is not equal to the\n standard deviation of the noise.\n sensitivity: the sensitivity of function f. (i.e. the maximum absolute\n change in f when an input to a single user changes.)\n truncation_bound: bound for truncating the noise, i.e. the noise will only\n have a support in [-truncation_bound, truncation_bound]. When not\n specified, truncation_bound will be chosen in such a way that the mass\n of the noise outside of this range is at most 1e-30.\n pessimistic_estimate: a value indicating whether the rounding is done in\n such a way that the resulting epsilon-hockey stick divergence\n computation gives an upper estimate to the real value.\n value_discretization_interval: the length of the dicretization interval\n for the privacy loss distribution. The values will be rounded up/down to\n be integer multiples of this number.\n\n Returns:\n The privacy loss distribution corresponding to the discrete Gaussian\n mechanism with given parameters.\n \"\"\"\n return PrivacyLossDistribution.create_from_additive_noise(\n privacy_loss_mechanism.DiscreteGaussianPrivacyLoss(\n sigma, sensitivity=sensitivity, truncation_bound=truncation_bound),\n pessimistic_estimate=pessimistic_estimate,\n value_discretization_interval=value_discretization_interval)\n\n @classmethod\n def from_privacy_parameters(\n cls,\n privacy_parameters: common.DifferentialPrivacyParameters,\n value_discretization_interval: float = 1e-4) -> 'PrivacyLossDistribution':\n \"\"\"Constructs pessimistic PLD from epsilon and delta parameters.\n\n When the mechanism is (epsilon, delta)-differentially private, the following\n is a pessimistic estimate of its privacy loss distribution (see Section 3.5\n of the supplementary material for more explanation):\n - infinity with probability delta.\n - epsilon with probability (1 - delta) / (1 + exp(-eps))\n - -epsilon with probability (1 - delta) / (1 + exp(eps))\n\n Args:\n privacy_parameters: the privacy guarantee of the mechanism.\n value_discretization_interval: the length of the dicretization interval\n for the privacy loss distribution. The values will be rounded up/down to\n be integer multiples of this number.\n\n Returns:\n The privacy loss distribution constructed as specified.\n \"\"\"\n delta = privacy_parameters.delta\n epsilon = privacy_parameters.epsilon\n\n rounded_probability_mass_function = {\n math.ceil(epsilon / value_discretization_interval):\n (1 - delta) / (1 + math.exp(-epsilon)),\n math.ceil(-epsilon / value_discretization_interval):\n (1 - delta) / (1 + math.exp(epsilon))\n }\n\n return cls(rounded_probability_mass_function, value_discretization_interval,\n privacy_parameters.delta)\n\n def get_delta_for_epsilon(self, epsilon: float) -> float:\n \"\"\"Computes the epsilon-hockey stick divergence between mu_upper, mu_lower.\n\n When this privacy loss distribution corresponds to a mechanism, the\n epsilon-hockey stick divergence gives the value of delta for which the\n mechanism is (epsilon, delta)-differentially private. (See Observation 1 in\n the supplementary material.)\n\n Args:\n epsilon: the epsilon in epsilon-hockey stick divergence.\n\n Returns:\n A non-negative real number which is the epsilon-hockey stick divergence\n between the upper (mu_upper) and the lower (mu_lower) distributions\n corresponding to this privacy loss distribution.\n \"\"\"\n\n # The epsilon-hockey stick divergence of mu_upper with respect to mu_lower\n # is equal to (the sum over all the values in the privacy loss distribution\n # of the probability mass at value times max(0, 1 - e^{epsilon - value}) )\n # plus the infinity_mass.\n shifted_privacy_losses = []\n probability_masses = []\n for i in self.rounded_probability_mass_function:\n val = i * self.value_discretization_interval\n if val > epsilon and self.rounded_probability_mass_function[i] > 0:\n shifted_privacy_losses.append(epsilon - val)\n probability_masses.append(self.rounded_probability_mass_function[i])\n\n return self.infinity_mass + np.dot(\n (1 - np.exp(shifted_privacy_losses)), probability_masses)\n\n def get_epsilon_for_delta(self, delta: float) -> float:\n \"\"\"Computes epsilon for which hockey stick divergence is at most delta.\n\n This function computes the smallest non-negative epsilon for which the\n epsilon-hockey stick divergence between mu_upper, mu_lower is at most delta.\n\n When this privacy loss distribution corresponds to a mechanism and the\n rounding is pessimistic, the returned value corresponds to an epsilon for\n which the mechanism is (epsilon, delta)-differentially private. (See\n Observation 1 in the supplementary material.)\n\n Args:\n delta: the target epsilon-hockey stick divergence.\n\n Returns:\n A non-negative real number which is the smallest epsilon such that the\n epsilon-hockey stick divergence between the upper (mu_upper) and the\n lower (mu_lower) distributions is at most delta. When no such finite\n epsilon exists, return math.inf.\n \"\"\"\n\n if self.infinity_mass > delta:\n return math.inf\n\n mass_upper = self.infinity_mass\n mass_lower = 0\n for i in sorted(\n self.rounded_probability_mass_function.keys(), reverse=True):\n val = i * self.value_discretization_interval\n\n if (mass_upper > delta and mass_lower > 0 and math.log(\n (mass_upper - delta) / mass_lower) >= val):\n # Epsilon is greater than or equal to val.\n break\n\n mass_upper += self.rounded_probability_mass_function[i]\n mass_lower += (math.exp(-val) * self.rounded_probability_mass_function[i])\n\n if mass_upper >= delta and mass_lower == 0:\n # This only occurs when val is very large, which results in exp(-val)\n # being treated as zero.\n return max(0, val)\n\n if mass_upper <= mass_lower + delta:\n return 0\n else:\n return math.log((mass_upper - delta) / mass_lower)\n\n def validate_composable(self,\n privacy_loss_distribution: 'PrivacyLossDistribution'):\n \"\"\"Verifies that a given PLD can be composed with this PLD.\n\n The two privacy loss distributions must have the same discretization\n interval and estimate type for the composition to be allowed.\n\n Args:\n privacy_loss_distribution: the privacy loss distribution to be composed\n with the current privacy loss distribution.\n\n Raises:\n ValueError if the value_discretization_interval or estimate_type of the\n two PLDs are different.\n \"\"\"\n if (self.value_discretization_interval !=\n privacy_loss_distribution.value_discretization_interval):\n raise ValueError(\n f'Discretization intervals are different: '\n f'{self.value_discretization_interval}'\n f'{privacy_loss_distribution.value_discretization_interval}')\n\n if (self.pessimistic_estimate !=\n privacy_loss_distribution.pessimistic_estimate):\n raise ValueError(f'Estimation types are different: '\n f'{self.pessimistic_estimate}'\n f'{privacy_loss_distribution.pessimistic_estimate}')\n\n def compose(\n self,\n privacy_loss_distribution: 'PrivacyLossDistribution',\n tail_mass_truncation: float = 1e-15,\n ) -> 'PrivacyLossDistribution':\n \"\"\"Computes a privacy loss distribution resulting from composing two PLDs.\n\n Args:\n privacy_loss_distribution: the privacy loss distribution to be composed\n with the current privacy loss distribution. The two must have the same\n value_discretization_interval.\n tail_mass_truncation: an upper bound on the tails of the probability mass\n of the PLD that might be truncated.\n\n Returns:\n A privacy loss distribution which is the result of composing the two.\n \"\"\"\n self.validate_composable(privacy_loss_distribution)\n\n # The probability mass function of the resulting distribution is simply the\n # convolutaion of the two input probability mass functions.\n new_rounded_probability_mass_function = common.convolve_dictionary(\n self.rounded_probability_mass_function,\n privacy_loss_distribution.rounded_probability_mass_function,\n tail_mass_truncation=tail_mass_truncation)\n\n new_infinity_mass = (\n self.infinity_mass + privacy_loss_distribution.infinity_mass -\n (self.infinity_mass * privacy_loss_distribution.infinity_mass))\n\n if self.pessimistic_estimate:\n # In the pessimistic case, the truncated probability mass needs to be\n # treated as if it were infinity.\n new_infinity_mass += tail_mass_truncation\n\n return PrivacyLossDistribution(\n new_rounded_probability_mass_function,\n self.value_discretization_interval,\n new_infinity_mass,\n pessimistic_estimate=self.pessimistic_estimate)\n\n def get_delta_for_epsilon_for_composed_pld(\n self, privacy_loss_distribution: 'PrivacyLossDistribution',\n epsilon: float) -> float:\n \"\"\"Computes delta for given epsilon for the result of composing this PLD and a given PLD.\n\n The output of this function should be the same as first composing this PLD\n and privacy_loss_distribution, and then call get_delta_for_epsilon on the\n resulting PLD. The main advantage is that this function is faster.\n\n Args:\n privacy_loss_distribution: the privacy loss distribution to be composed\n with the current privacy loss distribution. The two must have the same\n value_discretization_interval.\n epsilon: the epsilon in epsilon-hockey stick divergence.\n\n Returns:\n A non-negative real number which is the epsilon-hockey stick divergence\n of the privacy loss distribution which is the result of composing this PLD\n with privacy_loss_distribution.\n \"\"\"\n self.validate_composable(privacy_loss_distribution)\n\n this_offset, this_probability_mass_function = common.dictionary_to_list(\n self.rounded_probability_mass_function)\n this_exponentiated_privacy_loss_values = np.exp(\n self.value_discretization_interval * np.arange(\n this_offset, this_offset + len(this_probability_mass_function)))\n\n other_offset, other_probability_mass_function = common.dictionary_to_list(\n privacy_loss_distribution.rounded_probability_mass_function)\n other_exponentiated_privacy_loss_values = np.exp(\n privacy_loss_distribution.value_discretization_interval * np.arange(\n other_offset, other_offset + len(other_probability_mass_function)))\n\n exp_epsilon = math.exp(epsilon)\n\n # Compute the hockey stick divergence using equation (2) in the\n # supplementary material. other_cumulative_upper_mass below represents the\n # summation in equation (3) and other_cumulative_lower_mass represents the\n # summation in equation (4).\n\n other_cumulative_upper_mass = 0\n other_cumulative_lower_mass = 0\n current_index = len(other_probability_mass_function) - 1\n divergence = 0\n for this_exponentiated_privacy_loss, this_probability_mass in zip(\n this_exponentiated_privacy_loss_values, this_probability_mass_function):\n cutoff = exp_epsilon / this_exponentiated_privacy_loss\n while current_index >= 0 and other_exponentiated_privacy_loss_values[\n current_index] > cutoff:\n other_cumulative_upper_mass += other_probability_mass_function[\n current_index]\n other_cumulative_lower_mass += (\n other_probability_mass_function[current_index] /\n other_exponentiated_privacy_loss_values[current_index])\n current_index -= 1\n divergence += this_probability_mass * (\n other_cumulative_upper_mass - cutoff * other_cumulative_lower_mass)\n\n # The probability that the composed privacy loss is infinite\n composed_infinity_mass = 1 - (1 - self.infinity_mass) * (\n 1 - privacy_loss_distribution.infinity_mass)\n\n return divergence + composed_infinity_mass\n\n def self_compose(\n self,\n num_times: int,\n tail_mass_truncation: float = 1e-15) -> 'PrivacyLossDistribution':\n \"\"\"Computes PLD resulting from repeated composing the PLD with itself.\n\n Args:\n num_times: the number of times to compose this PLD with itself.\n tail_mass_truncation: an upper bound on the tails of the probability mass\n of the PLD that might be truncated. Currently only supports for\n pessimistic estimates.\n\n Returns:\n A privacy loss distribution which is the result of the composition.\n \"\"\"\n if not self.pessimistic_estimate:\n # Currently support truncation only for pessimistic estimates.\n tail_mass_truncation = 0\n\n new_rounded_probability_mass_function = common.self_convolve_dictionary(\n self.rounded_probability_mass_function,\n num_times,\n tail_mass_truncation=tail_mass_truncation)\n\n new_infinity_mass = (1 - ((1 - self.infinity_mass)**num_times))\n new_infinity_mass += tail_mass_truncation\n\n return PrivacyLossDistribution(\n new_rounded_probability_mass_function,\n self.value_discretization_interval,\n new_infinity_mass,\n pessimistic_estimate=self.pessimistic_estimate)\n" ]
[ [ "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
samtkapadia/IMT_new_languages
[ "71439a8623de0bbbbb982b2ba7788f76e386818a" ]
[ "IMT_django/teacher/helper_functions.py" ]
[ "import numpy as np\nimport os\n\n\ndef pad_audio(audio_in, length=40000):\n trail_space = max(length - audio_in.shape[0], 0)\n\n print(trail_space)\n audio_padded = np.pad(audio_in, (0, trail_space))\n\n return audio_padded\n\n\ndef calculate_final_score(request):\n total_score = np.sum(request.session['score'])\n final_score = total_score / len(request.session['score'])\n\n return final_score\n\n\n\n\n\n\n\n" ]
[ [ "numpy.sum", "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dnnspark/trainer
[ "cdf28eaf22e4b97e11b08d4d04274e2e178f20e3" ]
[ "experiment_interface/utils.py" ]
[ "import numpy as np\n\ndef np_encode(x):\n\t'''\n\tInput\n\t=====\n\t\tx : np.ndarray\n\n\n\tReturn\n\t======\n\t\tencoded_array: bytes\n\t\tencoded_shape: bytes\n\t\tencoded_dtype: bytes\n\t'''\n\n\tencoded_array = x.tostring()\n\tencoded_shape = str(x.shape)[1:-1]\n\tencoded_dtype = str(x.dtype)\n\n\treturn encoded_array, encoded_shape, encoded_dtype\n\n\ndef np_decode(encoded_array, encoded_shape, encoded_dtype): \n\n\tdecoded_dtype = np.dtype(encoded_dtype)\n\tdecoded_shape = tuple( np.fromstring(encoded_shape, dtype=np.int64, sep=',') )\n\t# decoded_array = np.fromstring(encoded_array, dtype=decoded_dtype).reshape(decoded_shape)\n\tdecoded_array = np.frombuffer(encoded_array, dtype=decoded_dtype).reshape(decoded_shape)\n\n\treturn decoded_array\n\ndef extract_np_array(df, name):\n\t'''\n\tDecode numpy array from DataFrame\n\n\tInput\n\t=====\n\t\tdf: pd.DataFrame\n\t\tname: str\n\t\t\tdf must have <name>, <name>_shape, <name>_dtype as columns.\n\n\tReturn\n\t======\n\t\tdecoded: pd.Series\n\t\t\tEach element is a np.array.\n\n\t'''\n\t_df = df[ [name, name+'_shape', name+'_dtype'] ]\n\tdecoded = df.apply(lambda x: np_decode(x[name], x[name+'_shape'], x[name+'_dtype']), axis=1)\n\treturn decoded\n\n\n" ]
[ [ "numpy.frombuffer", "numpy.fromstring", "numpy.dtype" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
aboddie/pandaSDMX
[ "224bf7c86ca159628e0087c8041be6cf9228f4c5" ]
[ "pandasdmx/tests/writer/test_pandas.py" ]
[ "\"\"\"Tests for pandasdmx/writer.py.\"\"\"\nimport pandas as pd\nimport pytest\nfrom pytest import raises\n\nimport pandasdmx\nfrom pandasdmx.model import TimeDimension\nfrom pandasdmx.tests import assert_pd_equal\nfrom pandasdmx.tests.data import expected_data, specimen, test_files\n\n# file name → (exception raised, exception message, comment/reason)\nssds = (\n \"Reading StructureSpecificDataSet does not distinguish between attrs \"\n \"and dimension values.\"\n)\n\nfile_marks = {\n \"exr-action-delete.json\": (\n AssertionError,\n \"Expected type <class 'pandas.core.frame.DataFrame'>, found <class \"\n \" 'list'> instead\",\n \"Message contains two DataSets; test infrastructure currently handles \"\n \"only messages with a single DataSet.\",\n ),\n \"ECB/EXR/ng-ts-ss.xml\": (AssertionError, \"Series.index are different\", ssds),\n \"ECB/EXR/ng-flat-ss.xml\": (AssertionError, \"Series.index are different\", ssds),\n \"ECB/EXR/ng-xs-ss.xml\": (AssertionError, \"Series.index are different\", ssds),\n \"ECB/EXR/ng-ts-gf-ss.xml\": (AssertionError, \"Series.index are different\", ssds),\n}\n\n\ndef pytest_generate_tests(metafunc):\n if \"data_path\" in metafunc.fixturenames:\n params = []\n tf = test_files(kind=\"data\")\n for value, id in zip(tf[\"argvalues\"], tf[\"ids\"]):\n kwargs = dict(id=id)\n for cond, info in file_marks.items():\n if cond in str(value):\n kwargs[\"marks\"] = pytest.mark.skip(reason=info[2])\n break\n\n params.append(pytest.param(value, **kwargs))\n\n metafunc.parametrize(\"data_path\", params)\n\n\ndef test_write_data_arguments():\n msg = pandasdmx.read_sdmx(test_files(kind=\"data\")[\"argvalues\"][0])\n\n # Attributes must be a string\n with raises(TypeError):\n pandasdmx.to_pandas(msg, attributes=2)\n\n # Attributes must contain only 'dgso'\n with raises(ValueError):\n pandasdmx.to_pandas(msg, attributes=\"foobarbaz\")\n\n\ndef test_write_data(data_path):\n msg = pandasdmx.read_sdmx(data_path)\n\n result = pandasdmx.to_pandas(msg)\n\n expected = expected_data(data_path)\n if expected is not None:\n print(expected, result, sep=\"\\n\")\n assert_pd_equal(expected, result)\n\n # TODO incomplete\n assert isinstance(result, (pd.Series, pd.DataFrame, list)), type(result)\n\n\[email protected](\"path\", **test_files(kind=\"data\"))\ndef test_write_data_attributes(path):\n msg = pandasdmx.read_sdmx(path)\n\n result = pandasdmx.to_pandas(msg, attributes=\"osgd\")\n # TODO incomplete\n assert isinstance(result, (pd.Series, pd.DataFrame, list)), type(result)\n\n\ndef test_write_agencyscheme():\n # Convert an agency scheme\n with specimen(\"ECB/orgscheme.xml\") as f:\n msg = pandasdmx.read_sdmx(f)\n data = pandasdmx.to_pandas(msg)\n\n assert data[\"organisation_scheme\"][\"AGENCIES\"][\"name\"][\"ESTAT\"] == \"Eurostat\"\n\n # to_pandas only returns keys for non-empty attributes of StructureMessage\n # https://github.com/dr-leo/pandaSDMX/issues/90\n assert set(data.keys()) == {\"organisation_scheme\"}\n\n # Attribute access works\n assert data.organisation_scheme.AGENCIES.name.ESTAT == \"Eurostat\"\n\n with pytest.raises(AttributeError):\n data.codelist\n with pytest.raises(AttributeError):\n data.dataflow\n with pytest.raises(AttributeError):\n data.structure\n\n\ndef test_write_categoryscheme():\n with specimen(\"IPI-2010-A21-structure.xml\") as f:\n msg = pandasdmx.read_sdmx(f)\n data = pandasdmx.to_pandas(msg)\n\n cs = data[\"category_scheme\"][\"CLASSEMENT_DATAFLOWS\"]\n\n assert cs.loc[\"COMPTA-NAT\", \"name\"] == \"National accounts (GDP, consumption...)\"\n\n # Children appear\n assert cs.loc[\"CNA-PIB-2005\", \"parent\"] == \"CNA-PIB\"\n\n\ndef test_write_codelist():\n # Retrieve codelists from a test specimen and convert to pandas\n with specimen(\"common-structure.xml\") as f:\n dsd_common = pandasdmx.read_sdmx(f)\n codelists = pandasdmx.to_pandas(dsd_common)[\"codelist\"]\n\n # File contains 5 code lists\n assert len(codelists) == 5\n\n # Code lists have expected number of items\n assert len(codelists[\"CL_FREQ\"]) == 8\n\n # Items names can be retrieved by ID\n freq = codelists[\"CL_FREQ\"]\n assert freq[\"name\"][\"A\"] == \"Annual\"\n\n # Non-hierarchical code list has a string name\n # does not work as DataFrame has no name.\n # assert freq.name == \"Code list for Frequency (FREQ)\"\n\n # Hierarchical code list\n with specimen(\"codelist_partial.xml\") as f:\n msg = pandasdmx.read_sdmx(f)\n\n # Convert single codelist\n CL_AREA = pandasdmx.to_pandas(msg.codelist[\"CL_AREA\"])\n\n # Hierichical list has a 'parent' column; parent of Africa is the World\n assert CL_AREA.loc[\"002\", \"parent\"] == \"001\"\n\n # Pandas features can be used to merge parent names\n area_hierarchy = pd.merge(\n CL_AREA,\n CL_AREA,\n how=\"left\",\n left_on=\"parent\",\n right_index=True,\n suffixes=(\"\", \"_parent\"),\n )\n assert area_hierarchy.loc[\"002\", \"name_parent\"] == \"World\"\n\n\ndef test_write_conceptscheme():\n with specimen(\"common-structure.xml\") as f:\n msg = pandasdmx.read_sdmx(f)\n data = pandasdmx.to_pandas(msg)\n\n cdc = data[\"concept_scheme\"][\"CROSS_DOMAIN_CONCEPTS\"]\n assert cdc.loc[\"UNIT_MEASURE\", \"name\"] == \"Unit of Measure\"\n\n\ndef test_write_dataflow():\n # Read the INSEE dataflow definition\n with specimen(\"INSEE/dataflow\") as f:\n msg = pandasdmx.read_sdmx(f)\n\n # Convert to pandas\n result = pandasdmx.to_pandas(msg, include=\"dataflow\")\n\n # Number of Dataflows described in the file\n assert len(result[\"dataflow\"]) == 663\n\n # ID and names of first Dataflows\n mbop = \"Monthly Balance of Payments - \"\n expected = pd.Series(\n {\n \"ACT-TRIM-ANC\": \"Activity by sex and age - Quarterly series\",\n \"BPM6-CCAPITAL\": \"{}Capital account\".format(mbop),\n \"BPM6-CFINANCIER\": \"{}Financial account\".format(mbop),\n \"BPM6-CTRANSACTION\": \"{}Current transactions account\".format(mbop),\n \"BPM6-TOTAL\": \"{}Overall total and main headings\".format(mbop),\n }\n )\n assert_pd_equal(result[\"dataflow\"].head(), expected)\n\n\ndef test_write_dataset_datetime():\n \"\"\"Test datetime arguments to write_dataset().\"\"\"\n # Load structure\n with specimen(\"IPI-2010-A21-structure.xml\") as f:\n dsd = pandasdmx.read_sdmx(f).structure[\"IPI-2010-A21\"]\n TIME_PERIOD = dsd.dimensions.get(\"TIME_PERIOD\")\n FREQ = dsd.dimensions.get(\"FREQ\")\n\n assert isinstance(TIME_PERIOD, TimeDimension)\n\n # Load data, two ways\n with specimen(\"IPI-2010-A21.xml\") as f:\n msg = pandasdmx.read_sdmx(f, dsd=dsd)\n ds = msg.data[0]\n with specimen(\"IPI-2010-A21.xml\") as f:\n msg_no_structure = pandasdmx.read_sdmx(f)\n\n other_dims = list(\n filter(lambda n: n != \"TIME_PERIOD\", [d.id for d in dsd.dimensions.components])\n )\n\n def expected(df, axis=0, cls=pd.DatetimeIndex):\n axes = [\"index\", \"columns\"] if axis else [\"columns\", \"index\"]\n assert getattr(df, axes[0]).names == other_dims\n assert isinstance(getattr(df, axes[1]), cls)\n\n # Write with datetime=str\n df = pandasdmx.to_pandas(ds, datetime=\"TIME_PERIOD\")\n expected(df)\n\n # Write with datetime=Dimension instance\n df = pandasdmx.to_pandas(ds, datetime=TIME_PERIOD)\n expected(df)\n\n # Write with datetime=True fails because the data message contains no\n # actual structure information\n with pytest.raises(ValueError, match=r\"no TimeDimension in \\[.*\\]\"):\n pandasdmx.to_pandas(msg_no_structure, datetime=True)\n with pytest.raises(ValueError, match=r\"no TimeDimension in \\[.*\\]\"):\n pandasdmx.to_pandas(msg_no_structure.data[0], datetime=True)\n\n # DataMessage parsed with a DSD allows write_dataset to infer the\n # TimeDimension\n df = pandasdmx.to_pandas(msg, datetime=True)\n expected(df)\n # Same for DataSet\n df = pandasdmx.to_pandas(ds, datetime=True)\n expected(df)\n\n # As above, with axis=1\n df = pandasdmx.to_pandas(ds, datetime=dict(dim=\"TIME_PERIOD\", axis=1))\n expected(df, axis=1)\n df = pandasdmx.to_pandas(ds, datetime=dict(dim=TIME_PERIOD, axis=1))\n expected(df, axis=1)\n ds.structured_by = dsd\n df = pandasdmx.to_pandas(ds, datetime=dict(axis=1))\n expected(df, axis=1)\n df = pandasdmx.to_pandas(msg, datetime=dict(axis=1))\n expected(df, axis=1)\n\n # Write with freq='M' works\n df = pandasdmx.to_pandas(ds, datetime=dict(dim=\"TIME_PERIOD\", freq=\"M\"))\n expected(df, cls=pd.PeriodIndex)\n\n # Write with freq='A' works\n df = pandasdmx.to_pandas(ds, datetime=dict(dim=\"TIME_PERIOD\", freq=\"A\"))\n expected(df, cls=pd.PeriodIndex)\n # …but the index is not unique, because month information was discarded\n assert not df.index.is_unique\n\n # Write specifying the FREQ dimension by name fails\n with pytest.raises(\n ValueError,\n match=\"cannot convert to PeriodIndex with \" r\"non-unique freq=\\['A', 'M'\\]\",\n ):\n pandasdmx.to_pandas(ds, datetime=dict(dim=\"TIME_PERIOD\", freq=\"FREQ\"))\n\n # Remove non-monthly obs\n # TODO use a constraint, when this is supported\n ds.obs = list(filter(lambda o: o.key.FREQ != \"A\", ds.obs))\n\n # Now specifying the dimension by name works\n df = pandasdmx.to_pandas(ds, datetime=dict(dim=\"TIME_PERIOD\", freq=\"FREQ\"))\n\n # and FREQ is no longer in the columns index\n other_dims.pop(other_dims.index(\"FREQ\"))\n expected(df, cls=pd.PeriodIndex)\n\n # Specifying a Dimension works\n df = pandasdmx.to_pandas(ds, datetime=dict(dim=TIME_PERIOD, freq=FREQ))\n expected(df, cls=pd.PeriodIndex)\n\n # As above, using DSD attached to the DataMessage\n df = pandasdmx.to_pandas(msg, datetime=dict(dim=TIME_PERIOD, freq=\"FREQ\"))\n expected(df, cls=pd.PeriodIndex)\n\n # Invalid arguments\n with pytest.raises(ValueError, match=\"X\"):\n pandasdmx.to_pandas(msg, datetime=dict(dim=TIME_PERIOD, freq=\"X\"))\n with pytest.raises(ValueError, match=\"foo\"):\n pandasdmx.to_pandas(ds, datetime=dict(foo=\"bar\"))\n with pytest.raises(ValueError, match=\"43\"):\n pandasdmx.to_pandas(ds, datetime=43)\n\n\[email protected](\"path\", **test_files(kind=\"structure\"))\ndef test_writer_structure(path):\n msg = pandasdmx.read_sdmx(path)\n\n pandasdmx.to_pandas(msg)\n\n # TODO test contents\n\n\[email protected]\ndef test_write_constraint():\n \"\"\"'constraint' argument to writer.write_dataset.\"\"\"\n with specimen(\"ng-ts.xml\") as f:\n msg = pandasdmx.read_sdmx(f)\n\n # Fetch the message's DSD\n assert msg.structure.is_external_reference\n # NB the speciment included in tests/data has 'ECB_EXR_NG' as the\n # data structure ID; but a query against the web service gives\n # 'ECB_EXR1' for the same data structure.\n id = \"ECB_EXR1\"\n dsd = (\n pandasdmx.Request(msg.structure.maintainer.id)\n .get(\"datastructure\", id)\n .structure[id]\n )\n\n # Create a ContentConstraint\n cc = dsd.make_constraint({\"CURRENCY\": \"JPY+USD\"})\n\n # Write the message without constraint\n s1 = pandasdmx.to_pandas(msg)\n assert len(s1) == 12\n assert set(s1.index.to_frame()[\"CURRENCY\"]) == {\"CHF\", \"GBP\", \"JPY\", \"USD\"}\n\n # Writing using constraint produces a fewer items; only those matching the\n # constraint\n s2 = pandasdmx.to_pandas(msg, constraint=cc)\n assert len(s2) == 6\n assert set(s2.index.to_frame()[\"CURRENCY\"]) == {\"JPY\", \"USD\"}\n" ]
[ [ "pandas.merge" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]